Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 10:01:39 -05:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.18-rc3).

No conflicts or adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
.mailmap
@@ -27,6 +27,7 @@ Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Alex Williamson <alex@shazbot.org> <alex.williamson@redhat.com>
Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
@@ -0,0 +1,36 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/i2c/apm,xgene-slimpro-i2c.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: APM X-Gene SLIMpro Mailbox I2C

maintainers:
  - Khuong Dinh <khuong@os.amperecomputing.com>

description:
  An I2C controller accessed over the "SLIMpro" mailbox.

allOf:
  - $ref: /schemas/i2c/i2c-controller.yaml#

properties:
  compatible:
    const: apm,xgene-slimpro-i2c

  mboxes:
    maxItems: 1

required:
  - compatible
  - mboxes

unevaluatedProperties: false

examples:
  - |
    i2c {
        compatible = "apm,xgene-slimpro-i2c";
        mboxes = <&mailbox 0>;
    };
@@ -1,15 +0,0 @@
APM X-Gene SLIMpro Mailbox I2C Driver

An I2C controller accessed over the "SLIMpro" mailbox.

Required properties :

 - compatible : should be "apm,xgene-slimpro-i2c"
 - mboxes : use the label reference for the mailbox as the first parameter.
            The second parameter is the channel number.

Example :
	i2cslimpro {
		compatible = "apm,xgene-slimpro-i2c";
		mboxes = <&mailbox 0>;
	};
@@ -24,6 +24,10 @@ properties:
        - enum:
            - qcom,qcs8300-qmp-ufs-phy
        - const: qcom,sa8775p-qmp-ufs-phy
    - items:
        - enum:
            - qcom,kaanapali-qmp-ufs-phy
        - const: qcom,sm8750-qmp-ufs-phy
    - enum:
        - qcom,msm8996-qmp-ufs-phy
        - qcom,msm8998-qmp-ufs-phy
@@ -79,6 +79,7 @@ properties:
          - fsl,imx-audio-nau8822
          - fsl,imx-audio-sgtl5000
          - fsl,imx-audio-si476x
          - fsl,imx-audio-tlv320
          - fsl,imx-audio-tlv320aic31xx
          - fsl,imx-audio-tlv320aic32x4
          - fsl,imx-audio-wm8524
@@ -33,6 +33,7 @@ properties:
      - qcom,apq8096-sndcard
      - qcom,glymur-sndcard
      - qcom,qcm6490-idp-sndcard
      - qcom,qcs615-sndcard
      - qcom,qcs6490-rb3gen2-sndcard
      - qcom,qcs8275-sndcard
      - qcom,qcs9075-sndcard
@@ -24,10 +24,10 @@ description: |
  Instruments Smart Amp speaker protection algorithm. The
  integrated speaker voltage and current sense provides for real time
  monitoring of loudspeaker behavior.
  The TAS5825/TAS5827 is a stereo, digital input Class-D audio
  amplifier optimized for efficiently driving high peak power into
  small loudspeakers. An integrated on-chip DSP supports Texas
  Instruments Smart Amp speaker protection algorithm.
  The TAS5802/TAS5815/TAS5825/TAS5827/TAS5828 is a stereo, digital input
  Class-D audio amplifier optimized for efficiently driving high peak
  power into small loudspeakers. An integrated on-chip DSP supports
  Texas Instruments Smart Amp speaker protection algorithm.

  Specifications about the audio amplifier can be found at:
    https://www.ti.com/lit/gpn/tas2120
@@ -35,8 +35,10 @@ description: |
    https://www.ti.com/lit/gpn/tas2563
    https://www.ti.com/lit/gpn/tas2572
    https://www.ti.com/lit/gpn/tas2781
    https://www.ti.com/lit/gpn/tas5815
    https://www.ti.com/lit/gpn/tas5825m
    https://www.ti.com/lit/gpn/tas5827
    https://www.ti.com/lit/gpn/tas5828m

properties:
  compatible:
@@ -65,11 +67,21 @@ properties:
      Protection and Audio Processing, 16/20/24/32bit stereo I2S or
      multichannel TDM.

      ti,tas5802: 22-W, Inductor-Less, Digital Input, Closed-Loop Class-D
      Audio Amplifier with 96-Khz Extended Processing and Low Idle Power
      Dissipation.

      ti,tas5815: 30-W, Digital Input, Stereo, Closed-loop Class-D Audio
      Amplifier with 96 kHz Enhanced Processing

      ti,tas5825: 38-W Stereo, Inductor-Less, Digital Input, Closed-Loop 4.5V
      to 26.4V Class-D Audio Amplifier with 192-kHz Extended Audio Processing.

      ti,tas5827: 47-W Stereo, Digital Input, High Efficiency Closed-Loop Class-D
      Amplifier with Class-H Algorithm
      ti,tas5827: 47-W Stereo, Digital Input, High Efficiency Closed-Loop
      Class-D Amplifier with Class-H Algorithm

      ti,tas5828: 50-W Stereo, Digital Input, High Efficiency Closed-Loop
      Class-D Amplifier with Hybrid-Pro Algorithm
  oneOf:
    - items:
        - enum:
@@ -80,8 +92,11 @@ properties:
            - ti,tas2563
            - ti,tas2570
            - ti,tas2572
            - ti,tas5802
            - ti,tas5815
            - ti,tas5825
            - ti,tas5827
            - ti,tas5828
        - const: ti,tas2781
    - enum:
        - ti,tas2781
@@ -177,12 +192,28 @@ allOf:
            minimum: 0x38
            maximum: 0x3f

  - if:
      properties:
        compatible:
          contains:
            enum:
              - ti,tas5802
              - ti,tas5815
    then:
      properties:
        reg:
          maxItems: 4
          items:
            minimum: 0x54
            maximum: 0x57

  - if:
      properties:
        compatible:
          contains:
            enum:
              - ti,tas5827
              - ti,tas5828
    then:
      properties:
        reg:
@@ -15,6 +15,7 @@ select:
  compatible:
    contains:
      enum:
        - qcom,kaanapali-ufshc
        - qcom,sm8650-ufshc
        - qcom,sm8750-ufshc
required:
@@ -24,6 +25,7 @@ properties:
  compatible:
    items:
      - enum:
          - qcom,kaanapali-ufshc
          - qcom,sm8650-ufshc
          - qcom,sm8750-ufshc
      - const: qcom,ufshc
@@ -11,6 +11,7 @@ found on https://linux-ax25.in-berlin.de.

There is a mailing list for discussing Linux amateur radio matters
called linux-hams@vger.kernel.org. To subscribe to it, send a message to
majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
of the message, the subject field is ignored. You don't need to be
subscribed to post but of course that means you might miss an answer.
linux-hams+subscribe@vger.kernel.org or use the web interface at
https://vger.kernel.org. The subject and body of the message are
ignored. You don't need to be subscribed to post but of course that
means you might miss an answer.
@@ -137,16 +137,20 @@ d. Checksum offload header v5

Checksum offload header fields are in big endian format.

Packet format::

  Bit             0 - 6          7              8-15            16-31
  Function        Header Type    Next Header    Checksum Valid  Reserved

Header Type is to indicate the type of header, this usually is set to CHECKSUM

Header types
= ==========================================
= ===============
0 Reserved
1 Reserved
2 checksum header
= ===============

Checksum Valid is to indicate whether the header checksum is valid. Value of 1
implies that checksum is calculated on this packet and is valid, value of 0
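As an illustrative aside (not part of this patch), the v5 checksum offload
header described above can be pictured as a 4-byte, big-endian structure. The
sketch below only mirrors the Bit/Function table in this document; the struct
and mask names are made up for illustration and are not claimed to match the
in-tree definitions::

  #include <linux/types.h>

  /* Hypothetical MAPv5 checksum offload header sketch (big endian). */
  struct rmnet_mapv5_csum_sketch {
          u8     header_info;   /* bits 0-6: header type, bit 7: next header */
          u8     csum_info;     /* carries the checksum-valid indication */
          __be16 reserved;
  };

  #define MAPV5_HDR_TYPE_MASK  0x7f  /* header type; 2 == checksum header */
  #define MAPV5_NEXT_HDR_BIT   0x80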
@@ -183,9 +187,11 @@ rmnet in a single linear skb. rmnet will process the individual
packets and either ACK the MAP command or deliver the IP packet to the
network stack as needed

MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
Packet format::

MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....

MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...

3. Userspace configuration
==========================
@@ -96,9 +96,8 @@ needed to these network configuration daemons to make sure that an IP is
received only on the 'failover' device.

Below is the patch snippet used with 'cloud-ifupdown-helper' script found on
Debian cloud images:
Debian cloud images::

::
@@ -27,6 +27,8 @@ do_setup() {
     local working="$cfgdir/.$INTERFACE"
     local final="$cfgdir/$INTERFACE"
@@ -172,9 +171,8 @@ appropriate FDB entry is added.

The following script is executed on the destination hypervisor once migration
completes, and it reattaches the VF to the VM and brings down the virtio-net
interface.
interface::

::
 # reattach-vf.sh
 #!/bin/bash
@@ -38,6 +38,81 @@ Like ``clang-format`` for the rest of the kernel, ``rustfmt`` works on
individual files, and does not require a kernel configuration. Sometimes it may
even work with broken code.

Imports
~~~~~~~

``rustfmt``, by default, formats imports in a way that is prone to conflicts
while merging and rebasing, since in some cases it condenses several items into
the same line. For instance:

.. code-block:: rust

	// Do not use this style.
	use crate::{
	    example1,
	    example2::{example3, example4, example5},
	    example6, example7,
	    example8::example9,
	};

Instead, the kernel uses a vertical layout that looks like this:

.. code-block:: rust

	use crate::{
	    example1,
	    example2::{
	        example3,
	        example4,
	        example5, //
	    },
	    example6,
	    example7,
	    example8::example9, //
	};

That is, each item goes into its own line, and braces are used as soon as there
is more than one item in a list.

The trailing empty comment allows to preserve this formatting. Not only that,
``rustfmt`` will actually reformat imports vertically when the empty comment is
added. That is, it is possible to easily reformat the original example into the
expected style by running ``rustfmt`` on an input like:

.. code-block:: rust

	// Do not use this style.
	use crate::{
	    example1,
	    example2::{example3, example4, example5, //
	    },
	    example6, example7,
	    example8::example9, //
	};

The trailing empty comment works for nested imports, as shown above, as well as
for single item imports -- this can be useful to minimize diffs within patch
series:

.. code-block:: rust

	use crate::{
	    example1, //
	};

The trailing empty comment works in any of the lines within the braces, but it
is preferred to keep it in the last item, since it is reminiscent of the
trailing comma in other formatters. Sometimes it may be simpler to avoid moving
the comment several times within a patch series due to changes in the list.

There may be cases where exceptions may need to be made, i.e. none of this is
a hard rule. There is also code that is not migrated to this style yet, but
please do not introduce code in other styles.

Eventually, the goal is to get ``rustfmt`` to support this formatting style (or
a similar one) automatically in a stable release without requiring the trailing
empty comment. Thus, at some point, the goal is to remove those comments.


Comments
--------
@@ -1229,6 +1229,9 @@ It is not possible to read back a pending external abort (injected via
KVM_SET_VCPU_EVENTS or otherwise) because such an exception is always delivered
directly to the virtual CPU).

Calling this ioctl on a vCPU that hasn't been initialized will return
-ENOEXEC.

::

  struct kvm_vcpu_events {
@@ -1309,6 +1312,8 @@ exceptions by manipulating individual registers using the KVM_SET_ONE_REG API.

See KVM_GET_VCPU_EVENTS for the data structure.

Calling this ioctl on a vCPU that hasn't been initialized will return
-ENOEXEC.
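As a hedged usage sketch (not part of this patch), the -ENOEXEC behaviour
described above can be observed from userspace roughly as follows; ``vcpu_fd``
is assumed to come from KVM_CREATE_VCPU, and on arm64 the call is expected to
fail with ENOEXEC until the vCPU has been initialized via KVM_ARM_VCPU_INIT::

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Sketch: query vCPU events, tolerating an uninitialized vCPU. */
  static int read_vcpu_events(int vcpu_fd)
  {
          struct kvm_vcpu_events events;

          memset(&events, 0, sizeof(events));
          if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0) {
                  if (errno == ENOEXEC)
                          fprintf(stderr, "vCPU not initialized yet\n");
                  return -errno;
          }
          return 0;
  }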
4.33 KVM_GET_DEBUGREGS
----------------------
@@ -6432,9 +6437,18 @@ most one mapping per page, i.e. binding multiple memory regions to a single
guest_memfd range is not allowed (any number of memory regions can be bound to
a single guest_memfd file, but the bound ranges must not overlap).

When the capability KVM_CAP_GUEST_MEMFD_MMAP is supported, the 'flags' field
supports GUEST_MEMFD_FLAG_MMAP. Setting this flag on guest_memfd creation
enables mmap() and faulting of guest_memfd memory to host userspace.
The capability KVM_CAP_GUEST_MEMFD_FLAGS enumerates the `flags` that can be
specified via KVM_CREATE_GUEST_MEMFD. Currently defined flags:

  ============================ ================================================
  GUEST_MEMFD_FLAG_MMAP        Enable using mmap() on the guest_memfd file
                               descriptor.
  GUEST_MEMFD_FLAG_INIT_SHARED Make all memory in the file shared during
                               KVM_CREATE_GUEST_MEMFD (memory files created
                               without INIT_SHARED will be marked private).
                               Shared memory can be faulted into host userspace
                               page tables. Private memory cannot.
  ============================ ================================================

When the KVM MMU performs a PFN lookup to service a guest fault and the backing
guest_memfd has the GUEST_MEMFD_FLAG_MMAP set, then the fault will always be
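As a hedged usage sketch (not part of this patch) of the flags described above:
creating a mappable guest_memfd and faulting it into host userspace could look
roughly like this. ``vm_fd`` is an assumed VM file descriptor, and headers new
enough to define KVM_CREATE_GUEST_MEMFD and the GUEST_MEMFD_FLAG_* values are
assumed::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/kvm.h>

  /* Sketch: create a guest_memfd that host userspace may mmap(). */
  static void *map_guest_memfd(int vm_fd, uint64_t size)
  {
          struct kvm_create_guest_memfd gmem = {
                  .size  = size,
                  .flags = GUEST_MEMFD_FLAG_MMAP | GUEST_MEMFD_FLAG_INIT_SHARED,
          };
          int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);

          if (gmem_fd < 0)
                  return NULL;

          /* INIT_SHARED memory can be faulted into host page tables. */
          return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gmem_fd, 0);
  }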
@@ -13,7 +13,8 @@ will act as the VM interrupt controller, requiring emulated user-space devices
to inject interrupts to the VGIC instead of directly to CPUs. It is not
possible to create both a GICv3 and GICv2 on the same VM.

Creating a guest GICv3 device requires a host GICv3 as well.
Creating a guest GICv3 device requires a host GICv3 host, or a GICv5 host with
support for FEAT_GCIE_LEGACY.
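For context, a minimal sketch (not part of this patch) of how userspace asks
for a guest GICv3 through the generic KVM device API; ``vm_fd`` is an assumed
VM file descriptor, and on a GICv5 host this is expected to succeed only with
FEAT_GCIE_LEGACY support, as noted above::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Sketch: instantiate an in-kernel GICv3 for this VM. */
  static int create_vgic_v3(int vm_fd)
  {
          struct kvm_create_device dev = {
                  .type = KVM_DEV_TYPE_ARM_VGIC_V3,
          };

          if (ioctl(vm_fd, KVM_CREATE_DEVICE, &dev) < 0)
                  return -1;

          return dev.fd;  /* device fd, used with KVM_SET_DEVICE_ATTR etc. */
  }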
Groups:
@@ -3841,6 +3841,7 @@ F:	drivers/hwmon/asus-ec-sensors.c
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M:	Corentin Chary <corentin.chary@gmail.com>
M:	Luke D. Jones <luke@ljones.dev>
M:	Denis Benato <benato.denis96@gmail.com>
L:	platform-driver-x86@vger.kernel.org
S:	Maintained
W:	https://asus-linux.org/
@@ -26892,7 +26893,7 @@ S:	Maintained
F:	drivers/vfio/cdx/*

VFIO DRIVER
M:	Alex Williamson <alex.williamson@redhat.com>
M:	Alex Williamson <alex@shazbot.org>
L:	kvm@vger.kernel.org
S:	Maintained
T:	git https://github.com/awilliam/linux-vfio.git
@@ -27055,7 +27056,7 @@ T:	git git://linuxtv.org/media.git
F:	drivers/media/test-drivers/vimc/*

VIRT LIB
M:	Alex Williamson <alex.williamson@redhat.com>
M:	Alex Williamson <alex@shazbot.org>
M:	Paolo Bonzini <pbonzini@redhat.com>
L:	kvm@vger.kernel.org
S:	Supported
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -965,6 +965,7 @@ config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC
	def_bool y
	depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS
	depends on RUSTC_VERSION >= 107900
	depends on ARM64 || X86_64
	# With GCOV/KASAN we need this fix: https://github.com/rust-lang/rust/pull/129373
	depends on (RUSTC_LLVM_VERSION >= 190103 && RUSTC_VERSION >= 108200) || \
		(!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS)
@@ -24,22 +24,48 @@
 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
 * can reset into an UNKNOWN state and might not read as 1 until it has
 * been initialized explicitly.
 *
 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
 * don't advertise it (they predate this relaxation).
 *
 * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
 * indicating whether the CPU is running in E2H mode.
 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
	cmp	x1, #0
	b.ge	.LnVHE_\@
	b.lt	.LnE2H0_\@

	/*
	 * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
	 * as such via ID_AA64MMFR4_EL1.E2H0:
	 *
	 * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
	 *   have HCR_EL2.E2H implemented as RAO/WI.
	 *
	 * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
	 *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
	 *   guests on these hosts can write to HCR_EL2.E2H without
	 *   trapping to the hypervisor, but these writes have no
	 *   functional effect.
	 *
	 * Handle both cases by checking for an essential VHE property
	 * (system register remapping) to decide whether we're
	 * effectively VHE-only or not.
	 */
	msr_hcr_el2 x0			// Setup HCR_EL2 as nVHE
	isb
	mov	x1, #1			// Write something to FAR_EL1
	msr	far_el1, x1
	isb
	mov	x1, #2			// Try to overwrite it via FAR_EL2
	msr	far_el2, x1
	isb
	mrs	x1, far_el1		// If we see the latest write in FAR_EL1,
	cmp	x1, #2			// we can safely assume we are VHE only.
	b.ne	.LnVHE_\@		// Otherwise, we know that nVHE works.

.LnE2H0_\@:
	orr	x0, x0, #HCR_E2H
.LnVHE_\@:
	msr_hcr_el2 x0
	isb
.LnVHE_\@:
.endm

.macro __init_el2_sctlr
@@ -816,6 +816,11 @@ struct kvm_vcpu_arch {
|
||||
u64 hcrx_el2;
|
||||
u64 mdcr_el2;
|
||||
|
||||
struct {
|
||||
u64 r;
|
||||
u64 w;
|
||||
} fgt[__NR_FGT_GROUP_IDS__];
|
||||
|
||||
/* Exception Information */
|
||||
struct kvm_vcpu_fault_info fault;
|
||||
|
||||
@@ -1600,6 +1605,51 @@ static inline bool kvm_arch_has_irq_bypass(void)
|
||||
void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
|
||||
void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
|
||||
void check_feature_map(void);
|
||||
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
|
||||
|
||||
static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
|
||||
{
|
||||
switch (reg) {
|
||||
case HFGRTR_EL2:
|
||||
case HFGWTR_EL2:
|
||||
return HFGRTR_GROUP;
|
||||
case HFGITR_EL2:
|
||||
return HFGITR_GROUP;
|
||||
case HDFGRTR_EL2:
|
||||
case HDFGWTR_EL2:
|
||||
return HDFGRTR_GROUP;
|
||||
case HAFGRTR_EL2:
|
||||
return HAFGRTR_GROUP;
|
||||
case HFGRTR2_EL2:
|
||||
case HFGWTR2_EL2:
|
||||
return HFGRTR2_GROUP;
|
||||
case HFGITR2_EL2:
|
||||
return HFGITR2_GROUP;
|
||||
case HDFGRTR2_EL2:
|
||||
case HDFGWTR2_EL2:
|
||||
return HDFGRTR2_GROUP;
|
||||
default:
|
||||
BUILD_BUG_ON(1);
|
||||
}
|
||||
}
|
||||
|
||||
#define vcpu_fgt(vcpu, reg) \
|
||||
({ \
|
||||
enum fgt_group_id id = __fgt_reg_to_group_id(reg); \
|
||||
u64 *p; \
|
||||
switch (reg) { \
|
||||
case HFGWTR_EL2: \
|
||||
case HDFGWTR_EL2: \
|
||||
case HFGWTR2_EL2: \
|
||||
case HDFGWTR2_EL2: \
|
||||
p = &(vcpu)->arch.fgt[id].w; \
|
||||
break; \
|
||||
default: \
|
||||
p = &(vcpu)->arch.fgt[id].r; \
|
||||
break; \
|
||||
} \
|
||||
\
|
||||
p; \
|
||||
})
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
||||
@@ -1220,10 +1220,19 @@
	__val;							\
})

/*
 * The "Z" constraint combined with the "%x0" template should be enough
 * to force XZR generation if (v) is a constant 0 value but LLVM does not
 * yet understand that modifier/constraint combo so a conditional is required
 * to nudge the compiler into using XZR as a source for a 0 constant value.
 */
#define write_sysreg_s(v, r) do {					\
	u64 __val = (u64)(v);						\
	u32 __maybe_unused __check_r = (u32)(r);			\
	asm volatile(__msr_s(r, "%x0") : : "rZ" (__val));		\
	if (__builtin_constant_p(__val) && __val == 0)			\
		asm volatile(__msr_s(r, "xzr"));			\
	else								\
		asm volatile(__msr_s(r, "%x0") : : "r" (__val));	\
} while (0)

/*
@@ -697,6 +697,8 @@ static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
|
||||
|
||||
static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
bool step_done;
|
||||
|
||||
if (!is_ttbr0_addr(regs->pc))
|
||||
arm64_apply_bp_hardening();
|
||||
|
||||
@@ -707,10 +709,10 @@ static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
|
||||
* If we are stepping a suspended breakpoint there's nothing more to do:
|
||||
* the single-step is complete.
|
||||
*/
|
||||
if (!try_step_suspended_breakpoints(regs)) {
|
||||
local_daif_restore(DAIF_PROCCTX);
|
||||
step_done = try_step_suspended_breakpoints(regs);
|
||||
local_daif_restore(DAIF_PROCCTX);
|
||||
if (!step_done)
|
||||
do_el0_softstep(esr, regs);
|
||||
}
|
||||
arm64_exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
|
||||
@@ -66,7 +66,7 @@ static int nr_timers(struct kvm_vcpu *vcpu)
|
||||
|
||||
u32 timer_get_ctl(struct arch_timer_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
@@ -85,7 +85,7 @@ u32 timer_get_ctl(struct arch_timer_context *ctxt)
|
||||
|
||||
u64 timer_get_cval(struct arch_timer_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
@@ -104,7 +104,7 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
|
||||
|
||||
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
@@ -126,7 +126,7 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
|
||||
|
||||
static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
@@ -146,16 +146,6 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
|
||||
}
|
||||
}
|
||||
|
||||
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
|
||||
{
|
||||
if (!ctxt->offset.vm_offset) {
|
||||
WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
|
||||
return;
|
||||
}
|
||||
|
||||
WRITE_ONCE(*ctxt->offset.vm_offset, offset);
|
||||
}
|
||||
|
||||
u64 kvm_phys_timer_read(void)
|
||||
{
|
||||
return timecounter->cc->read(timecounter->cc);
|
||||
@@ -343,7 +333,7 @@ static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
|
||||
u64 ns;
|
||||
|
||||
ctx = container_of(hrt, struct arch_timer_context, hrtimer);
|
||||
vcpu = ctx->vcpu;
|
||||
vcpu = timer_context_to_vcpu(ctx);
|
||||
|
||||
trace_kvm_timer_hrtimer_expire(ctx);
|
||||
|
||||
@@ -436,8 +426,9 @@ static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
|
||||
*
|
||||
* But hey, it's fast, right?
|
||||
*/
|
||||
if (is_hyp_ctxt(ctx->vcpu) &&
|
||||
(ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
|
||||
if (is_hyp_ctxt(vcpu) &&
|
||||
(ctx == vcpu_vtimer(vcpu) || ctx == vcpu_ptimer(vcpu))) {
|
||||
unsigned long val = timer_get_ctl(ctx);
|
||||
__assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
|
||||
timer_set_ctl(ctx, val);
|
||||
@@ -470,7 +461,7 @@ static void timer_emulate(struct arch_timer_context *ctx)
|
||||
trace_kvm_timer_emulate(ctx, should_fire);
|
||||
|
||||
if (should_fire != ctx->irq.level)
|
||||
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
|
||||
kvm_timer_update_irq(timer_context_to_vcpu(ctx), should_fire, ctx);
|
||||
|
||||
kvm_timer_update_status(ctx, should_fire);
|
||||
|
||||
@@ -498,7 +489,7 @@ static void set_cntpoff(u64 cntpoff)
|
||||
|
||||
static void timer_save_state(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
|
||||
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
|
||||
unsigned long flags;
|
||||
|
||||
@@ -609,7 +600,7 @@ static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
|
||||
|
||||
static void timer_restore_state(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
|
||||
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
|
||||
unsigned long flags;
|
||||
|
||||
@@ -668,7 +659,7 @@ static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, boo
|
||||
|
||||
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctx->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
|
||||
bool phys_active = false;
|
||||
|
||||
/*
|
||||
@@ -677,7 +668,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
|
||||
* this point and the register restoration, we'll take the
|
||||
* interrupt anyway.
|
||||
*/
|
||||
kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
|
||||
kvm_timer_update_irq(vcpu, kvm_timer_should_fire(ctx), ctx);
|
||||
|
||||
if (irqchip_in_kernel(vcpu->kvm))
|
||||
phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
|
||||
@@ -1063,7 +1054,7 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
|
||||
struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
ctxt->vcpu = vcpu;
|
||||
ctxt->timer_id = timerid;
|
||||
|
||||
if (timerid == TIMER_VTIMER)
|
||||
ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
|
||||
@@ -1121,49 +1112,6 @@ void kvm_timer_cpu_down(void)
|
||||
disable_percpu_irq(host_ptimer_irq);
|
||||
}
|
||||
|
||||
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
|
||||
{
|
||||
struct arch_timer_context *timer;
|
||||
|
||||
switch (regid) {
|
||||
case KVM_REG_ARM_TIMER_CTL:
|
||||
timer = vcpu_vtimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
|
||||
break;
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
|
||||
&vcpu->kvm->arch.flags)) {
|
||||
timer = vcpu_vtimer(vcpu);
|
||||
timer_set_offset(timer, kvm_phys_timer_read() - value);
|
||||
}
|
||||
break;
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
timer = vcpu_vtimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
|
||||
break;
|
||||
case KVM_REG_ARM_PTIMER_CTL:
|
||||
timer = vcpu_ptimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
|
||||
break;
|
||||
case KVM_REG_ARM_PTIMER_CNT:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
|
||||
&vcpu->kvm->arch.flags)) {
|
||||
timer = vcpu_ptimer(vcpu);
|
||||
timer_set_offset(timer, kvm_phys_timer_read() - value);
|
||||
}
|
||||
break;
|
||||
case KVM_REG_ARM_PTIMER_CVAL:
|
||||
timer = vcpu_ptimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
|
||||
break;
|
||||
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u64 read_timer_ctl(struct arch_timer_context *timer)
|
||||
{
|
||||
/*
|
||||
@@ -1180,31 +1128,6 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
|
||||
return ctl;
|
||||
}
|
||||
|
||||
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
|
||||
{
|
||||
switch (regid) {
|
||||
case KVM_REG_ARM_TIMER_CTL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_vtimer(vcpu), TIMER_REG_CTL);
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_vtimer(vcpu), TIMER_REG_CNT);
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_vtimer(vcpu), TIMER_REG_CVAL);
|
||||
case KVM_REG_ARM_PTIMER_CTL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_ptimer(vcpu), TIMER_REG_CTL);
|
||||
case KVM_REG_ARM_PTIMER_CNT:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_ptimer(vcpu), TIMER_REG_CNT);
|
||||
case KVM_REG_ARM_PTIMER_CVAL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_ptimer(vcpu), TIMER_REG_CVAL);
|
||||
}
|
||||
return (u64)-1;
|
||||
}
|
||||
|
||||
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
|
||||
struct arch_timer_context *timer,
|
||||
enum kvm_arch_timer_regs treg)
|
||||
|
||||
@@ -642,6 +642,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
vcpu->arch.hcr_el2 |= HCR_TWI;
|
||||
|
||||
vcpu_set_pauth_traps(vcpu);
|
||||
kvm_vcpu_load_fgt(vcpu);
|
||||
|
||||
if (is_protected_kvm_enabled()) {
|
||||
kvm_call_hyp_nvhe(__pkvm_vcpu_load,
|
||||
@@ -1794,6 +1795,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
||||
case KVM_GET_VCPU_EVENTS: {
|
||||
struct kvm_vcpu_events events;
|
||||
|
||||
if (!kvm_vcpu_initialized(vcpu))
|
||||
return -ENOEXEC;
|
||||
|
||||
if (kvm_arm_vcpu_get_events(vcpu, &events))
|
||||
return -EINVAL;
|
||||
|
||||
@@ -1805,6 +1809,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
||||
case KVM_SET_VCPU_EVENTS: {
|
||||
struct kvm_vcpu_events events;
|
||||
|
||||
if (!kvm_vcpu_initialized(vcpu))
|
||||
return -ENOEXEC;
|
||||
|
||||
if (copy_from_user(&events, argp, sizeof(events)))
|
||||
return -EFAULT;
|
||||
|
||||
|
||||
@@ -91,7 +91,6 @@ static enum trans_regime compute_translation_regime(struct kvm_vcpu *vcpu, u32 o
|
||||
case OP_AT_S1E2W:
|
||||
case OP_AT_S1E2A:
|
||||
return vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
|
||||
break;
|
||||
default:
|
||||
return (vcpu_el2_e2h_is_set(vcpu) &&
|
||||
vcpu_el2_tge_is_set(vcpu)) ? TR_EL20 : TR_EL10;
|
||||
@@ -1602,13 +1601,17 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
|
||||
.fn = match_s1_desc,
|
||||
.priv = &dm,
|
||||
},
|
||||
.regime = TR_EL10,
|
||||
.as_el0 = false,
|
||||
.pan = false,
|
||||
};
|
||||
struct s1_walk_result wr = {};
|
||||
int ret;
|
||||
|
||||
if (is_hyp_ctxt(vcpu))
|
||||
wi.regime = vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
|
||||
else
|
||||
wi.regime = TR_EL10;
|
||||
|
||||
ret = setup_s1_walk(vcpu, &wi, &wr, va);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_nested.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
/*
|
||||
@@ -1428,3 +1430,91 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
|
||||
{
|
||||
switch (reg) {
|
||||
case HFGRTR_EL2:
|
||||
return &hfgrtr_masks;
|
||||
case HFGWTR_EL2:
|
||||
return &hfgwtr_masks;
|
||||
case HFGITR_EL2:
|
||||
return &hfgitr_masks;
|
||||
case HDFGRTR_EL2:
|
||||
return &hdfgrtr_masks;
|
||||
case HDFGWTR_EL2:
|
||||
return &hdfgwtr_masks;
|
||||
case HAFGRTR_EL2:
|
||||
return &hafgrtr_masks;
|
||||
case HFGRTR2_EL2:
|
||||
return &hfgrtr2_masks;
|
||||
case HFGWTR2_EL2:
|
||||
return &hfgwtr2_masks;
|
||||
case HFGITR2_EL2:
|
||||
return &hfgitr2_masks;
|
||||
case HDFGRTR2_EL2:
|
||||
return &hdfgrtr2_masks;
|
||||
case HDFGWTR2_EL2:
|
||||
return &hdfgwtr2_masks;
|
||||
default:
|
||||
BUILD_BUG_ON(1);
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
|
||||
{
|
||||
u64 fgu = vcpu->kvm->arch.fgu[__fgt_reg_to_group_id(reg)];
|
||||
struct fgt_masks *m = __fgt_reg_to_masks(reg);
|
||||
u64 clear = 0, set = 0, val = m->nmask;
|
||||
|
||||
set |= fgu & m->mask;
|
||||
clear |= fgu & m->nmask;
|
||||
|
||||
if (is_nested_ctxt(vcpu)) {
|
||||
u64 nested = __vcpu_sys_reg(vcpu, reg);
|
||||
set |= nested & m->mask;
|
||||
clear |= ~nested & m->nmask;
|
||||
}
|
||||
|
||||
val |= set;
|
||||
val &= ~clear;
|
||||
*vcpu_fgt(vcpu, reg) = val;
|
||||
}
|
||||
|
||||
static void __compute_hfgwtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__compute_fgt(vcpu, HFGWTR_EL2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
|
||||
*vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;
|
||||
}
|
||||
|
||||
static void __compute_hdfgwtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__compute_fgt(vcpu, HDFGWTR_EL2);
|
||||
|
||||
if (is_hyp_ctxt(vcpu))
|
||||
*vcpu_fgt(vcpu, HDFGWTR_EL2) |= HDFGWTR_EL2_MDSCR_EL1;
|
||||
}
|
||||
|
||||
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
return;
|
||||
|
||||
__compute_fgt(vcpu, HFGRTR_EL2);
|
||||
__compute_hfgwtr(vcpu);
|
||||
__compute_fgt(vcpu, HFGITR_EL2);
|
||||
__compute_fgt(vcpu, HDFGRTR_EL2);
|
||||
__compute_hdfgwtr(vcpu);
|
||||
__compute_fgt(vcpu, HAFGRTR_EL2);
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
|
||||
return;
|
||||
|
||||
__compute_fgt(vcpu, HFGRTR2_EL2);
|
||||
__compute_fgt(vcpu, HFGWTR2_EL2);
|
||||
__compute_fgt(vcpu, HFGITR2_EL2);
|
||||
__compute_fgt(vcpu, HDFGRTR2_EL2);
|
||||
__compute_fgt(vcpu, HDFGWTR2_EL2);
|
||||
}
|
||||
|
||||
@@ -15,6 +15,12 @@
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
static int cpu_has_spe(u64 dfr0)
|
||||
{
|
||||
return cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
|
||||
!(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
|
||||
*
|
||||
@@ -77,13 +83,12 @@ void kvm_init_host_debug_data(void)
|
||||
*host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
|
||||
*host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);
|
||||
|
||||
if (cpu_has_spe(dfr0))
|
||||
host_data_set_flag(HAS_SPE);
|
||||
|
||||
if (has_vhe())
|
||||
return;
|
||||
|
||||
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
|
||||
!(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
|
||||
host_data_set_flag(HAS_SPE);
|
||||
|
||||
/* Check if we have BRBE implemented and available at the host */
|
||||
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
|
||||
host_data_set_flag(HAS_BRBE);
|
||||
@@ -102,7 +107,7 @@ void kvm_init_host_debug_data(void)
|
||||
void kvm_debug_init_vhe(void)
|
||||
{
|
||||
/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
|
||||
if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
|
||||
if (host_data_test_flag(HAS_SPE))
|
||||
write_sysreg_el1(0, SYS_PMSCR);
|
||||
}
|
||||
|
||||
|
||||
@@ -591,64 +591,6 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
|
||||
return copy_core_reg_indices(vcpu, NULL);
|
||||
}
|
||||
|
||||
static const u64 timer_reg_list[] = {
|
||||
KVM_REG_ARM_TIMER_CTL,
|
||||
KVM_REG_ARM_TIMER_CNT,
|
||||
KVM_REG_ARM_TIMER_CVAL,
|
||||
KVM_REG_ARM_PTIMER_CTL,
|
||||
KVM_REG_ARM_PTIMER_CNT,
|
||||
KVM_REG_ARM_PTIMER_CVAL,
|
||||
};
|
||||
|
||||
#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
|
||||
|
||||
static bool is_timer_reg(u64 index)
|
||||
{
|
||||
switch (index) {
|
||||
case KVM_REG_ARM_TIMER_CTL:
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
case KVM_REG_ARM_PTIMER_CTL:
|
||||
case KVM_REG_ARM_PTIMER_CNT:
|
||||
case KVM_REG_ARM_PTIMER_CVAL:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
||||
{
|
||||
for (int i = 0; i < NUM_TIMER_REGS; i++) {
|
||||
if (put_user(timer_reg_list[i], uindices))
|
||||
return -EFAULT;
|
||||
uindices++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
void __user *uaddr = (void __user *)(long)reg->addr;
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
|
||||
if (ret != 0)
|
||||
return -EFAULT;
|
||||
|
||||
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
|
||||
}
|
||||
|
||||
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
void __user *uaddr = (void __user *)(long)reg->addr;
|
||||
u64 val;
|
||||
|
||||
val = kvm_arm_timer_get_reg(vcpu, reg->id);
|
||||
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
const unsigned int slices = vcpu_sve_slices(vcpu);
|
||||
@@ -724,7 +666,6 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
|
||||
res += num_sve_regs(vcpu);
|
||||
res += kvm_arm_num_sys_reg_descs(vcpu);
|
||||
res += kvm_arm_get_fw_num_regs(vcpu);
|
||||
res += NUM_TIMER_REGS;
|
||||
|
||||
return res;
|
||||
}
|
||||
@@ -755,11 +696,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
||||
return ret;
|
||||
uindices += kvm_arm_get_fw_num_regs(vcpu);
|
||||
|
||||
ret = copy_timer_indices(vcpu, uindices);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
uindices += NUM_TIMER_REGS;
|
||||
|
||||
return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
|
||||
}
|
||||
|
||||
@@ -777,9 +713,6 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
if (is_timer_reg(reg->id))
|
||||
return get_timer_reg(vcpu, reg);
|
||||
|
||||
return kvm_arm_sys_reg_get_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
@@ -797,9 +730,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
if (is_timer_reg(reg->id))
|
||||
return set_timer_reg(vcpu, reg);
|
||||
|
||||
return kvm_arm_sys_reg_set_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
|
||||
@@ -147,7 +147,12 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
|
||||
if (esr & ESR_ELx_WFx_ISS_RV) {
|
||||
u64 val, now;
|
||||
|
||||
now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
|
||||
now = kvm_phys_timer_read();
|
||||
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
|
||||
now -= timer_get_offset(vcpu_hvtimer(vcpu));
|
||||
else
|
||||
now -= timer_get_offset(vcpu_vtimer(vcpu));
|
||||
|
||||
val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
|
||||
|
||||
if (now >= val)
|
||||
|
||||
@@ -195,123 +195,6 @@ static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
|
||||
__deactivate_cptr_traps_nvhe(vcpu);
|
||||
}
|
||||
|
||||
#define reg_to_fgt_masks(reg) \
|
||||
({ \
|
||||
struct fgt_masks *m; \
|
||||
switch(reg) { \
|
||||
case HFGRTR_EL2: \
|
||||
m = &hfgrtr_masks; \
|
||||
break; \
|
||||
case HFGWTR_EL2: \
|
||||
m = &hfgwtr_masks; \
|
||||
break; \
|
||||
case HFGITR_EL2: \
|
||||
m = &hfgitr_masks; \
|
||||
break; \
|
||||
case HDFGRTR_EL2: \
|
||||
m = &hdfgrtr_masks; \
|
||||
break; \
|
||||
case HDFGWTR_EL2: \
|
||||
m = &hdfgwtr_masks; \
|
||||
break; \
|
||||
case HAFGRTR_EL2: \
|
||||
m = &hafgrtr_masks; \
|
||||
break; \
|
||||
case HFGRTR2_EL2: \
|
||||
m = &hfgrtr2_masks; \
|
||||
break; \
|
||||
case HFGWTR2_EL2: \
|
||||
m = &hfgwtr2_masks; \
|
||||
break; \
|
||||
case HFGITR2_EL2: \
|
||||
m = &hfgitr2_masks; \
|
||||
break; \
|
||||
case HDFGRTR2_EL2: \
|
||||
m = &hdfgrtr2_masks; \
|
||||
break; \
|
||||
case HDFGWTR2_EL2: \
|
||||
m = &hdfgwtr2_masks; \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG_ON(1); \
|
||||
} \
|
||||
\
|
||||
m; \
|
||||
})
|
||||
|
||||
#define compute_clr_set(vcpu, reg, clr, set) \
|
||||
do { \
|
||||
u64 hfg = __vcpu_sys_reg(vcpu, reg); \
|
||||
struct fgt_masks *m = reg_to_fgt_masks(reg); \
|
||||
set |= hfg & m->mask; \
|
||||
clr |= ~hfg & m->nmask; \
|
||||
} while(0)
|
||||
|
||||
#define reg_to_fgt_group_id(reg) \
|
||||
({ \
|
||||
enum fgt_group_id id; \
|
||||
switch(reg) { \
|
||||
case HFGRTR_EL2: \
|
||||
case HFGWTR_EL2: \
|
||||
id = HFGRTR_GROUP; \
|
||||
break; \
|
||||
case HFGITR_EL2: \
|
||||
id = HFGITR_GROUP; \
|
||||
break; \
|
||||
case HDFGRTR_EL2: \
|
||||
case HDFGWTR_EL2: \
|
||||
id = HDFGRTR_GROUP; \
|
||||
break; \
|
||||
case HAFGRTR_EL2: \
|
||||
id = HAFGRTR_GROUP; \
|
||||
break; \
|
||||
case HFGRTR2_EL2: \
|
||||
case HFGWTR2_EL2: \
|
||||
id = HFGRTR2_GROUP; \
|
||||
break; \
|
||||
case HFGITR2_EL2: \
|
||||
id = HFGITR2_GROUP; \
|
||||
break; \
|
||||
case HDFGRTR2_EL2: \
|
||||
case HDFGWTR2_EL2: \
|
||||
id = HDFGRTR2_GROUP; \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG_ON(1); \
|
||||
} \
|
||||
\
|
||||
id; \
|
||||
})
|
||||
|
||||
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
|
||||
do { \
|
||||
u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
|
||||
struct fgt_masks *m = reg_to_fgt_masks(reg); \
|
||||
set |= hfg & m->mask; \
|
||||
clr |= hfg & m->nmask; \
|
||||
} while(0)
|
||||
|
||||
#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
|
||||
do { \
|
||||
struct fgt_masks *m = reg_to_fgt_masks(reg); \
|
||||
u64 c = clr, s = set; \
|
||||
u64 val; \
|
||||
\
|
||||
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
|
||||
if (is_nested_ctxt(vcpu)) \
|
||||
compute_clr_set(vcpu, reg, c, s); \
|
||||
\
|
||||
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
|
||||
\
|
||||
val = m->nmask; \
|
||||
val |= s; \
|
||||
val &= ~c; \
|
||||
write_sysreg_s(val, SYS_ ## reg); \
|
||||
} while(0)
|
||||
|
||||
#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
|
||||
update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
|
||||
|
||||
static inline bool cpu_has_amu(void)
|
||||
{
|
||||
u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
|
||||
@@ -320,33 +203,36 @@ static inline bool cpu_has_amu(void)
|
||||
ID_AA64PFR0_EL1_AMU_SHIFT);
|
||||
}
|
||||
|
||||
#define __activate_fgt(hctxt, vcpu, reg) \
|
||||
do { \
|
||||
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
|
||||
write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg); \
|
||||
} while (0)
|
||||
|
||||
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
return;
|
||||
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
|
||||
update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
|
||||
cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
|
||||
HFGWTR_EL2_TCR_EL1_MASK : 0);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGITR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);
|
||||
|
||||
if (cpu_has_amu())
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
|
||||
return;
|
||||
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
|
||||
}
|
||||
|
||||
#define __deactivate_fgt(htcxt, vcpu, reg) \
|
||||
|
||||
@@ -172,6 +172,7 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
|
||||
/* Trust the host for non-protected vcpu features. */
|
||||
vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
|
||||
memcpy(vcpu->arch.fgt, host_vcpu->arch.fgt, sizeof(vcpu->arch.fgt));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1859,13 +1859,16 @@ void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 guest_mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
|
||||
|
||||
if (is_nested_ctxt(vcpu))
|
||||
vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
|
||||
/*
|
||||
* In yet another example where FEAT_NV2 is fscking broken, accesses
|
||||
* to MDSCR_EL1 are redirected to the VNCR despite having an effect
|
||||
* at EL2. Use a big hammer to apply sanity.
|
||||
*
|
||||
* Unless of course we have FEAT_FGT, in which case we can precisely
|
||||
* trap MDSCR_EL1.
|
||||
*/
|
||||
if (is_hyp_ctxt(vcpu))
|
||||
else if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
|
||||
else
|
||||
vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
|
||||
}
|
||||
|
||||
@@ -203,7 +203,6 @@ static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
|
||||
MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
|
||||
case CNTHCTL_EL2:
|
||||
@@ -1595,14 +1594,47 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool access_hv_timer(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
static int arch_timer_set_user(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd,
|
||||
u64 val)
|
||||
{
|
||||
if (!vcpu_el2_e2h_is_set(vcpu))
|
||||
return undef_access(vcpu, p, r);
|
||||
switch (reg_to_encoding(rd)) {
|
||||
case SYS_CNTV_CTL_EL0:
|
||||
case SYS_CNTP_CTL_EL0:
|
||||
case SYS_CNTHV_CTL_EL2:
|
||||
case SYS_CNTHP_CTL_EL2:
|
||||
val &= ~ARCH_TIMER_CTRL_IT_STAT;
|
||||
break;
|
||||
case SYS_CNTVCT_EL0:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
|
||||
timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
|
||||
return 0;
|
||||
case SYS_CNTPCT_EL0:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
|
||||
timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return access_arch_timer(vcpu, p, r);
|
||||
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int arch_timer_get_user(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd,
|
||||
u64 *val)
|
||||
{
|
||||
switch (reg_to_encoding(rd)) {
|
||||
case SYS_CNTVCT_EL0:
|
||||
*val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
|
||||
break;
|
||||
case SYS_CNTPCT_EL0:
|
||||
*val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
|
||||
break;
|
||||
default:
|
||||
*val = __vcpu_sys_reg(vcpu, rd->reg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
|
||||
@@ -2507,15 +2539,20 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
|
||||
"trap of EL2 register redirected to EL1");
|
||||
}
|
||||
|
||||
#define EL2_REG_FILTERED(name, acc, rst, v, filter) { \
|
||||
#define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \
|
||||
SYS_DESC(SYS_##name), \
|
||||
.access = acc, \
|
||||
.reset = rst, \
|
||||
.reg = name, \
|
||||
.get_user = gu, \
|
||||
.set_user = su, \
|
||||
.visibility = filter, \
|
||||
.val = v, \
|
||||
}
|
||||
|
||||
#define EL2_REG_FILTERED(name, acc, rst, v, filter) \
|
||||
SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)
|
||||
|
||||
#define EL2_REG(name, acc, rst, v) \
|
||||
EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
|
||||
|
||||
@@ -2526,6 +2563,10 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
|
||||
EL2_REG_VNCR_FILT(name, hidden_visibility)
|
||||
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
|
||||
|
||||
#define TIMER_REG(name, vis) \
|
||||
SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \
|
||||
arch_timer_get_user, arch_timer_set_user, vis)
|
||||
|
||||
/*
|
||||
* Since reset() callback and field val are not used for idregs, they will be
|
||||
* used for specific purposes for idregs.
|
||||
@@ -2705,18 +2746,17 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,
|
||||
|
||||
if (guest_hyp_sve_traps_enabled(vcpu)) {
|
||||
kvm_inject_nested_sve_trap(vcpu);
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!p->is_write) {
|
||||
p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
|
||||
p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
|
||||
return true;
|
||||
}
|
||||
|
||||
vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
|
||||
vq = min(vq, vcpu_sve_max_vq(vcpu));
|
||||
vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
|
||||
|
||||
__vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -2833,6 +2873,16 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
|
||||
return __el2_visibility(vcpu, rd, s1pie_visibility);
|
||||
}
|
||||
|
||||
static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
if (vcpu_has_nv(vcpu) &&
|
||||
!vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
|
||||
return 0;
|
||||
|
||||
return REG_HIDDEN;
|
||||
}
|
||||
|
||||
static bool access_mdcr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
@@ -3482,17 +3532,19 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
AMU_AMEVTYPER1_EL0(14),
|
||||
AMU_AMEVTYPER1_EL0(15),
|
||||
|
||||
{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
|
||||
.get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
|
||||
{ SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
|
||||
.get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
|
||||
{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
|
||||
TIMER_REG(CNTP_CTL_EL0, NULL),
|
||||
TIMER_REG(CNTP_CVAL_EL0, NULL),
|
||||
|
||||
{ SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer },
|
||||
TIMER_REG(CNTV_CTL_EL0, NULL),
|
||||
TIMER_REG(CNTV_CVAL_EL0, NULL),
|
||||
|
||||
/* PMEVCNTRn_EL0 */
|
||||
PMU_PMEVCNTR_EL0(0),
|
||||
@@ -3690,12 +3742,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
|
||||
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
|
||||
{ SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
|
||||
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
|
||||
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
|
||||
TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
|
||||
TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),
|
||||
|
||||
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
|
||||
EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
|
||||
EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
|
||||
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
|
||||
TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
|
||||
TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),
|
||||
|
||||
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
|
||||
|
||||
@@ -5233,15 +5285,28 @@ static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
|
||||
}
|
||||
}
|
||||
|
||||
static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
|
||||
{
|
||||
switch(reg->id) {
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
return TO_ARM64_SYS_REG(CNTVCT_EL0);
|
||||
default:
|
||||
return reg->id;
|
||||
}
|
||||
}
|
||||
|
||||
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
|
||||
const struct sys_reg_desc table[], unsigned int num)
|
||||
{
|
||||
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
|
||||
const struct sys_reg_desc *r;
|
||||
u64 id = kvm_one_reg_to_id(reg);
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
|
||||
r = id_to_sys_reg_desc(vcpu, id, table, num);
|
||||
if (!r || sysreg_hidden(vcpu, r))
|
||||
return -ENOENT;
|
||||
|
||||
@@ -5274,13 +5339,14 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
|
||||
{
|
||||
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
|
||||
const struct sys_reg_desc *r;
|
||||
u64 id = kvm_one_reg_to_id(reg);
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
if (get_user(val, uaddr))
|
||||
return -EFAULT;
|
||||
|
||||
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
|
||||
r = id_to_sys_reg_desc(vcpu, id, table, num);
|
||||
if (!r || sysreg_hidden(vcpu, r))
|
||||
return -ENOENT;
|
||||
|
||||
@@ -5340,10 +5406,23 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
|
||||
|
||||
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
|
||||
{
|
||||
u64 idx;
|
||||
|
||||
if (!*uind)
|
||||
return true;
|
||||
|
||||
if (put_user(sys_reg_to_index(reg), *uind))
|
||||
switch (reg_to_encoding(reg)) {
|
||||
case SYS_CNTV_CVAL_EL0:
|
||||
idx = KVM_REG_ARM_TIMER_CVAL;
|
||||
break;
|
||||
case SYS_CNTVCT_EL0:
|
||||
idx = KVM_REG_ARM_TIMER_CNT;
|
||||
break;
|
||||
default:
|
||||
idx = sys_reg_to_index(reg);
|
||||
}
|
||||
|
||||
if (put_user(idx, *uind))
|
||||
return false;
|
||||
|
||||
(*uind)++;
|
||||
|
||||
@@ -257,4 +257,10 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
|
||||
(val); \
|
||||
})
|
||||
|
||||
#define TO_ARM64_SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r), \
|
||||
sys_reg_Op1(SYS_ ## r), \
|
||||
sys_reg_CRn(SYS_ ## r), \
|
||||
sys_reg_CRm(SYS_ ## r), \
|
||||
sys_reg_Op2(SYS_ ## r))
|
||||
|
||||
#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
|
||||
|
||||
@@ -297,8 +297,11 @@ void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
|
||||
|
||||
if (!vgic_is_v3(vcpu->kvm))
|
||||
return;
|
||||
|
||||
/* Hide GICv3 sysreg if necessary */
|
||||
if (!kvm_has_gicv3(vcpu->kvm)) {
|
||||
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
|
||||
vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
|
||||
ICH_HCR_EL2_TC);
|
||||
return;
|
||||
|
||||
@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
|
||||
|
||||
folio = page_folio(pfn_to_page(pfn));
|
||||
|
||||
if (test_and_set_bit(PG_dcache_clean, &folio->flags))
|
||||
if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
|
||||
return;
|
||||
|
||||
icache_inv_range(address, address + nr*PAGE_SIZE);
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
|
||||
static inline void flush_dcache_folio(struct folio *folio)
|
||||
{
|
||||
if (test_bit(PG_dcache_clean, &folio->flags))
|
||||
clear_bit(PG_dcache_clean, &folio->flags);
|
||||
if (test_bit(PG_dcache_clean, &folio->flags.f))
|
||||
clear_bit(PG_dcache_clean, &folio->flags.f);
|
||||
}
|
||||
#define flush_dcache_folio flush_dcache_folio
|
||||
|
||||
|
||||
@@ -1747,6 +1747,9 @@ void __init fadump_setup_param_area(void)
|
||||
{
|
||||
phys_addr_t range_start, range_end;
|
||||
|
||||
if (!fw_dump.fadump_enabled)
|
||||
return;
|
||||
|
||||
if (!fw_dump.param_area_supported || fw_dump.dump_active)
|
||||
return;
|
||||
|
||||
|
||||
@@ -916,8 +916,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
|
||||
* it fires once.
|
||||
*/
|
||||
if (single_escalation) {
|
||||
struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
|
||||
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
|
||||
struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[prio]);
|
||||
|
||||
xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
|
||||
vcpu->arch.xive_esc_raddr = xd->eoi_page;
|
||||
@@ -1612,7 +1611,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
|
||||
|
||||
/* Grab info about irq */
|
||||
state->pt_number = hw_irq;
|
||||
state->pt_data = irq_data_get_irq_handler_data(host_data);
|
||||
state->pt_data = irq_data_get_irq_chip_data(host_data);
|
||||
|
||||
/*
|
||||
* Configure the IRQ to match the existing configuration of
|
||||
@@ -1787,8 +1786,7 @@ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, int irq)
|
||||
{
|
||||
struct irq_data *d = irq_get_irq_data(irq);
|
||||
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
|
||||
struct xive_irq_data *xd = irq_get_chip_data(irq);
|
||||
|
||||
/*
|
||||
* This slightly odd sequence gives the right result
|
||||
@@ -2827,9 +2825,7 @@ int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
|
||||
i0, i1);
|
||||
}
|
||||
if (xc->esc_virq[i]) {
|
||||
struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
|
||||
struct xive_irq_data *xd =
|
||||
irq_data_get_irq_handler_data(d);
|
||||
struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[i]);
|
||||
u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
|
||||
|
||||
seq_printf(m, " ESC %d %c%c EOI @%llx",
|
||||
|
||||
@@ -121,7 +121,7 @@ static int init_vas_instance(struct platform_device *pdev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
xd = irq_get_handler_data(vinst->virq);
|
||||
xd = irq_get_chip_data(vinst->virq);
|
||||
if (!xd) {
|
||||
pr_err("Inst%d: Invalid virq %d\n",
|
||||
vinst->vas_id, vinst->virq);
|
||||
|
||||
@@ -443,8 +443,7 @@ static int pseries_msi_ops_prepare(struct irq_domain *domain, struct device *dev
|
||||
*/
|
||||
static void pseries_msi_ops_teardown(struct irq_domain *domain, msi_alloc_info_t *arg)
|
||||
{
|
||||
struct msi_desc *desc = arg->desc;
|
||||
struct pci_dev *pdev = msi_desc_to_pci_dev(desc);
|
||||
struct pci_dev *pdev = to_pci_dev(domain->dev);
|
||||
|
||||
rtas_disable_msi(pdev);
|
||||
}
|
||||
|
||||
@@ -1580,7 +1580,7 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
|
||||
cpu, irq);
|
||||
#endif
|
||||
raw_spin_lock(&desc->lock);
|
||||
xd = irq_desc_get_handler_data(desc);
|
||||
xd = irq_desc_get_chip_data(desc);
|
||||
|
||||
/*
|
||||
* Clear saved_p to indicate that it's no longer pending
|
||||
|
||||
@@ -29,7 +29,7 @@ config RISCV
|
||||
select ARCH_HAS_DEBUG_VIRTUAL if MMU
|
||||
select ARCH_HAS_DEBUG_VM_PGTABLE
|
||||
select ARCH_HAS_DEBUG_WX
|
||||
select ARCH_HAS_ELF_CORE_EFLAGS
|
||||
select ARCH_HAS_ELF_CORE_EFLAGS if BINFMT_ELF && ELF_CORE
|
||||
select ARCH_HAS_FAST_MULTIPLIER
|
||||
select ARCH_HAS_FORTIFY_SOURCE
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
|
||||
@@ -3,14 +3,18 @@
|
||||
#ifndef __ASM_KGDB_H_
|
||||
#define __ASM_KGDB_H_
|
||||
|
||||
#include <linux/build_bug.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#define GDB_SIZEOF_REG sizeof(unsigned long)
|
||||
|
||||
#define DBG_MAX_REG_NUM (36)
|
||||
#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG)
|
||||
#define DBG_MAX_REG_NUM 36
|
||||
#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)
|
||||
#define CACHE_FLUSH_IS_SAFE 1
|
||||
#define BUFMAX 2048
|
||||
static_assert(BUFMAX > NUMREGBYTES,
|
||||
"As per KGDB documentation, BUFMAX must be larger than NUMREGBYTES");
|
||||
#ifdef CONFIG_RISCV_ISA_C
|
||||
#define BREAK_INSTR_SIZE 2
|
||||
#else
|
||||
@@ -97,6 +101,7 @@ extern unsigned long kgdb_compiled_break;
|
||||
#define DBG_REG_STATUS_OFF 33
|
||||
#define DBG_REG_BADADDR_OFF 34
|
||||
#define DBG_REG_CAUSE_OFF 35
|
||||
/* NOTE: increase DBG_MAX_REG_NUM if you add more values here. */
|
||||
|
||||
extern const char riscv_gdb_stub_feature[64];
|
||||
|
||||
|
||||
@@ -54,6 +54,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
|
||||
|
||||
pr_notice("CPU%u: off\n", cpu);
|
||||
|
||||
clear_tasks_mm_cpumask(cpu);
|
||||
/* Verify from the firmware if the cpu is really stopped*/
|
||||
if (cpu_ops->cpu_is_stopped)
|
||||
ret = cpu_ops->cpu_is_stopped(cpu);
|
||||
|
||||
@@ -455,7 +455,7 @@ SYM_DATA_START_LOCAL(excp_vect_table)
|
||||
RISCV_PTR do_trap_ecall_s
|
||||
RISCV_PTR do_trap_unknown
|
||||
RISCV_PTR do_trap_ecall_m
|
||||
/* instruciton page fault */
|
||||
/* instruction page fault */
|
||||
ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
|
||||
RISCV_PTR do_page_fault /* load page fault */
|
||||
RISCV_PTR do_trap_unknown
|
||||
|
||||
@@ -49,10 +49,15 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
|
||||
post_kprobe_handler(p, kcb, regs);
|
||||
}
|
||||
|
||||
static bool __kprobes arch_check_kprobe(struct kprobe *p)
|
||||
static bool __kprobes arch_check_kprobe(unsigned long addr)
|
||||
{
|
||||
unsigned long tmp = (unsigned long)p->addr - p->offset;
|
||||
unsigned long addr = (unsigned long)p->addr;
|
||||
unsigned long tmp, offset;
|
||||
|
||||
/* start iterating at the closest preceding symbol */
|
||||
if (!kallsyms_lookup_size_offset(addr, NULL, &offset))
|
||||
return false;
|
||||
|
||||
tmp = addr - offset;
|
||||
|
||||
while (tmp <= addr) {
|
||||
if (tmp == addr)
|
||||
@@ -71,7 +76,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
|
||||
if ((unsigned long)insn & 0x1)
|
||||
return -EILSEQ;
|
||||
|
||||
if (!arch_check_kprobe(p))
|
||||
if (!arch_check_kprobe((unsigned long)p->addr))
|
||||
return -EILSEQ;
|
||||
|
||||
/* copy instruction */
|
||||
|
||||
@@ -331,11 +331,14 @@ void __init setup_arch(char **cmdline_p)
|
||||
/* Parse the ACPI tables for possible boot-time configuration */
|
||||
acpi_boot_table_init();
|
||||
|
||||
if (acpi_disabled) {
|
||||
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
|
||||
unflatten_and_copy_device_tree();
|
||||
unflatten_and_copy_device_tree();
|
||||
#else
|
||||
unflatten_device_tree();
|
||||
unflatten_device_tree();
|
||||
#endif
|
||||
}
|
||||
|
||||
misc_mem_init();
|
||||
|
||||
init_resources();
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#define KPROBE_TEST_MAGIC_LOWER 0x0000babe
|
||||
#define KPROBE_TEST_MAGIC_UPPER 0xcafe0000
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#ifndef __ASSEMBLER__
|
||||
|
||||
/* array of addresses to install kprobes */
|
||||
extern void *test_kprobes_addresses[];
|
||||
@@ -19,6 +19,6 @@ extern void *test_kprobes_addresses[];
|
||||
/* array of functions that return KPROBE_TEST_MAGIC */
|
||||
extern long (*test_kprobes_functions[])(void);
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ASSEMBLER__ */
|
||||
|
||||
#endif /* TEST_KPROBES_H */
|
||||
|
||||
@@ -1355,11 +1355,23 @@ static __init int print_s5_reset_status_mmio(void)
|
||||
return 0;
|
||||
|
||||
value = ioread32(addr);
|
||||
iounmap(addr);
|
||||
|
||||
/* Value with "all bits set" is an error response and should be ignored. */
|
||||
if (value == U32_MAX)
|
||||
if (value == U32_MAX) {
|
||||
iounmap(addr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear all reason bits so they won't be retained if the next reset
|
||||
* does not update the register. Besides, some bits are never cleared by
|
||||
* hardware so it's software's responsibility to clear them.
|
||||
*
|
||||
* Writing the value back effectively clears all reason bits as they are
|
||||
* write-1-to-clear.
|
||||
*/
|
||||
iowrite32(value, addr);
|
||||
iounmap(addr);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
|
||||
if (!(value & BIT(i)))
|
||||
|
||||
@@ -242,7 +242,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
|
||||
u32 unused, u32 rmid, enum resctrl_event_id eventid,
|
||||
u64 *val, void *ignored)
|
||||
{
|
||||
struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
|
||||
int cpu = cpumask_any(&d->hdr.cpu_mask);
|
||||
struct arch_mbm_state *am;
|
||||
u64 msr_val;
|
||||
u32 prmid;
|
||||
int ret;
|
||||
@@ -251,12 +253,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
|
||||
|
||||
prmid = logical_rmid_to_physical_rmid(cpu, rmid);
|
||||
ret = __rmid_read_phys(prmid, eventid, &msr_val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*val = get_corrected_val(r, d, rmid, eventid, msr_val);
|
||||
if (!ret) {
|
||||
*val = get_corrected_val(r, d, rmid, eventid, msr_val);
|
||||
} else if (ret == -EINVAL) {
|
||||
am = get_arch_mbm_state(hw_dom, rmid, eventid);
|
||||
if (am)
|
||||
am->prev_msr = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __cntr_id_read(u32 cntr_id, u64 *val)
|
||||
|
||||
@@ -108,16 +108,18 @@ void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
|
||||
bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
|
||||
int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
|
||||
|
||||
perf_get_x86_pmu_capability(&kvm_host_pmu);
|
||||
|
||||
/*
|
||||
* Hybrid PMUs don't play nice with virtualization without careful
|
||||
* configuration by userspace, and KVM's APIs for reporting supported
|
||||
* vPMU features do not account for hybrid PMUs. Disable vPMU support
|
||||
* for hybrid PMUs until KVM gains a way to let userspace opt-in.
|
||||
*/
|
||||
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
|
||||
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
|
||||
enable_pmu = false;
|
||||
memset(&kvm_host_pmu, 0, sizeof(kvm_host_pmu));
|
||||
} else {
|
||||
perf_get_x86_pmu_capability(&kvm_host_pmu);
|
||||
}
|
||||
|
||||
if (enable_pmu) {
|
||||
/*
|
||||
|
||||
@@ -13941,10 +13941,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
|
||||
|
||||
#ifdef CONFIG_KVM_GUEST_MEMFD
|
||||
/*
|
||||
* KVM doesn't yet support mmap() on guest_memfd for VMs with private memory
|
||||
* (the private vs. shared tracking needs to be moved into guest_memfd).
|
||||
* KVM doesn't yet support initializing guest_memfd memory as shared for VMs
|
||||
* with private memory (the private vs. shared tracking needs to be moved into
|
||||
* guest_memfd).
|
||||
*/
|
||||
bool kvm_arch_supports_gmem_mmap(struct kvm *kvm)
|
||||
bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
|
||||
{
|
||||
return !kvm_arch_has_private_mem(kvm);
|
||||
}
|
||||
|
||||
@@ -446,7 +446,7 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
|
||||
}
|
||||
|
||||
start = fix_addr(__cpa_addr(cpa, 0));
|
||||
end = fix_addr(__cpa_addr(cpa, cpa->numpages));
|
||||
end = start + cpa->numpages * PAGE_SIZE;
|
||||
if (cpa->force_flush_all)
|
||||
end = TLB_FLUSH_ALL;
|
||||
|
||||
|
||||
@@ -911,11 +911,31 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
|
||||
* CR3 and cpu_tlbstate.loaded_mm are not all in sync.
|
||||
*/
|
||||
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
|
||||
barrier();
|
||||
|
||||
/* Start receiving IPIs and then read tlb_gen (and LAM below) */
|
||||
/*
|
||||
* Make sure this CPU is set in mm_cpumask() such that we'll
|
||||
* receive invalidation IPIs.
|
||||
*
|
||||
* Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic
|
||||
* operation, or explicitly provide one. Such that:
|
||||
*
|
||||
* switch_mm_irqs_off() flush_tlb_mm_range()
|
||||
* smp_store_release(loaded_mm, SWITCHING); atomic64_inc_return(tlb_gen)
|
||||
* smp_mb(); // here // smp_mb() implied
|
||||
* atomic64_read(tlb_gen); this_cpu_read(loaded_mm);
|
||||
*
|
||||
* we properly order against flush_tlb_mm_range(), where the
|
||||
* loaded_mm load can happen in mative_flush_tlb_multi() ->
|
||||
* should_flush_tlb().
|
||||
*
|
||||
* This way switch_mm() must see the new tlb_gen or
|
||||
* flush_tlb_mm_range() must see the new loaded_mm, or both.
|
||||
*/
|
||||
if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
|
||||
cpumask_set_cpu(cpu, mm_cpumask(next));
|
||||
else
|
||||
smp_mb();
|
||||
|
||||
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
|
||||
|
||||
ns = choose_new_asid(next, next_tlb_gen);
|
||||
|
||||
@@ -812,8 +812,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
|
||||
}
|
||||
/*
|
||||
* Similar to blkg_conf_open_bdev, but additionally freezes the queue,
|
||||
* acquires q->elevator_lock, and ensures the correct locking order
|
||||
* between q->elevator_lock and q->rq_qos_mutex.
|
||||
* ensures the correct locking order between freeze queue and q->rq_qos_mutex.
|
||||
*
|
||||
* This function returns negative error on failure. On success it returns
|
||||
* memflags which must be saved and later passed to blkg_conf_exit_frozen
|
||||
@@ -834,13 +833,11 @@ unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
|
||||
* At this point, we haven’t started protecting anything related to QoS,
|
||||
* so we release q->rq_qos_mutex here, which was first acquired in blkg_
|
||||
* conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
|
||||
* the queue and acquiring q->elevator_lock to maintain the correct
|
||||
* locking order.
|
||||
* the queue to maintain the correct locking order.
|
||||
*/
|
||||
mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
|
||||
|
||||
memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
|
||||
mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
|
||||
mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
|
||||
|
||||
return memflags;
|
||||
@@ -995,9 +992,8 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
|
||||
EXPORT_SYMBOL_GPL(blkg_conf_exit);
|
||||
|
||||
/*
|
||||
* Similar to blkg_conf_exit, but also unfreezes the queue and releases
|
||||
* q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
|
||||
* is used to open the bdev.
|
||||
* Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
|
||||
* when blkg_conf_open_bdev_frozen is used to open the bdev.
|
||||
*/
|
||||
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
|
||||
{
|
||||
@@ -1005,7 +1001,6 @@ void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
|
||||
struct request_queue *q = ctx->bdev->bd_queue;
|
||||
|
||||
blkg_conf_exit(ctx);
|
||||
mutex_unlock(&q->elevator_lock);
|
||||
blk_mq_unfreeze_queue(q, memflags);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -557,7 +557,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
|
||||
if (blk_mq_is_shared_tags(flags)) {
|
||||
/* Shared tags are stored at index 0 in @et->tags. */
|
||||
q->sched_shared_tags = et->tags[0];
|
||||
blk_mq_tag_update_sched_shared_tags(q);
|
||||
blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
|
||||
}
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
|
||||
@@ -622,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size
|
||||
sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
|
||||
}
|
||||
|
||||
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
|
||||
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
|
||||
unsigned int nr)
|
||||
{
|
||||
sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
|
||||
q->nr_requests - q->tag_set->reserved_tags);
|
||||
nr - q->tag_set->reserved_tags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -4941,7 +4941,7 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
|
||||
* tags can't grow, see blk_mq_alloc_sched_tags().
|
||||
*/
|
||||
if (q->elevator)
|
||||
blk_mq_tag_update_sched_shared_tags(q);
|
||||
blk_mq_tag_update_sched_shared_tags(q, nr);
|
||||
else
|
||||
blk_mq_tag_resize_shared_tags(set, nr);
|
||||
} else if (!q->elevator) {
|
||||
|
||||
@@ -186,7 +186,8 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
|
||||
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
|
||||
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
|
||||
unsigned int size);
|
||||
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
|
||||
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
|
||||
unsigned int nr);
|
||||
|
||||
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
|
||||
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
|
||||
|
||||
@@ -97,6 +97,8 @@ struct dma_bridge_chan {
|
||||
* response queue's head and tail pointer of this DBC.
|
||||
*/
|
||||
void __iomem *dbc_base;
|
||||
/* Synchronizes access to Request queue's head and tail pointer */
|
||||
struct mutex req_lock;
|
||||
/* Head of list where each node is a memory handle queued in request queue */
|
||||
struct list_head xfer_list;
|
||||
/* Synchronizes DBC readers during cleanup */
|
||||
|
||||
@@ -407,7 +407,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
|
||||
return -EINVAL;
|
||||
remaining = in_trans->size - resources->xferred_dma_size;
|
||||
if (remaining == 0)
|
||||
return 0;
|
||||
return -EINVAL;
|
||||
|
||||
if (check_add_overflow(xfer_start_addr, remaining, &end))
|
||||
return -EINVAL;
|
||||
|
||||
@@ -1356,13 +1356,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
|
||||
goto release_ch_rcu;
|
||||
}
|
||||
|
||||
ret = mutex_lock_interruptible(&dbc->req_lock);
|
||||
if (ret)
|
||||
goto release_ch_rcu;
|
||||
|
||||
head = readl(dbc->dbc_base + REQHP_OFF);
|
||||
tail = readl(dbc->dbc_base + REQTP_OFF);
|
||||
|
||||
if (head == U32_MAX || tail == U32_MAX) {
|
||||
/* PCI link error */
|
||||
ret = -ENODEV;
|
||||
goto release_ch_rcu;
|
||||
goto unlock_req_lock;
|
||||
}
|
||||
|
||||
queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
|
||||
@@ -1370,11 +1374,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
|
||||
ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
|
||||
head, &tail);
|
||||
if (ret)
|
||||
goto release_ch_rcu;
|
||||
goto unlock_req_lock;
|
||||
|
||||
/* Finalize commit to hardware */
|
||||
submit_ts = ktime_get_ns();
|
||||
writel(tail, dbc->dbc_base + REQTP_OFF);
|
||||
mutex_unlock(&dbc->req_lock);
|
||||
|
||||
update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
|
||||
submit_ts, queue_level);
|
||||
@@ -1382,6 +1387,9 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
|
||||
if (datapath_polling)
|
||||
schedule_work(&dbc->poll_work);
|
||||
|
||||
unlock_req_lock:
|
||||
if (ret)
|
||||
mutex_unlock(&dbc->req_lock);
|
||||
release_ch_rcu:
|
||||
srcu_read_unlock(&dbc->ch_lock, rcu_id);
|
||||
unlock_dev_srcu:
|
||||
|
||||
@@ -218,6 +218,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
|
||||
if (ret)
|
||||
goto destroy_workqueue;
|
||||
|
||||
dev_set_drvdata(&mhi_dev->dev, qdev);
|
||||
qdev->bootlog_ch = mhi_dev;
|
||||
|
||||
for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
|
||||
msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
|
||||
if (!msg) {
|
||||
@@ -233,8 +236,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
|
||||
goto mhi_unprepare;
|
||||
}
|
||||
|
||||
dev_set_drvdata(&mhi_dev->dev, qdev);
|
||||
qdev->bootlog_ch = mhi_dev;
|
||||
return 0;
|
||||
|
||||
mhi_unprepare:
|
||||
|
||||
@@ -454,6 +454,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev,
|
||||
return NULL;
|
||||
init_waitqueue_head(&qdev->dbc[i].dbc_release);
|
||||
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
|
||||
ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
|
||||
if (ret)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return qdev;
|
||||
|
||||
@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
||||
{
|
||||
struct acpi_table_header local_header;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#if defined(__GNUC__) && __GNUC__ >= 11
|
||||
#pragma GCC diagnostic ignored "-Wstringop-overread"
|
||||
#endif
|
||||
|
||||
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
|
||||
|
||||
/* FACS only has signature and length fields */
|
||||
@@ -143,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
||||
local_header.asl_compiler_id,
|
||||
local_header.asl_compiler_revision));
|
||||
}
|
||||
#pragma GCC diagnostic pop
|
||||
}
|
||||
|
||||
@@ -1107,7 +1107,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
|
||||
size_t num_args,
|
||||
struct fwnode_reference_args *args)
|
||||
{
|
||||
return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
|
||||
return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
|
||||
|
||||
|
||||
@@ -133,8 +133,7 @@ static inline bool tpm_crb_has_idle(u32 start_method)
|
||||
{
|
||||
return !(start_method == ACPI_TPM2_START_METHOD ||
|
||||
start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
|
||||
start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC ||
|
||||
start_method == ACPI_TPM2_CRB_WITH_ARM_FFA);
|
||||
start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
|
||||
}
|
||||
|
||||
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
|
||||
@@ -191,7 +190,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
|
||||
*
|
||||
* Return: 0 always
|
||||
*/
|
||||
static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
|
||||
static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc)
|
||||
{
|
||||
int rc;
|
||||
|
||||
@@ -200,6 +199,12 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
|
||||
|
||||
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
|
||||
|
||||
if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
|
||||
rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = crb_try_pluton_doorbell(priv, true);
|
||||
if (rc)
|
||||
return rc;
|
||||
@@ -220,7 +225,7 @@ static int crb_go_idle(struct tpm_chip *chip)
|
||||
struct device *dev = &chip->dev;
|
||||
struct crb_priv *priv = dev_get_drvdata(dev);
|
||||
|
||||
return __crb_go_idle(dev, priv);
|
||||
return __crb_go_idle(dev, priv, chip->locality);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -238,7 +243,7 @@ static int crb_go_idle(struct tpm_chip *chip)
|
||||
*
|
||||
* Return: 0 on success -ETIME on timeout;
|
||||
*/
|
||||
static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
|
||||
static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc)
|
||||
{
|
||||
int rc;
|
||||
|
||||
@@ -247,6 +252,12 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
|
||||
|
||||
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
|
||||
|
||||
if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
|
||||
rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = crb_try_pluton_doorbell(priv, true);
|
||||
if (rc)
|
||||
return rc;
|
||||
@@ -267,7 +278,7 @@ static int crb_cmd_ready(struct tpm_chip *chip)
|
||||
struct device *dev = &chip->dev;
|
||||
struct crb_priv *priv = dev_get_drvdata(dev);
|
||||
|
||||
return __crb_cmd_ready(dev, priv);
|
||||
return __crb_cmd_ready(dev, priv, chip->locality);
|
||||
}
|
||||
|
||||
static int __crb_request_locality(struct device *dev,
|
||||
@@ -444,7 +455,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
|
||||
|
||||
/* Seems to be necessary for every command */
|
||||
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
|
||||
__crb_cmd_ready(&chip->dev, priv);
|
||||
__crb_cmd_ready(&chip->dev, priv, chip->locality);
|
||||
|
||||
memcpy_toio(priv->cmd, buf, len);
|
||||
|
||||
@@ -672,7 +683,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
|
||||
* PTT HW bug w/a: wake up the device to access
|
||||
* possibly not retained registers.
|
||||
*/
|
||||
ret = __crb_cmd_ready(dev, priv);
|
||||
ret = __crb_cmd_ready(dev, priv, 0);
|
||||
if (ret)
|
||||
goto out_relinquish_locality;
|
||||
|
||||
@@ -744,7 +755,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
|
||||
if (!ret)
|
||||
priv->cmd_size = cmd_size;
|
||||
|
||||
__crb_go_idle(dev, priv);
|
||||
__crb_go_idle(dev, priv, 0);
|
||||
|
||||
out_relinquish_locality:
|
||||
|
||||
|
||||
@@ -1614,7 +1614,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
|
||||
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
|
||||
* limits, epp and desired perf will get reset to the cached values in cpudata struct
|
||||
*/
|
||||
return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
|
||||
return amd_pstate_update_perf(policy, perf.bios_min_perf,
|
||||
FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
|
||||
FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
|
||||
FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
|
||||
false);
|
||||
}
|
||||
|
||||
static int amd_pstate_suspend(struct cpufreq_policy *policy)
|
||||
|
||||
@@ -188,20 +188,17 @@ static unsigned int get_typical_interval(struct menu_device *data)
|
||||
*
|
||||
* This can deal with workloads that have long pauses interspersed
|
||||
* with sporadic activity with a bunch of short pauses.
|
||||
*
|
||||
* However, if the number of remaining samples is too small to exclude
|
||||
* any more outliers, allow the deepest available idle state to be
|
||||
* selected because there are systems where the time spent by CPUs in
|
||||
* deep idle states is correlated to the maximum frequency the CPUs
|
||||
* can get to. On those systems, shallow idle states should be avoided
|
||||
* unless there is a clear indication that the given CPU is most likley
|
||||
* going to be woken up shortly.
|
||||
*/
|
||||
if (divisor * 4 <= INTERVALS * 3) {
|
||||
/*
|
||||
* If there are sufficiently many data points still under
|
||||
* consideration after the outliers have been eliminated,
|
||||
* returning without a prediction would be a mistake because it
|
||||
* is likely that the next interval will not exceed the current
|
||||
* maximum, so return the latter in that case.
|
||||
*/
|
||||
if (divisor >= INTERVALS / 2)
|
||||
return max;
|
||||
|
||||
if (divisor * 4 <= INTERVALS * 3)
|
||||
return UINT_MAX;
|
||||
}
|
||||
|
||||
/* Update the thresholds for the next round. */
|
||||
if (avg - min > max - avg)
|
||||
|
||||
@@ -348,7 +348,7 @@ static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
|
||||
struct resource res;
|
||||
int nid, rc;
|
||||
|
||||
res = DEFINE_RES(start, size, 0);
|
||||
res = DEFINE_RES_MEM(start, size);
|
||||
nid = phys_to_target_node(start);
|
||||
|
||||
rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
|
||||
|
||||
@@ -371,6 +371,9 @@ cxl_feature_info(struct cxl_features_state *cxlfs,
|
||||
{
|
||||
struct cxl_feat_entry *feat;
|
||||
|
||||
if (!cxlfs || !cxlfs->entries)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
for (int i = 0; i < cxlfs->entries->num_features; i++) {
|
||||
feat = &cxlfs->entries->ent[i];
|
||||
if (uuid_equal(uuid, &feat->uuid))
|
||||
|
||||
@@ -1182,6 +1182,20 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
|
||||
/*
|
||||
* Setup port register if this is the first dport showed up. Having
|
||||
* a dport also means that there is at least 1 active link.
|
||||
*/
|
||||
if (port->nr_dports == 1 &&
|
||||
port->component_reg_phys != CXL_RESOURCE_NONE) {
|
||||
rc = cxl_port_setup_regs(port, port->component_reg_phys);
|
||||
if (rc) {
|
||||
xa_erase(&port->dports, (unsigned long)dport->dport_dev);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
port->component_reg_phys = CXL_RESOURCE_NONE;
|
||||
}
|
||||
|
||||
get_device(dport_dev);
|
||||
rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
|
||||
if (rc)
|
||||
@@ -1200,18 +1214,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
|
||||
|
||||
cxl_debugfs_create_dport_dir(dport);
|
||||
|
||||
/*
|
||||
* Setup port register if this is the first dport showed up. Having
|
||||
* a dport also means that there is at least 1 active link.
|
||||
*/
|
||||
if (port->nr_dports == 1 &&
|
||||
port->component_reg_phys != CXL_RESOURCE_NONE) {
|
||||
rc = cxl_port_setup_regs(port, port->component_reg_phys);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
port->component_reg_phys = CXL_RESOURCE_NONE;
|
||||
}
|
||||
|
||||
return dport;
|
||||
}
|
||||
|
||||
|
||||
@@ -839,7 +839,7 @@ static int match_free_decoder(struct device *dev, const void *data)
|
||||
}
|
||||
|
||||
static bool region_res_match_cxl_range(const struct cxl_region_params *p,
|
||||
struct range *range)
|
||||
const struct range *range)
|
||||
{
|
||||
if (!p->res)
|
||||
return false;
|
||||
@@ -3398,10 +3398,7 @@ static int match_region_by_range(struct device *dev, const void *data)
|
||||
p = &cxlr->params;
|
||||
|
||||
guard(rwsem_read)(&cxl_rwsem.region);
|
||||
if (p->res && p->res->start == r->start && p->res->end == r->end)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
return region_res_match_cxl_range(p, r);
|
||||
}
|
||||
|
||||
static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
|
||||
@@ -3666,14 +3663,14 @@ static int validate_region_offset(struct cxl_region *cxlr, u64 offset)
|
||||
|
||||
if (offset < p->cache_size) {
|
||||
dev_err(&cxlr->dev,
|
||||
"Offset %#llx is within extended linear cache %pr\n",
|
||||
"Offset %#llx is within extended linear cache %pa\n",
|
||||
offset, &p->cache_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
region_size = resource_size(p->res);
|
||||
if (offset >= region_size) {
|
||||
dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pr\n",
|
||||
dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pa\n",
|
||||
offset, ®ion_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -1068,7 +1068,7 @@ TRACE_EVENT(cxl_poison,
|
||||
__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
|
||||
__entry->dpa);
|
||||
if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
|
||||
__entry->hpa_alias0 = __entry->hpa +
|
||||
__entry->hpa_alias0 = __entry->hpa -
|
||||
cxlr->params.cache_size;
|
||||
else
|
||||
__entry->hpa_alias0 = ULLONG_MAX;
|
||||
|
||||
@@ -1290,6 +1290,7 @@ struct amdgpu_device {
|
||||
bool debug_disable_gpu_ring_reset;
|
||||
bool debug_vm_userptr;
|
||||
bool debug_disable_ce_logs;
|
||||
bool debug_enable_ce_cs;
|
||||
|
||||
/* Protection for the following isolation structure */
|
||||
struct mutex enforce_isolation_mutex;
|
||||
|
||||
@@ -2329,10 +2329,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
|
||||
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
|
||||
struct kfd_vm_fault_info *mem)
|
||||
{
|
||||
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
|
||||
if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
|
||||
*mem = *adev->gmc.vm_fault_info;
|
||||
mb(); /* make sure read happened */
|
||||
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
|
||||
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -364,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
|
||||
if (p->uf_bo && ring->funcs->no_user_fence)
|
||||
return -EINVAL;
|
||||
|
||||
if (!p->adev->debug_enable_ce_cs &&
|
||||
chunk_ib->flags & AMDGPU_IB_FLAG_CE) {
|
||||
dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
|
||||
chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
|
||||
if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
|
||||
@@ -702,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
|
||||
*/
|
||||
const s64 us_upper_bound = 200000;
|
||||
|
||||
if (!adev->mm_stats.log2_max_MBps) {
|
||||
if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
|
||||
*max_bytes = 0;
|
||||
*max_vis_bytes = 0;
|
||||
return;
|
||||
|
||||
@@ -1882,6 +1882,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
|
||||
|
||||
static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
|
||||
{
|
||||
/* Enabling ASPM causes randoms hangs on Tahiti and Oland on Zen4.
|
||||
* It's unclear if this is a platform-specific or GPU-specific issue.
|
||||
* Disable ASPM on SI for the time being.
|
||||
*/
|
||||
if (adev->family == AMDGPU_FAMILY_SI)
|
||||
return true;
|
||||
|
||||
#if IS_ENABLED(CONFIG_X86)
|
||||
struct cpuinfo_x86 *c = &cpu_data(0);
|
||||
|
||||
|
||||
@@ -1033,7 +1033,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
|
||||
/* Until a uniform way is figured, get mask based on hwid */
|
||||
switch (hw_id) {
|
||||
case VCN_HWID:
|
||||
harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
|
||||
/* VCN vs UVD+VCE */
|
||||
if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
|
||||
harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
|
||||
break;
|
||||
case DMU_HWID:
|
||||
if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
|
||||
@@ -2565,7 +2567,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_discovery_init(adev);
|
||||
vega10_reg_base_init(adev);
|
||||
adev->sdma.num_instances = 2;
|
||||
adev->sdma.sdma_mask = 3;
|
||||
adev->gmc.num_umc = 4;
|
||||
adev->gfx.xcc_mask = 1;
|
||||
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
|
||||
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
|
||||
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
|
||||
@@ -2592,7 +2596,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_discovery_init(adev);
|
||||
vega10_reg_base_init(adev);
|
||||
adev->sdma.num_instances = 2;
|
||||
adev->sdma.sdma_mask = 3;
|
||||
adev->gmc.num_umc = 4;
|
||||
adev->gfx.xcc_mask = 1;
|
||||
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
|
||||
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
|
||||
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
|
||||
@@ -2619,8 +2625,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_discovery_init(adev);
|
||||
vega10_reg_base_init(adev);
|
||||
adev->sdma.num_instances = 1;
|
||||
adev->sdma.sdma_mask = 1;
|
||||
adev->vcn.num_vcn_inst = 1;
|
||||
adev->gmc.num_umc = 2;
|
||||
adev->gfx.xcc_mask = 1;
|
||||
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
|
||||
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
|
||||
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
|
||||
@@ -2665,7 +2673,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_discovery_init(adev);
|
||||
vega20_reg_base_init(adev);
|
||||
adev->sdma.num_instances = 2;
|
||||
adev->sdma.sdma_mask = 3;
|
||||
adev->gmc.num_umc = 8;
|
||||
adev->gfx.xcc_mask = 1;
|
||||
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
|
||||
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
|
||||
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
|
||||
@@ -2693,8 +2703,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_discovery_init(adev);
|
||||
arct_reg_base_init(adev);
|
||||
adev->sdma.num_instances = 8;
|
||||
adev->sdma.sdma_mask = 0xff;
|
||||
adev->vcn.num_vcn_inst = 2;
|
||||
adev->gmc.num_umc = 8;
|
||||
adev->gfx.xcc_mask = 1;
|
||||
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
|
||||
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
|
||||
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
|
||||
@@ -2726,8 +2738,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_discovery_init(adev);
|
||||
aldebaran_reg_base_init(adev);
|
||||
adev->sdma.num_instances = 5;
|
||||
adev->sdma.sdma_mask = 0x1f;
|
||||
adev->vcn.num_vcn_inst = 2;
|
||||
adev->gmc.num_umc = 4;
|
||||
adev->gfx.xcc_mask = 1;
|
||||
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
|
||||
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
|
||||
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
|
||||
@@ -2762,6 +2776,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
|
||||
} else {
|
||||
cyan_skillfish_reg_base_init(adev);
|
||||
adev->sdma.num_instances = 2;
|
||||
adev->sdma.sdma_mask = 3;
|
||||
adev->gfx.xcc_mask = 1;
|
||||
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
|
||||
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
|
||||
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
|
||||
|
||||
@@ -144,7 +144,8 @@ enum AMDGPU_DEBUG_MASK {
|
||||
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
|
||||
AMDGPU_DEBUG_SMU_POOL = BIT(7),
|
||||
AMDGPU_DEBUG_VM_USERPTR = BIT(8),
|
||||
AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
|
||||
AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9),
|
||||
AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10)
|
||||
};
|
||||
|
||||
unsigned int amdgpu_vram_limit = UINT_MAX;
|
||||
@@ -2289,6 +2290,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
|
||||
pr_info("debug: disable kernel logs of correctable errors\n");
|
||||
adev->debug_disable_ce_logs = true;
|
||||
}
|
||||
|
||||
if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) {
|
||||
pr_info("debug: allowing command submission to CE engine\n");
|
||||
adev->debug_enable_ce_cs = true;
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
|
||||
|
||||
@@ -758,11 +758,42 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
|
||||
* @fence: fence of the ring to signal
|
||||
*
|
||||
*/
|
||||
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
|
||||
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
|
||||
{
|
||||
dma_fence_set_error(&fence->base, -ETIME);
|
||||
amdgpu_fence_write(fence->ring, fence->seq);
|
||||
amdgpu_fence_process(fence->ring);
|
||||
struct dma_fence *unprocessed;
|
||||
struct dma_fence __rcu **ptr;
|
||||
struct amdgpu_fence *fence;
|
||||
struct amdgpu_ring *ring = af->ring;
|
||||
unsigned long flags;
|
||||
u32 seq, last_seq;
|
||||
|
||||
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
|
||||
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
|
||||
|
||||
/* mark all fences from the guilty context with an error */
|
||||
spin_lock_irqsave(&ring->fence_drv.lock, flags);
|
||||
do {
|
||||
last_seq++;
|
||||
last_seq &= ring->fence_drv.num_fences_mask;
|
||||
|
||||
ptr = &ring->fence_drv.fences[last_seq];
|
||||
rcu_read_lock();
|
||||
unprocessed = rcu_dereference(*ptr);
|
||||
|
||||
if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
|
||||
fence = container_of(unprocessed, struct amdgpu_fence, base);
|
||||
|
||||
if (fence == af)
|
||||
dma_fence_set_error(&fence->base, -ETIME);
|
||||
else if (fence->context == af->context)
|
||||
dma_fence_set_error(&fence->base, -ECANCELED);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
} while (last_seq != seq);
|
||||
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
|
||||
/* signal the guilty fence */
|
||||
amdgpu_fence_write(ring, af->seq);
|
||||
amdgpu_fence_process(ring);
|
||||
}
|
||||
|
||||
void amdgpu_fence_save_wptr(struct dma_fence *fence)
|
||||
@@ -790,14 +821,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
|
||||
struct dma_fence *unprocessed;
|
||||
struct dma_fence __rcu **ptr;
|
||||
struct amdgpu_fence *fence;
|
||||
u64 wptr, i, seqno;
|
||||
u64 wptr;
|
||||
u32 seq, last_seq;
|
||||
|
||||
seqno = amdgpu_fence_read(ring);
|
||||
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
|
||||
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
|
||||
wptr = ring->fence_drv.signalled_wptr;
|
||||
ring->ring_backup_entries_to_copy = 0;
|
||||
|
||||
for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
|
||||
ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
|
||||
do {
|
||||
last_seq++;
|
||||
last_seq &= ring->fence_drv.num_fences_mask;
|
||||
|
||||
ptr = &ring->fence_drv.fences[last_seq];
|
||||
rcu_read_lock();
|
||||
unprocessed = rcu_dereference(*ptr);
|
||||
|
||||
@@ -813,7 +849,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
|
||||
wptr = fence->wptr;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
} while (last_seq != seq);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -371,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
|
||||
ring = &adev->jpeg.inst[i].ring_dec[j];
|
||||
if (val & (BIT_ULL(1) << ((i * adev->jpeg.num_jpeg_rings) + j)))
|
||||
if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j)))
|
||||
ring->sched.ready = true;
|
||||
else
|
||||
ring->sched.ready = false;
|
||||
|
||||
@@ -758,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
|
||||
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
|
||||
case AMDGPU_INFO_VRAM_USAGE:
|
||||
ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
|
||||
ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
|
||||
ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;
|
||||
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
|
||||
case AMDGPU_INFO_VIS_VRAM_USAGE:
|
||||
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
|
||||
@@ -804,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
|
||||
atomic64_read(&adev->vram_pin_size) -
|
||||
AMDGPU_VM_RESERVED_VRAM;
|
||||
mem.vram.heap_usage =
|
||||
ttm_resource_manager_usage(vram_man);
|
||||
mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
|
||||
ttm_resource_manager_usage(vram_man) : 0;
|
||||
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
|
||||
|
||||
mem.cpu_accessible_vram.total_heap_size =
|
||||
|
||||
@@ -409,7 +409,7 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
|
||||
return -EINVAL;
|
||||
|
||||
/* Clear the doorbell array before detection */
|
||||
memset(adev->mes.hung_queue_db_array_cpu_addr, 0,
|
||||
memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,
|
||||
adev->mes.hung_queue_db_array_size * sizeof(u32));
|
||||
input.queue_type = queue_type;
|
||||
input.detect_only = detect_only;
|
||||
@@ -420,12 +420,17 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
|
||||
dev_err(adev->dev, "failed to detect and reset\n");
|
||||
} else {
|
||||
*hung_db_num = 0;
|
||||
for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) {
|
||||
for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {
|
||||
if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
|
||||
hung_db_array[i] = db_array[i];
|
||||
*hung_db_num += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: return HQD info for MES scheduled user compute queue reset cases
|
||||
* stored in hung_db_array hqd info offset to full array size
|
||||
*/
|
||||
}
|
||||
|
||||
return r;
|
||||
@@ -686,14 +691,11 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
|
||||
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
|
||||
bool is_supported = false;
|
||||
|
||||
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
|
||||
amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
|
||||
mes_rev >= 0x63)
|
||||
is_supported = true;
|
||||
|
||||
return is_supported;
|
||||
return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
|
||||
amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
|
||||
mes_rev >= 0x63) ||
|
||||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));
|
||||
}
|
||||
|
||||
/* Fix me -- node_id is used to identify the correct MES instances in the future */
|
||||
|
||||
@@ -149,6 +149,7 @@ struct amdgpu_mes {
|
||||
void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
|
||||
|
||||
int hung_queue_db_array_size;
|
||||
int hung_queue_hqd_info_offset;
|
||||
struct amdgpu_bo *hung_queue_db_array_gpu_obj;
|
||||
uint64_t hung_queue_db_array_gpu_addr;
|
||||
void *hung_queue_db_array_cpu_addr;
|
||||
|
||||
@@ -811,7 +811,7 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* signal the fence of the bad job */
|
||||
/* signal the guilty fence and set an error on all fences from the context */
|
||||
if (guilty_fence)
|
||||
amdgpu_fence_driver_guilty_force_completion(guilty_fence);
|
||||
/* Re-emit the non-guilty commands */
|
||||
|
||||
@@ -155,7 +155,7 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
|
||||
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
|
||||
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
|
||||
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
|
||||
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
|
||||
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);
|
||||
void amdgpu_fence_save_wptr(struct dma_fence *fence);
|
||||
|
||||
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
|
||||
|
||||
@@ -598,8 +598,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
|
||||
vf2pf_info->driver_cert = 0;
|
||||
vf2pf_info->os_info.all = 0;
|
||||
|
||||
vf2pf_info->fb_usage =
|
||||
ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
|
||||
vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
|
||||
ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
|
||||
vf2pf_info->fb_vis_usage =
|
||||
amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
|
||||
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
|
||||
|
||||
@@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
|
||||
!adev->gmc.vram_vendor)
|
||||
return 0;
|
||||
|
||||
if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
|
||||
return 0;
|
||||
|
||||
return attr->mode;
|
||||
}
|
||||
|
||||
|
||||
@@ -5862,8 +5862,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
|
||||
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
|
||||
u32 header, control = 0;
|
||||
|
||||
BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
|
||||
|
||||
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
|
||||
|
||||
control |= ib->length_dw | (vmid << 24);
|
||||
|
||||
@@ -4419,8 +4419,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
|
||||
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
|
||||
u32 header, control = 0;
|
||||
|
||||
BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
|
||||
|
||||
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
|
||||
|
||||
control |= ib->length_dw | (vmid << 24);
|
||||
|
||||
@@ -1068,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
GFP_KERNEL);
|
||||
if (!adev->gmc.vm_fault_info)
|
||||
return -ENOMEM;
|
||||
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
|
||||
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
|
||||
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
VMID);
|
||||
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
|
||||
&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
|
||||
&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
|
||||
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
|
||||
u32 protections = REG_GET_FIELD(status,
|
||||
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
@@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
|
||||
info->prot_read = protections & 0x8 ? true : false;
|
||||
info->prot_write = protections & 0x10 ? true : false;
|
||||
info->prot_exec = protections & 0x20 ? true : false;
|
||||
mb();
|
||||
atomic_set(&adev->gmc.vm_fault_info_updated, 1);
|
||||
atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -1183,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
GFP_KERNEL);
|
||||
if (!adev->gmc.vm_fault_info)
|
||||
return -ENOMEM;
|
||||
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
|
||||
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1478,7 +1478,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
|
||||
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
VMID);
|
||||
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
|
||||
&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
|
||||
&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
|
||||
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
|
||||
u32 protections = REG_GET_FIELD(status,
|
||||
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
@@ -1494,8 +1494,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
|
||||
info->prot_read = protections & 0x8 ? true : false;
|
||||
info->prot_write = protections & 0x10 ? true : false;
|
||||
info->prot_exec = protections & 0x20 ? true : false;
|
||||
mb();
|
||||
atomic_set(&adev->gmc.vm_fault_info_updated, 1);
|
||||
atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -208,10 +208,10 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
unsigned int hung_db_num = 0;
|
||||
int queue_id, r, i;
|
||||
u32 db_array[4];
|
||||
u32 db_array[8];
|
||||
|
||||
if (db_array_size > 4) {
|
||||
dev_err(adev->dev, "DB array size (%d vs 4) too small\n",
|
||||
if (db_array_size > 8) {
|
||||
dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
|
||||
db_array_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -66,7 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
|
||||
#define GFX_MES_DRAM_SIZE 0x80000
|
||||
#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
|
||||
|
||||
#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4
|
||||
#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
|
||||
#define MES11_HUNG_HQD_INFO_OFFSET 4
|
||||
|
||||
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
@@ -1720,8 +1721,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
|
||||
struct amdgpu_device *adev = ip_block->adev;
|
||||
int pipe, r;
|
||||
|
||||
adev->mes.hung_queue_db_array_size =
|
||||
MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
|
||||
adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
|
||||
adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET;
|
||||
|
||||
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
|
||||
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
|
||||
continue;
|
||||
|
||||
@@ -47,7 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
|
||||
|
||||
#define MES_EOP_SIZE 2048
|
||||
|
||||
#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4
|
||||
#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset [4:7] hqd info */
|
||||
#define MES12_HUNG_HQD_INFO_OFFSET 4
|
||||
|
||||
static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
@@ -228,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
|
||||
pipe, x_pkt->header.opcode);
|
||||
|
||||
r = amdgpu_fence_wait_polling(ring, seq, timeout);
|
||||
if (r < 1 || !*status_ptr) {
|
||||
|
||||
/*
|
||||
* status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success).
|
||||
* If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information.
|
||||
*/
|
||||
if (r < 1 || !(lower_32_bits(*status_ptr))) {
|
||||
|
||||
if (misc_op_str)
|
||||
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
|
||||
@@ -1899,8 +1905,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
|
||||
struct amdgpu_device *adev = ip_block->adev;
|
||||
int pipe, r;
|
||||
|
||||
adev->mes.hung_queue_db_array_size =
|
||||
MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
|
||||
adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
|
||||
adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET;
|
||||
|
||||
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
|
||||
r = amdgpu_mes_init_microcode(adev, pipe);
|
||||
if (r)
|
||||
|
||||
@@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
|
||||
pr_debug_ratelimited("Evicting process pid %d queues\n",
|
||||
pdd->process->lead_thread->pid);
|
||||
|
||||
if (dqm->dev->kfd->shared_resources.enable_mes) {
|
||||
pdd->last_evict_timestamp = get_jiffies_64();
|
||||
retval = suspend_all_queues_mes(dqm);
|
||||
if (retval) {
|
||||
dev_err(dev, "Suspending all queues failed");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* Mark all queues as evicted. Deactivate all active queues on
|
||||
* the qpd.
|
||||
*/
|
||||
@@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
|
||||
decrement_queue_count(dqm, qpd, q);
|
||||
|
||||
if (dqm->dev->kfd->shared_resources.enable_mes) {
|
||||
int err;
|
||||
|
||||
err = remove_queue_mes(dqm, q, qpd);
|
||||
if (err) {
|
||||
retval = remove_queue_mes(dqm, q, qpd);
|
||||
if (retval) {
|
||||
dev_err(dev, "Failed to evict queue %d\n",
|
||||
q->properties.queue_id);
|
||||
retval = err;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
pdd->last_evict_timestamp = get_jiffies_64();
|
||||
if (!dqm->dev->kfd->shared_resources.enable_mes)
|
||||
|
||||
if (!dqm->dev->kfd->shared_resources.enable_mes) {
|
||||
pdd->last_evict_timestamp = get_jiffies_64();
|
||||
retval = execute_queues_cpsch(dqm,
|
||||
qpd->is_debug ?
|
||||
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
|
||||
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
|
||||
USE_DEFAULT_GRACE_PERIOD);
|
||||
} else {
|
||||
retval = resume_all_queues_mes(dqm);
|
||||
if (retval)
|
||||
dev_err(dev, "Resuming all queues failed");
|
||||
}
|
||||
|
||||
out:
|
||||
dqm_unlock(dqm);
|
||||
@@ -3098,61 +3111,17 @@ int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbel
 	return ret;
 }
 
-static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
-				   struct qcm_process_device *qpd)
-{
-	struct device *dev = dqm->dev->adev->dev;
-	int ret = 0;
-
-	/* Check if process is already evicted */
-	dqm_lock(dqm);
-	if (qpd->evicted) {
-		/* Increment the evicted count to make sure the
-		 * process stays evicted before its terminated.
-		 */
-		qpd->evicted++;
-		dqm_unlock(dqm);
-		goto out;
-	}
-	dqm_unlock(dqm);
-
-	ret = suspend_all_queues_mes(dqm);
-	if (ret) {
-		dev_err(dev, "Suspending all queues failed");
-		goto out;
-	}
-
-	ret = dqm->ops.evict_process_queues(dqm, qpd);
-	if (ret) {
-		dev_err(dev, "Evicting process queues failed");
-		goto out;
-	}
-
-	ret = resume_all_queues_mes(dqm);
-	if (ret)
-		dev_err(dev, "Resuming all queues failed");
-
-out:
-	return ret;
-}
-
 int kfd_evict_process_device(struct kfd_process_device *pdd)
 {
 	struct device_queue_manager *dqm;
 	struct kfd_process *p;
-	int ret = 0;
 
 	p = pdd->process;
 	dqm = pdd->dev->dqm;
 
 	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
 
-	if (dqm->dev->kfd->shared_resources.enable_mes)
-		ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
-	else
-		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
-
-	return ret;
+	return dqm->ops.evict_process_queues(dqm, &pdd->qpd);
 }
 
 int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
@@ -2085,8 +2085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	dc_hardware_init(adev->dm.dc);
 
-	adev->dm.restore_backlight = true;
-
 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
 	if (!adev->dm.hpd_rx_offload_wq) {
 		drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
@@ -3442,7 +3440,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
 	dc_resume(dm->dc);
-	adev->dm.restore_backlight = true;
 
 	amdgpu_dm_irq_resume_early(adev);
 
@@ -9969,6 +9966,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 	bool mode_set_reset_required = false;
 	u32 i;
 	struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+	bool set_backlight_level = false;
 
 	/* Disable writeback */
 	for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -10088,6 +10086,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 			acrtc->hw_mode = new_crtc_state->mode;
 			crtc->hwmode = new_crtc_state->mode;
 			mode_set_reset_required = true;
+			set_backlight_level = true;
 		} else if (modereset_required(new_crtc_state)) {
 			drm_dbg_atomic(dev,
 				       "Atomic commit: RESET. crtc id %d:[%p]\n",
@@ -10144,16 +10143,13 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 	 * to fix a flicker issue.
 	 * It will cause the dm->actual_brightness is not the current panel brightness
 	 * level. (the dm->brightness is the correct panel level)
-	 * So we set the backlight level with dm->brightness value after initial
-	 * set mode. Use restore_backlight flag to avoid setting backlight level
-	 * for every subsequent mode set.
+	 * So we set the backlight level with dm->brightness value after set mode
 	 */
-	if (dm->restore_backlight) {
+	if (set_backlight_level) {
 		for (i = 0; i < dm->num_of_edps; i++) {
 			if (dm->backlight_dev[i])
 				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
 		}
-		dm->restore_backlight = false;
 	}
 }
 
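The hunks above return amdgpu_dm_commit_streams() to the per-commit set_backlight_level flag: the CRTC loop records that a full mode set happened, and the cached brightness is reapplied once after the loop. A small standalone C sketch of that flag pattern follows; the types and helpers are illustrative placeholders, not the amdgpu_dm implementation.

#include <stdbool.h>
#include <stdio.h>

#define NUM_EDPS 2

/* Placeholder state: only the cached brightness values matter here. */
struct dm_state {
	int brightness[NUM_EDPS];
};

static void backlight_set_level(int edp, int level)
{
	printf("eDP %d -> brightness %d\n", edp, level);
}

static void commit_streams(struct dm_state *dm, const bool *mode_set_done, int ncrtcs)
{
	bool set_backlight_level = false;
	int i;

	/* Per-CRTC loop: remember whether any CRTC went through a full mode set. */
	for (i = 0; i < ncrtcs; i++) {
		if (mode_set_done[i])
			set_backlight_level = true;
	}

	/* Reapply the cached brightness once, after the loop. */
	if (set_backlight_level)
		for (i = 0; i < NUM_EDPS; i++)
			backlight_set_level(i, dm->brightness[i]);
}

int main(void)
{
	struct dm_state dm = { .brightness = { 80, 60 } };
	bool mode_set_done[1] = { true };

	commit_streams(&dm, mode_set_done, 1);
	return 0;
}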
@@ -630,13 +630,6 @@ struct amdgpu_display_manager {
 	 */
 	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
 
-	/**
-	 * @restore_backlight:
-	 *
-	 * Flag to indicate whether to restore backlight after modeset.
-	 */
-	bool restore_backlight;
-
 	/**
 	 * @aux_hpd_discon_quirk:
 	 *
@@ -3500,6 +3500,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 	 * for these GPUs to calculate bandwidth requirements.
 	 */
 	if (high_pixelclock_count) {
+		/* Work around flickering lines at the bottom edge
+		 * of the screen when using a single 4K 60Hz monitor.
+		 */
+		disable_mclk_switching = true;
+
 		/* On Oland, we observe some flickering when two 4K 60Hz
 		 * displays are connected, possibly because voltage is too low.
 		 * Raise the voltage by requiring a higher SCLK.
Some files were not shown because too many files have changed in this diff.