mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.15-rc3).

No conflicts.

Adjacent changes:

tools/net/ynl/pyynl/ynl_gen_c.py
  4d07bbf2d4 ("tools: ynl-gen: don't declare loop iterator in place")
  7e8ba0c7de ("tools: ynl: don't use genlmsghdr in classic netlink")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
5
.mailmap
@@ -322,6 +322,7 @@ Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
Jean-Michel Hautbois <jeanmichel.hautbois@yoseli.org> <jeanmichel.hautbois@ideasonboard.com>
Jean Tourrilhes <jt@hpl.hp.com>
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
@@ -438,6 +439,8 @@ Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@linaro.org>
Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@intel.com>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
@@ -685,6 +688,8 @@ Simon Wunderlich <sw@simonwunderlich.de> <simon.wunderlich@saxnet.de>
Simon Wunderlich <sw@simonwunderlich.de> <simon@open-mesh.com>
Simon Wunderlich <sw@simonwunderlich.de> <siwu@hrz.tu-chemnitz.de>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Kandagatla <srini@kernel.org> <srinivas.kandagatla@st.com>
Srinivas Kandagatla <srini@kernel.org> <srinivas.kandagatla@linaro.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
@@ -1604,3 +1604,35 @@ Description:
		prevent the UFS from frequently performing clock gating/ungating.

		The attribute is read/write.

What:		/sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_count
What:		/sys/bus/platform/devices/*.ufs/device_lvl_exception_count
Date:		March 2025
Contact:	Bao D. Nguyen <quic_nguyenb@quicinc.com>
Description:
		This attribute is applicable to UFS devices compliant with the
		JEDEC specification version 4.1 or later. The
		device_lvl_exception_count is a counter indicating the number of
		times device level exceptions have occurred since the last time
		this counter was reset. Writing a 0 value to this attribute
		resets the device_lvl_exception_count. If the
		device_lvl_exception_count reads a positive value, the user
		application should read the device_lvl_exception_id attribute
		for more information about the exception.

		The attribute is read/write.

What:		/sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_id
What:		/sys/bus/platform/devices/*.ufs/device_lvl_exception_id
Date:		March 2025
Contact:	Bao D. Nguyen <quic_nguyenb@quicinc.com>
Description:
		Reading the device_lvl_exception_id returns the
		qDeviceLevelExceptionID attribute of the UFS device, defined in
		JEDEC specification version 4.1. The definition of
		qDeviceLevelExceptionID is vendor-specific; refer to the device
		manufacturer's datasheet for more information on the meaning of
		the attribute value.

		The attribute is read only.
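A minimal user-space sketch of the polling flow described above (the
platform device name ``1d84000.ufs`` is a made-up placeholder; substitute
your board's UFS node)::

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	/* Hypothetical device path; adjust for your platform. */
	#define SYSFS_DIR "/sys/bus/platform/devices/1d84000.ufs/"

	static long read_attr(const char *name)
	{
		char path[256], buf[32];
		ssize_t n;
		int fd;

		snprintf(path, sizeof(path), SYSFS_DIR "%s", name);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		n = read(fd, buf, sizeof(buf) - 1);
		close(fd);
		if (n <= 0)
			return -1;
		buf[n] = '\0';
		return strtol(buf, NULL, 0);
	}

	int main(void)
	{
		long count = read_attr("device_lvl_exception_count");

		if (count > 0) {
			/* Positive count: look up the vendor-specific ID. */
			printf("exceptions=%ld id=%ld\n", count,
			       read_attr("device_lvl_exception_id"));
			/* Writing "0" to device_lvl_exception_count resets it. */
		}
		return 0;
	}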
@@ -22,3 +22,4 @@ are configurable at compile, boot or run time.
   srso
   gather_data_sampling
   reg-file-data-sampling
   rsb
268
Documentation/admin-guide/hw-vuln/rsb.rst
Normal file
@@ -0,0 +1,268 @@
.. SPDX-License-Identifier: GPL-2.0

=======================
RSB-related mitigations
=======================

.. warning::
   Please keep this document up-to-date, otherwise you will be
   volunteered to update it and convert it to a very long comment in
   bugs.c!

Since 2018 there have been many Spectre CVEs related to the Return Stack
Buffer (RSB) (sometimes referred to as the Return Address Stack (RAS) or
Return Address Predictor (RAP) on AMD).

Information about these CVEs and how to mitigate them is scattered
amongst a myriad of microarchitecture-specific documents.

This document attempts to consolidate all the relevant information in
one place and clarify the reasoning behind the current RSB-related
mitigations. It's meant to be as concise as possible, focused only on
the current kernel mitigations: what are the RSB-related attack vectors
and how are they currently being mitigated?

It's *not* meant to describe how the RSB mechanism operates or how the
exploits work. More details about those can be found in the references
below.

Rather, this is basically a glorified comment, but too long to actually
be one. So when the next CVE comes along, a kernel developer can
quickly refer to this as a refresher to see what we're actually doing
and why.

At a high level, there are two classes of RSB attacks: RSB poisoning
(Intel and AMD) and RSB underflow (Intel only). They must each be
considered individually for each attack vector (and microarchitecture
where applicable).

----

RSB poisoning (Intel and AMD)
=============================

SpectreRSB
~~~~~~~~~~

RSB poisoning is a technique used by SpectreRSB [#spectre-rsb]_ where
an attacker poisons an RSB entry to cause a victim's return instruction
to speculate to an attacker-controlled address. This can happen when
there are unbalanced CALLs/RETs after a context switch or VMEXIT.

* All attack vectors can potentially be mitigated by flushing out any
  poisoned RSB entries using an RSB filling sequence
  [#intel-rsb-filling]_ [#amd-rsb-filling]_ when transitioning between
  untrusted and trusted domains. But this has a performance impact and
  should be avoided whenever possible.

  .. DANGER::
     **FIXME**: Currently we're flushing 32 entries. However, some CPU
     models have more than 32 entries. The loop count needs to be
     increased for those. More detailed information is needed about RSB
     sizes.

* On context switch, the user->user mitigation requires ensuring the
  RSB gets filled or cleared whenever IBPB gets written [#cond-ibpb]_
  during a context switch:

  * AMD:
    On Zen 4+, IBPB (or SBPB [#amd-sbpb]_ if used) clears the RSB.
    This is indicated by IBPB_RET in CPUID [#amd-ibpb-rsb]_.

    On Zen < 4, the RSB filling sequence [#amd-rsb-filling]_ must
    always be done in addition to IBPB [#amd-ibpb-no-rsb]_. This is
    indicated by X86_BUG_IBPB_NO_RET.

  * Intel:
    IBPB always clears the RSB:

    "Software that executed before the IBPB command cannot control
    the predicted targets of indirect branches executed after the
    command on the same logical processor. The term indirect branch
    in this context includes near return instructions, so these
    predicted targets may come from the RSB." [#intel-ibpb-rsb]_

* On context switch, user->kernel attacks are prevented by SMEP. User
  space can only insert user space addresses into the RSB. Even
  non-canonical addresses can't be inserted due to the page gap at the
  end of the user canonical address space reserved by TASK_SIZE_MAX.
  A SMEP #PF at instruction fetch prevents the kernel from speculatively
  executing user space.

  * AMD:
    "Finally, branches that are predicted as 'ret' instructions get
    their predicted targets from the Return Address Predictor (RAP).
    AMD recommends software use a RAP stuffing sequence (mitigation
    V2-3 in [2]) and/or Supervisor Mode Execution Protection (SMEP)
    to ensure that the addresses in the RAP are safe for
    speculation. Collectively, we refer to these mitigations as "RAP
    Protection"." [#amd-smep-rsb]_

  * Intel:
    "On processors with enhanced IBRS, an RSB overwrite sequence may
    not suffice to prevent the predicted target of a near return
    from using an RSB entry created in a less privileged predictor
    mode. Software can prevent this by enabling SMEP (for
    transitions from user mode to supervisor mode) and by having
    IA32_SPEC_CTRL.IBRS set during VM exits." [#intel-smep-rsb]_

* On VMEXIT, guest->host attacks are mitigated by eIBRS (and PBRSB
  mitigation if needed):

  * AMD:
    "When Automatic IBRS is enabled, the internal return address
    stack used for return address predictions is cleared on VMEXIT."
    [#amd-eibrs-vmexit]_

  * Intel:
    "On processors with enhanced IBRS, an RSB overwrite sequence may
    not suffice to prevent the predicted target of a near return
    from using an RSB entry created in a less privileged predictor
    mode. Software can prevent this by enabling SMEP (for
    transitions from user mode to supervisor mode) and by having
    IA32_SPEC_CTRL.IBRS set during VM exits. Processors with
    enhanced IBRS still support the usage model where IBRS is set
    only in the OS/VMM for OSes that enable SMEP. To do this, such
    processors will ensure that guest behavior cannot control the
    RSB after a VM exit once IBRS is set, even if IBRS was not set
    at the time of the VM exit." [#intel-eibrs-vmexit]_

  Note that some Intel CPUs are susceptible to Post-barrier Return
  Stack Buffer Predictions (PBRSB) [#intel-pbrsb]_, where the last
  CALL from the guest can be used to predict the first unbalanced RET.
  In this case the PBRSB mitigation is needed in addition to eIBRS.

AMD RETBleed / SRSO / Branch Type Confusion
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

On AMD, poisoned RSB entries can also be created by the AMD RETBleed
variant [#retbleed-paper]_ [#amd-btc]_ or by Speculative Return Stack
Overflow [#amd-srso]_ (Inception [#inception-paper]_). The kernel
protects itself by replacing every RET in the kernel with a branch to a
single safe RET.

----

RSB underflow (Intel only)
==========================

RSB Alternate (RSBA) ("Intel Retbleed")
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some Intel Skylake-generation CPUs are susceptible to the Intel variant
of RETBleed [#retbleed-paper]_ (Return Stack Buffer Underflow
[#intel-rsbu]_). If a RET is executed when the RSB buffer is empty due
to mismatched CALLs/RETs or returning from a deep call stack, the branch
predictor can fall back to using the Branch Target Buffer (BTB). If a
user forces a BTB collision then the RET can speculatively branch to a
user-controlled address.

* Note that RSB filling doesn't fully mitigate this issue. If there
  are enough unbalanced RETs, the RSB may still underflow and fall back
  to using a poisoned BTB entry.

* On context switch, user->user underflow attacks are mitigated by the
  conditional IBPB [#cond-ibpb]_ on context switch which effectively
  clears the BTB:

  * "The indirect branch predictor barrier (IBPB) is an indirect branch
    control mechanism that establishes a barrier, preventing software
    that executed before the barrier from controlling the predicted
    targets of indirect branches executed after the barrier on the same
    logical processor." [#intel-ibpb-btb]_

* On context switch and VMEXIT, user->kernel and guest->host RSB
  underflows are mitigated by IBRS or eIBRS:

  * "Enabling IBRS (including enhanced IBRS) will mitigate the "RSBU"
    attack demonstrated by the researchers. As previously documented,
    Intel recommends the use of enhanced IBRS, where supported. This
    includes any processor that enumerates RRSBA but not RRSBA_DIS_S."
    [#intel-rsbu]_

  However, note that eIBRS and IBRS do not mitigate intra-mode attacks.
  Like RRSBA below, this is mitigated by clearing the BHB on kernel
  entry.

  As an alternative to classic IBRS, call depth tracking (combined with
  retpolines) can be used to track kernel returns and fill the RSB when
  it gets close to being empty.

Restricted RSB Alternate (RRSBA)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some newer Intel CPUs have Restricted RSB Alternate (RRSBA) behavior,
which, similar to RSBA described above, also falls back to using the BTB
on RSB underflow. The only difference is that the predicted targets are
restricted to the current domain when eIBRS is enabled:

* "Restricted RSB Alternate (RRSBA) behavior allows alternate branch
  predictors to be used by near RET instructions when the RSB is
  empty. When eIBRS is enabled, the predicted targets of these
  alternate predictors are restricted to those belonging to the
  indirect branch predictor entries of the current prediction domain."
  [#intel-eibrs-rrsba]_

When a CPU with RRSBA is vulnerable to Branch History Injection
[#bhi-paper]_ [#intel-bhi]_, an RSB underflow could be used for an
intra-mode BTI attack. This is mitigated by clearing the BHB on
kernel entry.

However if the kernel uses retpolines instead of eIBRS, it needs to
disable RRSBA:

* "Where software is using retpoline as a mitigation for BHI or
  intra-mode BTI, and the processor both enumerates RRSBA and
  enumerates RRSBA_DIS controls, it should disable this behavior."
  [#intel-retpoline-rrsba]_

----

References
==========

.. [#spectre-rsb] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://arxiv.org/pdf/1807.07940.pdf>`_

.. [#intel-rsb-filling] "Empty RSB Mitigation on Skylake-generation" in `Retpoline: A Branch Target Injection Mitigation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/retpoline-branch-target-injection-mitigation.html#inpage-nav-5-1>`_

.. [#amd-rsb-filling] "Mitigation V2-3" in `Software Techniques for Managing Speculation <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/software-techniques-for-managing-speculation.pdf>`_

.. [#cond-ibpb] Whether IBPB is written depends on whether the prev and/or next task is protected from Spectre attacks. It typically requires opting in per task or system-wide. For more details see the documentation for the ``spectre_v2_user`` cmdline option in Documentation/admin-guide/kernel-parameters.txt.

.. [#amd-sbpb] IBPB without flushing of branch type predictions. Only exists for AMD.

.. [#amd-ibpb-rsb] "Function 8000_0008h -- Processor Capacity Parameters and Extended Feature Identification" in `AMD64 Architecture Programmer's Manual Volume 3: General-Purpose and System Instructions <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24594.pdf>`_. SBPB behaves the same way according to `this email <https://lore.kernel.org/5175b163a3736ca5fd01cedf406735636c99a>`_.

.. [#amd-ibpb-no-rsb] `Spectre Attacks: Exploiting Speculative Execution <https://comsec.ethz.ch/wp-content/files/ibpb_sp25.pdf>`_

.. [#intel-ibpb-rsb] "Introduction" in `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_

.. [#amd-smep-rsb] "Existing Mitigations" in `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_

.. [#intel-smep-rsb] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_

.. [#amd-eibrs-vmexit] "Extended Feature Enable Register (EFER)" in `AMD64 Architecture Programmer's Manual Volume 2: System Programming <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf>`_

.. [#intel-eibrs-vmexit] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_

.. [#intel-pbrsb] `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_

.. [#retbleed-paper] `RETBleed: Arbitrary Speculative Code Execution with Return Instructions <https://comsec.ethz.ch/wp-content/files/retbleed_sec22.pdf>`_

.. [#amd-btc] `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_

.. [#amd-srso] `Technical Update Regarding Speculative Return Stack Overflow <https://www.amd.com/content/dam/amd/en/documents/corporate/cr/speculative-return-stack-overflow-whitepaper.pdf>`_

.. [#inception-paper] `Inception: Exposing New Attack Surfaces with Training in Transient Execution <https://comsec.ethz.ch/wp-content/files/inception_sec23.pdf>`_

.. [#intel-rsbu] `Return Stack Buffer Underflow / Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_

.. [#intel-ibpb-btb] `Indirect Branch Predictor Barrier <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-predictor-barrier.html>`_

.. [#intel-eibrs-rrsba] "Guidance for RSBU" in `Return Stack Buffer Underflow / Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_

.. [#bhi-paper] `Branch History Injection: On the Effectiveness of Hardware Mitigations Against Cross-Privilege Spectre-v2 Attacks <http://download.vusec.net/papers/bhi-spectre-bhb_sec22.pdf>`_

.. [#intel-bhi] `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_

.. [#intel-retpoline-rrsba] "Retpoline" in `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
@@ -1407,18 +1407,15 @@
	earlyprintk=serial[,0x...[,baudrate]]
	earlyprintk=ttySn[,baudrate]
	earlyprintk=dbgp[debugController#]
	earlyprintk=mmio32,membase[,{nocfg|baudrate}]
	earlyprintk=pciserial[,force],bus:device.function[,{nocfg|baudrate}]
	earlyprintk=xdbc[xhciController#]
	earlyprintk=bios
	earlyprintk=mmio,membase[,{nocfg|baudrate}]

	earlyprintk is useful when the kernel crashes before
	the normal console is initialized. It is not enabled by
	default because it has some cosmetic problems.

	Only 32-bit memory addresses are supported for "mmio"
	and "pciserial" devices.

	Use "nocfg" to skip UART configuration, assuming the
	BIOS/firmware has configured the UART correctly.
@@ -124,6 +124,14 @@ When mounting an XFS filesystem, the following options are accepted.
	controls the size of each buffer and so is also relevant to
	this case.

  lifetime (default) or nolifetime
	Enable data placement based on write life time hints provided
	by the user. This turns on co-allocation of data of similar
	life times when statistically favorable to reduce garbage
	collection cost.

	These options are only available for zoned rt file systems.

  logbsize=value
	Set the size of each in-memory log buffer. The size may be
	specified in bytes, or in kilobytes with a "k" suffix.

@@ -143,6 +151,14 @@ When mounting an XFS filesystem, the following options are accepted.

	optional, and the log section can be separate from the data
	section or contained within it.

  max_open_zones=value
	Specify the max number of zones to keep open for writing on a
	zoned rt device. Many open zones aid file data separation
	but may impact performance on HDDs.

	If ``max_open_zones`` is not specified, the value is determined
	by the capabilities and the size of the zoned rt device.

  noalign
	Data allocations will not be aligned at stripe unit
	boundaries. This is only relevant to filesystems created

@@ -542,3 +558,37 @@ The interesting knobs for XFS workqueues are as follows:

  nice         Relative priority of scheduling the threads. These are the
               same nice levels that can be applied to userspace processes.
  ============ ===========

Zoned Filesystems
=================

For zoned file systems, the following attribute is exposed in:

  /sys/fs/xfs/<dev>/zoned/

  max_open_zones (Min: 1  Default: Varies  Max: UINTMAX)
	This read-only attribute exposes the maximum number of open zones
	available for data placement. The value is determined at mount time and
	is limited by the capabilities of the backing zoned device, file system
	size and the max_open_zones mount option.

Zoned Filesystems
=================

For zoned file systems, the following attributes are exposed in:

  /sys/fs/xfs/<dev>/zoned/

  max_open_zones (Min: 1  Default: Varies  Max: UINTMAX)
	This read-only attribute exposes the maximum number of open zones
	available for data placement. The value is determined at mount time and
	is limited by the capabilities of the backing zoned device, file system
	size and the max_open_zones mount option.

  zonegc_low_space (Min: 0  Default: 0  Max: 100)
	Define a percentage for how much of the unused space that GC should keep
	available for writing. A high value will reclaim more of the space
	occupied by unused blocks, creating a larger buffer against write
	bursts at the cost of increased write amplification. Regardless
	of this value, garbage collection will always aim to free a minimum
	amount of blocks to keep max_open_zones open for data placement purposes.
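For illustration only, a hedged sketch of passing the max_open_zones option
programmatically via mount(2); the device and mountpoint below are
placeholders, and ``mount -o max_open_zones=32 ...`` achieves the same thing
from the shell::

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Placeholder paths; requires XFS on a zoned rt device. */
		if (mount("/dev/sdz", "/mnt/zoned", "xfs", 0,
			  "max_open_zones=32")) {
			perror("mount");
			return 1;
		}
		return 0;
	}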
@@ -79,8 +79,9 @@ feature flags.
How are feature flags created?
==============================

a: Feature flags can be derived from the contents of CPUID leaves.
------------------------------------------------------------------
Feature flags can be derived from the contents of CPUID leaves
--------------------------------------------------------------

These feature definitions are organized mirroring the layout of CPUID
leaves and grouped in words with offsets as mapped in enum cpuid_leafs
in cpufeatures.h (see arch/x86/include/asm/cpufeatures.h for details).
@@ -89,8 +90,9 @@ cpufeatures.h, and if it is detected at run time, the flags will be
displayed accordingly in /proc/cpuinfo. For example, the flag "avx2"
comes from X86_FEATURE_AVX2 in cpufeatures.h.

b: Flags can be from scattered CPUID-based features.
----------------------------------------------------
Flags can be from scattered CPUID-based features
------------------------------------------------

Hardware features enumerated in sparsely populated CPUID leaves get
software-defined values. Still, CPUID needs to be queried to determine
if a given feature is present. This is done in init_scattered_cpuid_features().
@@ -104,8 +106,9 @@ has only one feature and would waste 31 bits of space in the x86_capability[]
array. Since there is a struct cpuinfo_x86 for each possible CPU, the wasted
memory is not trivial.

c: Flags can be created synthetically under certain conditions for hardware features.
-------------------------------------------------------------------------------------
Flags can be created synthetically under certain conditions for hardware features
---------------------------------------------------------------------------------

Examples of conditions include whether certain features are present in
MSR_IA32_CORE_CAPS or specific CPU models are identified. If the needed
conditions are met, the features are enabled by the set_cpu_cap or
@@ -114,8 +117,8 @@ the feature X86_FEATURE_SPLIT_LOCK_DETECT will be enabled and
"split_lock_detect" will be displayed. The flag "ring3mwait" will be
displayed only when running on INTEL_XEON_PHI_[KNL|KNM] processors.

d: Flags can represent purely software features.
------------------------------------------------
Flags can represent purely software features
--------------------------------------------
These flags do not represent hardware features. Instead, they represent a
software feature implemented in the kernel. For example, Kernel Page Table
Isolation is a purely software feature and its feature flag X86_FEATURE_PTI is
@@ -130,14 +133,18 @@ x86_cap/bug_flags[] arrays in kernel/cpu/capflags.c. The names in the
resulting x86_cap/bug_flags[] are used to populate /proc/cpuinfo. The naming
of flags in the x86_cap/bug_flags[] is as follows:

a: The name of the flag is from the string in X86_FEATURE_<name> by default.
----------------------------------------------------------------------------
By default, the flag <name> in /proc/cpuinfo is extracted from the respective
X86_FEATURE_<name> in cpufeatures.h. For example, the flag "avx2" is from
X86_FEATURE_AVX2.
Flags do not appear by default in /proc/cpuinfo
-----------------------------------------------

Feature flags are omitted by default from /proc/cpuinfo as it does not make
sense for the feature to be exposed to userspace in most cases. For example,
X86_FEATURE_ALWAYS is defined in cpufeatures.h but that flag is an internal
kernel feature used in the alternative runtime patching functionality. So the
flag does not appear in /proc/cpuinfo.

Specify a flag name if absolutely needed
----------------------------------------

b: The naming can be overridden.
--------------------------------
If the comment on the line for the #define X86_FEATURE_* starts with a
double-quote character (""), the string inside the double-quote characters
will be the name of the flags. For example, the flag "sse4_1" comes from
@@ -148,36 +155,31 @@ needed. For instance, /proc/cpuinfo is a userspace interface and must remain
constant. If, for some reason, the naming of X86_FEATURE_<name> changes, one
shall override the new naming with the name already used in /proc/cpuinfo.
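A sketch of the convention described above (the bit positions are
illustrative reconstructions, not authoritative cpufeatures.h contents)::

	/* Default naming: appears in /proc/cpuinfo as "avx2", derived
	 * from the macro name X86_FEATURE_AVX2 itself. */
	#define X86_FEATURE_AVX2	( 9*32 +  5) /* AVX2 instructions */

	/* Overridden naming: the quoted string wins, so this appears
	 * as "sse4_1" even though the macro is X86_FEATURE_XMM4_1. */
	#define X86_FEATURE_XMM4_1	( 4*32 + 19) /* "sse4_1" SSE-4.1 */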
c: The naming override can be "", which means it will not appear in /proc/cpuinfo.
----------------------------------------------------------------------------------
The feature shall be omitted from /proc/cpuinfo if it does not make sense for
the feature to be exposed to userspace. For example, X86_FEATURE_ALWAYS is
defined in cpufeatures.h but that flag is an internal kernel feature used
in the alternative runtime patching functionality. So, its name is overridden
with "". Its flag will not appear in /proc/cpuinfo.

Flags are missing when one or more of these happen
==================================================

a: The hardware does not enumerate support for it.
--------------------------------------------------
The hardware does not enumerate support for it
----------------------------------------------

For example, when a new kernel is running on old hardware or the feature is
not enabled by boot firmware. Even if the hardware is new, there might be a
problem enabling the feature at run time, in which case the flag will not be
displayed.

b: The kernel does not know about the flag.
-------------------------------------------
The kernel does not know about the flag
---------------------------------------

For example, when an old kernel is running on new hardware.

c: The kernel disabled support for it at compile-time.
------------------------------------------------------
The kernel disabled support for it at compile-time
--------------------------------------------------

For example, if 5-level-paging is not enabled when building (i.e.,
CONFIG_X86_5LEVEL is not selected) the flag "la57" will not show up [#f1]_.
Even though the feature will still be detected via CPUID, the kernel disables
it by clearing via setup_clear_cpu_cap(X86_FEATURE_LA57).

d: The feature is disabled at boot-time.
----------------------------------------
The feature is disabled at boot-time
------------------------------------
A feature can be disabled either using a command-line parameter or because
it failed to be enabled. The command-line parameter clearcpuid= can be used
to disable features using the feature number as defined in
@@ -190,8 +192,9 @@ disable specific features. The list of parameters includes, but is not limited
to, nofsgsbase, nosgx, noxsave, etc. 5-level paging can also be disabled using
"no5lvl".

e: The feature was known to be non-functional.
----------------------------------------------
The feature was known to be non-functional
------------------------------------------

The feature was known to be non-functional because a dependency was
missing at runtime. For example, AVX flags will not show up if the XSAVE
feature is disabled, since they depend on it. Another example would be broken
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ceva AHCI SATA Controller

maintainers:
  - Mubin Sayyed <mubin.sayyed@amd.com>
  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>

description: |
@@ -111,11 +111,27 @@ properties:
      unevaluatedProperties: false

    port@1:
      $ref: /schemas/graph.yaml#/properties/port
      $ref: /schemas/graph.yaml#/$defs/port-base
      unevaluatedProperties: false
      description:
        DSI output port node to the panel or the next bridge
        in the chain

      properties:
        endpoint:
          $ref: /schemas/media/video-interfaces.yaml#
          unevaluatedProperties: false

          properties:
            data-lanes:
              description: array of physical DSI data lane indexes.
              minItems: 1
              items:
                - const: 1
                - const: 2
                - const: 3
                - const: 4

  required:
    - port@0
    - port@1
@@ -12,7 +12,6 @@ description:
  PS_MODE). Every pin can be configured as input/output.

maintainers:
  - Mubin Sayyed <mubin.sayyed@amd.com>
  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>

properties:
@@ -19,6 +19,7 @@ properties:
          - fsl,imx8mp-irqsteer
          - fsl,imx8qm-irqsteer
          - fsl,imx8qxp-irqsteer
          - fsl,imx94-irqsteer
      - const: fsl,imx-irqsteer

  reg:
@@ -9,15 +9,6 @@ title: Renesas R-Car Timer Pulse Unit PWM Controller
maintainers:
  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>

select:
  properties:
    compatible:
      contains:
        const: renesas,tpu
  required:
    - compatible
    - '#pwm-cells'

properties:
  compatible:
    items:
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Zynq UltraScale+ MPSoC and Versal reset

maintainers:
  - Mubin Sayyed <mubin.sayyed@amd.com>
  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>

description: |
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Layerscape Reset Registers Module

maintainers:
  - Frank Li
  - Frank Li <Frank.Li@nxp.com>

description:
  Reset Module includes chip reset, service processor control and Reset Control
@@ -18,9 +18,14 @@ description: |

properties:
  compatible:
    enum:
      - nxp,imx95-sysctr-timer
      - nxp,sysctr-timer
    oneOf:
      - enum:
          - nxp,imx95-sysctr-timer
          - nxp,sysctr-timer
      - items:
          - enum:
              - nxp,imx94-sysctr-timer
          - const: nxp,imx95-sysctr-timer

  reg:
    maxItems: 1
@@ -1,56 +0,0 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/timer/renesas,tpu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas H8/300 Timer Pulse Unit

maintainers:
  - Yoshinori Sato <ysato@users.sourceforge.jp>

description:
  The TPU is a 16bit timer/counter with configurable clock inputs and
  programmable compare match.
  This implementation supports only cascade mode.

select:
  properties:
    compatible:
      contains:
        const: renesas,tpu
    '#pwm-cells': false
  required:
    - compatible

properties:
  compatible:
    const: renesas,tpu

  reg:
    items:
      - description: First channel
      - description: Second channel

  clocks:
    maxItems: 1

  clock-names:
    const: fck

required:
  - compatible
  - reg
  - clocks
  - clock-names

additionalProperties: false

examples:
  - |
    tpu: tpu@ffffe0 {
        compatible = "renesas,tpu";
        reg = <0xffffe0 16>, <0xfffff0 12>;
        clocks = <&pclk>;
        clock-names = "fck";
    };
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx SuperSpeed DWC3 USB SoC controller

maintainers:
  - Mubin Sayyed <mubin.sayyed@amd.com>
  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>

properties:
@@ -17,7 +17,6 @@ description:

maintainers:
  - Michal Simek <michal.simek@amd.com>
  - Mubin Sayyed <mubin.sayyed@amd.com>
  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>

properties:
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx udc controller

maintainers:
  - Mubin Sayyed <mubin.sayyed@amd.com>
  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>

properties:
@@ -328,9 +328,13 @@ The ext4 superblock is laid out as follows in
    - s_checksum_type
    - Metadata checksum algorithm type. The only valid value is 1 (crc32c).
  * - 0x176
    - __le16
    - s_reserved_pad
    -
    - \_\_u8
    - s\_encryption\_level
    - Versioning level for encryption.
  * - 0x177
    - \_\_u8
    - s\_reserved\_pad
    - Padding to next 32bits.
  * - 0x178
    - __le64
    - s_kbytes_written
@@ -466,9 +470,13 @@ The ext4 superblock is laid out as follows in
    - s_last_error_time_hi
    - Upper 8 bits of the s_last_error_time field.
  * - 0x27A
    - __u8
    - s_pad[2]
    - Zero padding.
    - \_\_u8
    - s\_first\_error\_errcode
    -
  * - 0x27B
    - \_\_u8
    - s\_last\_error\_errcode
    -
  * - 0x27C
    - __le16
    - s_encoding
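Rendered as the struct fragment the corrected rows describe (a sketch
assembled from the table above, not a verbatim copy of fs/ext4/ext4.h)::

	/* Around offset 0x176 */
	__u8	s_encryption_level;	/* 0x176: versioning level for encryption */
	__u8	s_reserved_pad;		/* 0x177: padding to next 32 bits */
	__le64	s_kbytes_written;	/* 0x178 */

	/* Around offset 0x27A */
	__u8	s_first_error_errcode;	/* 0x27A */
	__u8	s_last_error_errcode;	/* 0x27B */
	__le16	s_encoding;		/* 0x27C */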
@@ -123,12 +123,12 @@ attribute-sets:

operations:
  name-prefix: ovs-vport-cmd-
  fixed-header: ovs-header
  list:
    -
      name: new
      doc: Create a new OVS vport
      attribute-set: vport
      fixed-header: ovs-header
      do:
        request:
          attributes:
@@ -141,7 +141,6 @@ operations:
      name: del
      doc: Delete existing OVS vport from a data path
      attribute-set: vport
      fixed-header: ovs-header
      do:
        request:
          attributes:
@@ -152,7 +151,6 @@ operations:
      name: get
      doc: Get / dump OVS vport configuration and state
      attribute-set: vport
      fixed-header: ovs-header
      do: &vport-get-op
        request:
          attributes:
@@ -1119,11 +1119,10 @@ attribute-sets:
    -
      name: prop-list
      type: nest
      nested-attributes: link-attrs
      nested-attributes: prop-list-link-attrs
    -
      name: alt-ifname
      type: string
      multi-attr: true
    -
      name: perm-address
      type: binary
@@ -1169,6 +1168,13 @@ attribute-sets:
    -
      name: netns-immutable
      type: u8
  -
    name: prop-list-link-attrs
    subset-of: link-attrs
    attributes:
      -
        name: alt-ifname
        multi-attr: true
  -
    name: af-spec-attrs
    attributes:
@@ -1591,7 +1597,7 @@ attribute-sets:
      name: nf-call-iptables
      type: u8
    -
      name: nf-call-ip6-tables
      name: nf-call-ip6tables
      type: u8
    -
      name: nf-call-arptables
@@ -2083,7 +2089,7 @@ attribute-sets:
      name: id
      type: u16
    -
      name: flag
      name: flags
      type: binary
      struct: ifla-vlan-flags
    -
@@ -2171,7 +2177,7 @@ attribute-sets:
      type: binary
      struct: ifla-cacheinfo
    -
      name: icmp6-stats
      name: icmp6stats
      type: binary
      struct: ifla-icmp6-stats
    -
@@ -2185,9 +2191,10 @@ attribute-sets:
      type: u32
  -
    name: mctp-attrs
    name-prefix: ifla-mctp-
    attributes:
      -
        name: mctp-net
        name: net
        type: u32
      -
        name: phys-binding
@@ -2469,7 +2476,6 @@ operations:
            - min-mtu
            - max-mtu
            - prop-list
            - alt-ifname
            - perm-address
            - proto-down-reason
            - parent-dev-name
@@ -13,25 +13,25 @@ definitions:
      type: struct
      members:
        -
          name: family
          name: ndm-family
          type: u8
        -
          name: pad
          name: ndm-pad
          type: pad
          len: 3
        -
          name: ifindex
          name: ndm-ifindex
          type: s32
        -
          name: state
          name: ndm-state
          type: u16
          enum: nud-state
        -
          name: flags
          name: ndm-flags
          type: u8
          enum: ntf-flags
        -
          name: type
          name: ndm-type
          type: u8
          enum: rtm-type
        -
@@ -189,7 +189,7 @@ attribute-sets:
      type: binary
      display-hint: ipv4
    -
      name: lladr
      name: lladdr
      type: binary
      display-hint: mac
    -
@@ -27,7 +27,7 @@ SYSCALL
=======
mseal syscall signature
-----------------------
``int mseal(void \* addr, size_t len, unsigned long flags)``
``int mseal(void *addr, size_t len, unsigned long flags)``

**addr**/**len**: virtual memory address range.
The address range set by **addr**/**len** must meet:
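A hedged usage sketch of the corrected signature; glibc may not provide a
wrapper yet, so this goes through syscall(2), and the fallback syscall
number is an assumption (462 on x86_64) to verify against your headers::

	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef __NR_mseal
	#define __NR_mseal 462	/* assumed x86_64 value; verify locally */
	#endif

	int main(void)
	{
		size_t len = 4096;
		void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (addr == MAP_FAILED)
			return 1;
		/* flags is reserved and must currently be 0. */
		if (syscall(__NR_mseal, addr, len, 0UL)) {
			perror("mseal");
			return 1;
		}
		/* The mapping is sealed: this mprotect() should fail with EPERM. */
		if (mprotect(addr, len, PROT_READ))
			perror("mprotect on sealed mapping");
		return 0;
	}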
@@ -138,6 +138,10 @@ input data, the meaning of which depends on the subfeature being accessed.
The output buffer contains a single byte which signals success or failure (``0x00`` on failure)
and 31 bytes of output data, the meaning of which depends on the subfeature being accessed.

.. note::
   The ACPI control method responsible for handling the WMI method calls is not thread-safe.
   This is a firmware bug that needs to be handled inside the driver itself.

WMI method Get_EC()
-------------------
18
MAINTAINERS
@@ -6335,6 +6335,7 @@ F: Documentation/process/cve.rst

CW1200 WLAN driver
S:	Orphan
L:	linux-wireless@vger.kernel.org
F:	drivers/net/wireless/st/
F:	include/linux/platform_data/net-cw1200.h

@@ -10947,6 +10948,7 @@ F: include/linux/platform_data/huawei-gaokun-ec.h

HUGETLB SUBSYSTEM
M:	Muchun Song <muchun.song@linux.dev>
R:	Oscar Salvador <osalvador@suse.de>
L:	linux-mm@kvack.org
S:	Maintained
F:	Documentation/ABI/testing/sysfs-kernel-mm-hugepages
@@ -12803,6 +12805,7 @@ F: lib/Kconfig.kcsan
F:	scripts/Makefile.kcsan

KDUMP
M:	Andrew Morton <akpm@linux-foundation.org>
M:	Baoquan He <bhe@redhat.com>
R:	Vivek Goyal <vgoyal@redhat.com>
R:	Dave Young <dyoung@redhat.com>
@@ -13104,6 +13107,8 @@ F: fs/kernfs/
F:	include/linux/kernfs.h

KEXEC
M:	Andrew Morton <akpm@linux-foundation.org>
M:	Baoquan He <bhe@redhat.com>
L:	kexec@lists.infradead.org
W:	http://kernel.org/pub/linux/utils/kernel/kexec/
F:	include/linux/kexec.h
@@ -14276,6 +14281,7 @@ S: Odd fixes
F:	drivers/net/ethernet/marvell/sk*

MARVELL LIBERTAS WIRELESS DRIVER
L:	linux-wireless@vger.kernel.org
L:	libertas-dev@lists.infradead.org
S:	Orphan
F:	drivers/net/wireless/marvell/libertas/
@@ -17361,7 +17367,7 @@ T: git git://git.infradead.org/nvme.git
F:	drivers/nvme/target/

NVMEM FRAMEWORK
M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M:	Srinivas Kandagatla <srini@kernel.org>
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/srini/nvmem.git
F:	Documentation/ABI/stable/sysfs-bus-nvmem
@@ -19577,7 +19583,7 @@ S: Supported
F:	drivers/crypto/intel/qat/

QCOM AUDIO (ASoC) DRIVERS
M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M:	Srinivas Kandagatla <srini@kernel.org>
L:	linux-sound@vger.kernel.org
L:	linux-arm-msm@vger.kernel.org
S:	Supported
@@ -19750,6 +19756,7 @@ F: drivers/media/tuners/qt1010*

QUALCOMM ATH12K WIRELESS DRIVER
M:	Jeff Johnson <jjohnson@kernel.org>
L:	linux-wireless@vger.kernel.org
L:	ath12k@lists.infradead.org
S:	Supported
W:	https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
@@ -19759,6 +19766,7 @@ N: ath12k

QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M:	Jeff Johnson <jjohnson@kernel.org>
L:	linux-wireless@vger.kernel.org
L:	ath10k@lists.infradead.org
S:	Supported
W:	https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -19768,6 +19776,7 @@ N: ath10k

QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M:	Jeff Johnson <jjohnson@kernel.org>
L:	linux-wireless@vger.kernel.org
L:	ath11k@lists.infradead.org
S:	Supported
W:	https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
@@ -19877,7 +19886,7 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.yaml
F:	drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c

QUALCOMM FASTRPC DRIVER
M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M:	Srinivas Kandagatla <srini@kernel.org>
M:	Amol Maheshwari <amahesh@qti.qualcomm.com>
L:	linux-arm-msm@vger.kernel.org
L:	dri-devel@lists.freedesktop.org
@@ -21925,7 +21934,7 @@ S: Maintained
F:	drivers/media/rc/serial_ir.c

SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M:	Srinivas Kandagatla <srini@kernel.org>
L:	linux-sound@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/slimbus/
@@ -22141,6 +22150,7 @@ F: drivers/platform/x86/touchscreen_dmi.c

SILICON LABS WIRELESS DRIVERS (for WFxxx series)
M:	Jérôme Pouiller <jerome.pouiller@silabs.com>
L:	linux-wireless@vger.kernel.org
S:	Supported
F:	Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
F:	drivers/net/wireless/silabs/
2
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -86,7 +86,7 @@

#endif

#define res_smp_cond_load_acquire_timewait(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)

#include <asm-generic/rqspinlock.h>
@@ -332,6 +332,10 @@ config HAVE_MARCH_Z16_FEATURES
	def_bool n
	select HAVE_MARCH_Z15_FEATURES

config HAVE_MARCH_Z17_FEATURES
	def_bool n
	select HAVE_MARCH_Z16_FEATURES

choice
	prompt "Processor type"
	default MARCH_Z196
@@ -397,6 +401,14 @@ config MARCH_Z16
	  Select this to enable optimizations for IBM z16 (3931 and
	  3932 series).

config MARCH_Z17
	bool "IBM z17"
	select HAVE_MARCH_Z17_FEATURES
	depends on $(cc-option,-march=z17)
	help
	  Select this to enable optimizations for IBM z17 (9175 and
	  9176 series).

endchoice

config MARCH_Z10_TUNE
@@ -420,6 +432,9 @@ config MARCH_Z15_TUNE
config MARCH_Z16_TUNE
	def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT

config MARCH_Z17_TUNE
	def_bool TUNE_Z17 || MARCH_Z17 && TUNE_DEFAULT

choice
	prompt "Tune code generation"
	default TUNE_DEFAULT
@@ -464,6 +479,10 @@ config TUNE_Z16
	bool "IBM z16"
	depends on $(cc-option,-mtune=z16)

config TUNE_Z17
	bool "IBM z17"
	depends on $(cc-option,-mtune=z17)

endchoice

config 64BIT
@@ -48,6 +48,7 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13
mflags-$(CONFIG_MARCH_Z14) := -march=z14
mflags-$(CONFIG_MARCH_Z15) := -march=z15
mflags-$(CONFIG_MARCH_Z16) := -march=z16
mflags-$(CONFIG_MARCH_Z17) := -march=z17

export CC_FLAGS_MARCH := $(mflags-y)

@@ -61,6 +62,7 @@ cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
cflags-$(CONFIG_MARCH_Z14_TUNE) += -mtune=z14
cflags-$(CONFIG_MARCH_Z15_TUNE) += -mtune=z15
cflags-$(CONFIG_MARCH_Z16_TUNE) += -mtune=z16
cflags-$(CONFIG_MARCH_Z17_TUNE) += -mtune=z17

cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
@@ -33,6 +33,10 @@
#define MARCH_HAS_Z16_FEATURES 1
#endif

#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
#define MARCH_HAS_Z17_FEATURES 1
#endif

#endif /* __DECOMPRESSOR */

#endif /* __ASM_S390_MARCH_H */
@@ -442,7 +442,7 @@ static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
		ctrset_size = 48;
	else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
		ctrset_size = 128;
	else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
	else if (cpumf_ctr_info.csvn >= 6 && cpumf_ctr_info.csvn <= 8)
		ctrset_size = 160;
	break;
case CPUMF_CTR_SET_MT_DIAG:
@@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
	unsigned int type = event->attr.type;
	int err;
	int err = -ENOENT;

	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
		err = __hw_perf_event_init(event, type);
	else if (event->pmu->type == type)
		/* Registered as unknown PMU */
		err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
	else
		return -ENOENT;

	if (unlikely(err) && event->destroy)
		event->destroy(event);

	return err;
}
@@ -1819,8 +1814,6 @@ static int cfdiag_event_init(struct perf_event *event)
	event->destroy = hw_perf_event_destroy;

	err = cfdiag_event_init2(event);
	if (unlikely(err))
		event->destroy(event);
out:
	return err;
}
@@ -237,7 +237,6 @@ CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
|
||||
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
|
||||
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
|
||||
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
|
||||
|
||||
CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
|
||||
CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
|
||||
CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
|
||||
@@ -365,6 +364,83 @@ CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d);
|
||||
CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e);
|
||||
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
|
||||
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
|
||||
CPUMF_EVENT_ATTR(cf_z17, L1D_RO_EXCL_WRITES, 0x0080);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DTLB2_WRITES, 0x0081);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DTLB2_MISSES, 0x0082);
|
||||
CPUMF_EVENT_ATTR(cf_z17, CRSTE_1MB_WRITES, 0x0083);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DTLB2_GPAGE_WRITES, 0x0084);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ITLB2_WRITES, 0x0086);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ITLB2_MISSES, 0x0087);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TLB2_PTE_WRITES, 0x0089);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TLB2_CRSTE_WRITES, 0x008a);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TLB2_ENGINES_BUSY, 0x008b);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TX_C_TEND, 0x008c);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TX_NC_TEND, 0x008d);
|
||||
CPUMF_EVENT_ATTR(cf_z17, L1C_TLB2_MISSES, 0x008f);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_REQ, 0x0091);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_IV, 0x0092);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_CHIP_HIT, 0x0093);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_DRAWER_HIT, 0x0094);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP, 0x0095);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_IV, 0x0096);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_CHIP_HIT, 0x0097);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT, 0x0098);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE, 0x0099);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER, 0x009a);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER, 0x009b);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_MEMORY, 0x009c);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE_MEMORY, 0x009d);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER_MEMORY, 0x009e);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER_MEMORY, 0x009f);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_IV, 0x00a0);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT, 0x00a1);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_IV, 0x00a3);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_IV, 0x00a6);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7);
|
||||
CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_REQ, 0x00a9);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_IV, 0x00aa);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_CHIP_HIT, 0x00ab);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_DRAWER_HIT, 0x00ac);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP, 0x00ad);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_IV, 0x00ae);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_CHIP_HIT, 0x00af);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT, 0x00b0);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_ON_MODULE, 0x00b1);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_ON_DRAWER, 0x00b2);
|
||||
CPUMF_EVENT_ATTR(cf_z17, ICW_OFF_DRAWER, 0x00b3);
|
||||
CPUMF_EVENT_ATTR(cf_z17, CYCLES_SAMETHRD, 0x00ca);
|
||||
CPUMF_EVENT_ATTR(cf_z17, CYCLES_DIFFTHRD, 0x00cb);
|
||||
CPUMF_EVENT_ATTR(cf_z17, INST_SAMETHRD, 0x00cc);
|
||||
CPUMF_EVENT_ATTR(cf_z17, INST_DIFFTHRD, 0x00cd);
|
||||
CPUMF_EVENT_ATTR(cf_z17, WRONG_BRANCH_PREDICTION, 0x00ce);
|
||||
CPUMF_EVENT_ATTR(cf_z17, VX_BCD_EXECUTION_SLOTS, 0x00e1);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DECIMAL_INSTRUCTIONS, 0x00e2);
|
||||
CPUMF_EVENT_ATTR(cf_z17, LAST_HOST_TRANSLATIONS, 0x00e8);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TX_NC_TABORT, 0x00f4);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_NO_SPECIAL, 0x00f5);
|
||||
CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_SPECIAL, 0x00f6);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DFLT_ACCESS, 0x00f8);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DFLT_CYCLES, 0x00fd);
|
||||
CPUMF_EVENT_ATTR(cf_z17, SORTL, 0x0100);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DFLT_CC, 0x0109);
|
||||
CPUMF_EVENT_ATTR(cf_z17, DFLT_CCFINISH, 0x010a);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_INVOCATIONS, 0x010b);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPLETIONS, 0x010c);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_WAIT_LOCK, 0x010d);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_HOLD_LOCK, 0x010e);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_ONCHIP, 0x0110);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_OFFCHIP, 0x0111);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_DIFF, 0x0112);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_4K_PREFETCH, 0x0114);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPL_LOCK, 0x0115);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK, 0x0116);
|
||||
CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO, 0x0117);
|
||||
CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
|
||||
CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
|
||||
|
||||
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),

@@ -414,7 +490,7 @@ static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
NULL,
};

static struct attribute *cpumcf_svn_67_pmu_event_attr[] __initdata = {
static struct attribute *cpumcf_svn_678_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),

@@ -779,6 +855,87 @@ static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = {
NULL,
};

static struct attribute *cpumcf_z17_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z17, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z17, DTLB2_WRITES),
CPUMF_EVENT_PTR(cf_z17, DTLB2_MISSES),
CPUMF_EVENT_PTR(cf_z17, CRSTE_1MB_WRITES),
CPUMF_EVENT_PTR(cf_z17, DTLB2_GPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z17, ITLB2_WRITES),
CPUMF_EVENT_PTR(cf_z17, ITLB2_MISSES),
CPUMF_EVENT_PTR(cf_z17, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_z17, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_z17, TLB2_ENGINES_BUSY),
CPUMF_EVENT_PTR(cf_z17, TX_C_TEND),
CPUMF_EVENT_PTR(cf_z17, TX_NC_TEND),
CPUMF_EVENT_PTR(cf_z17, L1C_TLB2_MISSES),
CPUMF_EVENT_PTR(cf_z17, DCW_REQ),
CPUMF_EVENT_PTR(cf_z17, DCW_REQ_IV),
CPUMF_EVENT_PTR(cf_z17, DCW_REQ_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z17, DCW_REQ_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_IV),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER),
CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_MEMORY),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE_MEMORY),
CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER_MEMORY),
CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER_MEMORY),
CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_IV),
CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_IV),
CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_IV),
CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z17, ICW_REQ),
CPUMF_EVENT_PTR(cf_z17, ICW_REQ_IV),
CPUMF_EVENT_PTR(cf_z17, ICW_REQ_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z17, ICW_REQ_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP),
CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_IV),
CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z17, ICW_ON_MODULE),
CPUMF_EVENT_PTR(cf_z17, ICW_ON_DRAWER),
CPUMF_EVENT_PTR(cf_z17, ICW_OFF_DRAWER),
CPUMF_EVENT_PTR(cf_z17, CYCLES_SAMETHRD),
CPUMF_EVENT_PTR(cf_z17, CYCLES_DIFFTHRD),
CPUMF_EVENT_PTR(cf_z17, INST_SAMETHRD),
CPUMF_EVENT_PTR(cf_z17, INST_DIFFTHRD),
CPUMF_EVENT_PTR(cf_z17, WRONG_BRANCH_PREDICTION),
CPUMF_EVENT_PTR(cf_z17, VX_BCD_EXECUTION_SLOTS),
CPUMF_EVENT_PTR(cf_z17, DECIMAL_INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_z17, LAST_HOST_TRANSLATIONS),
CPUMF_EVENT_PTR(cf_z17, TX_NC_TABORT),
CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_NO_SPECIAL),
CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_SPECIAL),
CPUMF_EVENT_PTR(cf_z17, DFLT_ACCESS),
CPUMF_EVENT_PTR(cf_z17, DFLT_CYCLES),
CPUMF_EVENT_PTR(cf_z17, SORTL),
CPUMF_EVENT_PTR(cf_z17, DFLT_CC),
CPUMF_EVENT_PTR(cf_z17, DFLT_CCFINISH),
CPUMF_EVENT_PTR(cf_z17, NNPA_INVOCATIONS),
CPUMF_EVENT_PTR(cf_z17, NNPA_COMPLETIONS),
CPUMF_EVENT_PTR(cf_z17, NNPA_WAIT_LOCK),
CPUMF_EVENT_PTR(cf_z17, NNPA_HOLD_LOCK),
CPUMF_EVENT_PTR(cf_z17, NNPA_INST_ONCHIP),
CPUMF_EVENT_PTR(cf_z17, NNPA_INST_OFFCHIP),
CPUMF_EVENT_PTR(cf_z17, NNPA_INST_DIFF),
CPUMF_EVENT_PTR(cf_z17, NNPA_4K_PREFETCH),
CPUMF_EVENT_PTR(cf_z17, NNPA_COMPL_LOCK),
CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK),
CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO),
CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,
};

/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */

static struct attribute_group cpumcf_pmu_events_group = {
@@ -859,7 +1016,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
if (ci.csvn >= 1 && ci.csvn <= 5)
csvn = cpumcf_svn_12345_pmu_event_attr;
else if (ci.csvn >= 6)
csvn = cpumcf_svn_67_pmu_event_attr;
csvn = cpumcf_svn_678_pmu_event_attr;

/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
@@ -892,6 +1049,10 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 0x3932:
model = cpumcf_z16_pmu_event_attr;
break;
case 0x9175:
case 0x9176:
model = cpumcf_z17_pmu_event_attr;
break;
default:
model = none;
break;

@@ -885,9 +885,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
event->attr.exclude_idle = 0;

err = __hw_perf_event_init(event);
if (unlikely(err))
if (event->destroy)
event->destroy(event);
return err;
}

@@ -294,6 +294,10 @@ static int __init setup_elf_platform(void)
case 0x3932:
strcpy(elf_platform, "z16");
break;
case 0x9175:
case 0x9176:
strcpy(elf_platform, "z17");
break;
}
return 0;
}

@@ -53,6 +53,9 @@ static struct facility_def facility_defs[] = {
#endif
#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
61, /* miscellaneous-instruction-extension 3 */
#endif
#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
84, /* miscellaneous-instruction-extension 4 */
#endif
-1 /* END */
}

@@ -17,19 +17,20 @@

.pushsection .noinstr.text, "ax"

SYM_FUNC_START(entry_ibpb)
/* Clobbers AX, CX, DX */
SYM_FUNC_START(write_ibpb)
ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
movl $PRED_CMD_IBPB, %eax
movl _ASM_RIP(x86_pred_cmd), %eax
xorl %edx, %edx
wrmsr

/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
SYM_FUNC_END(entry_ibpb)
SYM_FUNC_END(write_ibpb)
/* For KVM */
EXPORT_SYMBOL_GPL(entry_ibpb);
EXPORT_SYMBOL_GPL(write_ibpb);

.popsection

@@ -269,7 +269,7 @@
* typically has NO_MELTDOWN).
*
* While retbleed_untrain_ret() doesn't clobber anything but requires stack,
* entry_ibpb() will clobber AX, CX, DX.
* write_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
@@ -279,7 +279,7 @@
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \
"call entry_ibpb", \ibpb_feature, \
"call write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm
@@ -368,7 +368,7 @@ extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);

extern void entry_untrain_ret(void);
extern void entry_ibpb(void);
extern void write_ibpb(void);

#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
@@ -514,11 +514,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB)
: ASM_CALL_CONSTRAINT
:: "rax", "rcx", "rdx", "memory");
}

/* The Intel SPEC CTRL MSR base value cache */

@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__

#define ASM_CLAC \
ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
ALTERNATIVE "", "clac", X86_FEATURE_SMAP

#define ASM_STAC \
ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
ALTERNATIVE "", "stac", X86_FEATURE_SMAP

#else /* __ASSEMBLER__ */

static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
alternative("", "clac", X86_FEATURE_SMAP);
}

static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
alternative("", "stac", X86_FEATURE_SMAP);
}

static __always_inline unsigned long smap_save(void)
@@ -59,9 +59,9 @@ static __always_inline void smap_restore(unsigned long flags)

/* These macros can be used in asm() statements */
#define ASM_CLAC \
ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
ALTERNATIVE("", "stac", X86_FEATURE_SMAP)

#define ASM_CLAC_UNSAFE \
ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)

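As a side note, the stac()/clac() helpers patched above are the canonical way kernel code opens and closes a user-space access window under SMAP; ALTERNATIVE turns them into NOPs on CPUs without X86_FEATURE_SMAP. A minimal sketch of the bracketing pattern follows — illustrative only and not from this commit; real code uses __get_user() or user_access_begin(), which add the fault handling omitted here.

#include <asm/smap.h>

/* Sketch: bracketing a raw user access under SMAP.
 * No fault handling, so this is NOT production code.
 */
static inline unsigned char peek_user_byte(const unsigned char __user *p)
{
	unsigned char v;

	stac();	/* patched to STAC on SMAP CPUs, NOP otherwise */
	v = *(const unsigned char __force *)p;
	clac();	/* close the user-access window again */
	return v;
}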
@@ -23,6 +23,8 @@
#include <linux/serial_core.h>
#include <linux/pgtable.h>

#include <xen/xen.h>

#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
@@ -1729,6 +1731,15 @@ int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in */

/*
* Xen disables ACPI in PV DomU guests but it still emulates APIC and
* supports SMP. Returning early here ensures that APIC is not disabled
* unnecessarily and the guest is not limited to a single vCPU.
*/
if (xen_pv_domain() && !xen_initial_domain())
return 0;

if (acpi_disabled || acpi_noirq) {
pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;

@@ -805,6 +805,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static const struct x86_cpu_id erratum_1386_microcode[] = {
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
{}
};

static void fix_erratum_1386(struct cpuinfo_x86 *c)

@@ -59,7 +59,6 @@ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static u64 __ro_after_init x86_arch_cap_msr;

@@ -1142,7 +1141,7 @@ static void __init retbleed_select_mitigation(void)
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

/*
* There is no need for RSB filling: entry_ibpb() ensures
* There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -1592,51 +1591,54 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
rrsba_disabled = true;
}

static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{
/*
* Similar to context switches, there are two types of RSB attacks
* after VM exit:
* WARNING! There are many subtleties to consider when changing *any*
* code related to RSB-related mitigations. Before doing so, carefully
* read the following document, and update if necessary:
*
* 1) RSB underflow
* Documentation/admin-guide/hw-vuln/rsb.rst
*
* 2) Poisoned RSB entry
* In an overly simplified nutshell:
*
* When retpoline is enabled, both are mitigated by filling/clearing
* the RSB.
* - User->user RSB attacks are conditionally mitigated during
* context switches by cond_mitigation -> write_ibpb().
*
* When IBRS is enabled, while #1 would be mitigated by the IBRS branch
* prediction isolation protections, RSB still needs to be cleared
* because of #2. Note that SMEP provides no protection here, unlike
* user-space-poisoned RSB entries.
* - User->kernel and guest->host attacks are mitigated by eIBRS or
* RSB filling.
*
* eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
* bug is present then a LITE version of RSB protection is required,
* just a single call needs to retire before a RET is executed.
* Though, depending on config, note that other alternative
* mitigations may end up getting used instead, e.g., IBPB on
* entry/vmexit, call depth tracking, or return thunks.
*/

switch (mode) {
case SPECTRE_V2_NONE:
return;
break;

case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
}
return;

case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS_RETPOLINE:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
}
break;

case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
return;
}
break;

pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
dump_stack();
default:
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
dump_stack();
break;
}
}

/*
@@ -1830,48 +1832,7 @@ static void __init spectre_v2_select_mitigation(void)
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);

/*
* If Spectre v2 protection has been enabled, fill the RSB during a
* context switch. In general there are two types of RSB attacks
* across context switches, for which the CALLs/RETs may be unbalanced.
*
* 1) RSB underflow
*
* Some Intel parts have "bottomless RSB". When the RSB is empty,
* speculated return targets may come from the branch predictor,
* which could have a user-poisoned BTB or BHB entry.
*
* AMD has it even worse: *all* returns are speculated from the BTB,
* regardless of the state of the RSB.
*
* When IBRS or eIBRS is enabled, the "user -> kernel" attack
* scenario is mitigated by the IBRS branch prediction isolation
* properties, so the RSB buffer filling wouldn't be necessary to
* protect against this type of attack.
*
* The "user -> user" attack scenario is mitigated by RSB filling.
*
* 2) Poisoned RSB entry
*
* If the 'next' in-kernel return stack is shorter than 'prev',
* 'next' could be tricked into speculating with a user-poisoned RSB
* entry.
*
* The "user -> kernel" attack scenario is mitigated by SMEP and
* eIBRS.
*
* The "user -> user" scenario, also known as SpectreBHB, requires
* RSB clearing.
*
* So to mitigate all cases, unconditionally fill RSB on context
* switches.
*
* FIXME: Is this pointless for retbleed-affected AMD?
*/
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
spectre_v2_select_rsb_mitigation(mode);

/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2676,7 +2637,7 @@ static void __init srso_select_mitigation(void)
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

/*
* There is no need for RSB filling: entry_ibpb() ensures
* There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -2701,7 +2662,7 @@ static void __init srso_select_mitigation(void)
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;

/*
* There is no need for RSB filling: entry_ibpb() ensures
* There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/

@@ -3553,6 +3553,22 @@ static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
free_rmid(rgrp->closid, rgrp->mon.rmid);
}

/*
* We allow creating mon groups only within a directory called "mon_groups"
* which is present in every ctrl_mon group. Check if this is a valid
* "mon_groups" directory.
*
* 1. The directory should be named "mon_groups".
* 2. The mon group itself should "not" be named "mon_groups".
* This makes sure "mon_groups" directory always has a ctrl_mon group
* as parent.
*/
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
strcmp(name, "mon_groups"));
}

static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
const char *name, umode_t mode,
enum rdt_group_type rtype, struct rdtgroup **r)
@@ -3568,6 +3584,15 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}

/*
* Check that the parent directory for a monitor group is a "mon_groups"
* directory.
*/
if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) {
ret = -EPERM;
goto out_unlock;
}

if (rtype == RDTMON_GROUP &&
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
@@ -3751,22 +3776,6 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
return ret;
}

/*
* We allow creating mon groups only within a directory called "mon_groups"
* which is present in every ctrl_mon group. Check if this is a valid
* "mon_groups" directory.
*
* 1. The directory should be named "mon_groups".
* 2. The mon group itself should "not" be named "mon_groups".
* This makes sure "mon_groups" directory always has a ctrl_mon group
* as parent.
*/
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
strcmp(name, "mon_groups"));
}

static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
umode_t mode)
{
@@ -3782,11 +3791,8 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);

/*
* If RDT monitoring is supported and the parent directory is a valid
* "mon_groups" directory, add a monitoring subdirectory.
*/
if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
/* Else, attempt to add a monitoring subdirectory. */
if (resctrl_arch_mon_capable())
return rdtgroup_mkdir_mon(parent_kn, name, mode);

return -EPERM;

@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
unsigned long pfn = 0;
u64 last_addr = 0;

for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];

if (pfn < PFN_UP(entry->addr))
register_nosave_region(pfn, PFN_UP(entry->addr));

pfn = PFN_DOWN(entry->addr + entry->size);

if (entry->type != E820_TYPE_RAM)
register_nosave_region(PFN_UP(entry->addr), pfn);
continue;

if (pfn >= limit_pfn)
break;
if (last_addr < entry->addr)
register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));

last_addr = entry->addr + entry->size;
}

register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}

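For reference, PFN_UP() rounds an address up to the next page frame number and PFN_DOWN() rounds down, which is what lets the rewritten loop above describe holes in whole-page units. A small standalone worked example, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x1234;	/* a mid-page address */

	/* PFN_DOWN(0x1234) = 1, PFN_UP(0x1234) = 2: rounding down
	 * gives the frame containing the address, rounding up gives
	 * the first frame fully above it.
	 */
	printf("PFN_DOWN=%lu PFN_UP=%lu\n", PFN_DOWN(addr), PFN_UP(addr));
	return 0;
}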
#ifdef CONFIG_ACPI

@@ -389,10 +389,10 @@ static int __init setup_early_printk(char *buf)
keep = (strstr(buf, "keep") != NULL);

while (*buf != '\0') {
if (!strncmp(buf, "mmio", 4)) {
early_mmio_serial_init(buf + 4);
if (!strncmp(buf, "mmio32", 6)) {
buf += 6;
early_mmio_serial_init(buf);
early_console_register(&early_serial_console, keep);
buf += 4;
}
if (!strncmp(buf, "serial", 6)) {
buf += 6;
@@ -407,9 +407,9 @@ static int __init setup_early_printk(char *buf)
}
#ifdef CONFIG_PCI
if (!strncmp(buf, "pciserial", 9)) {
early_pci_serial_init(buf + 9);
buf += 9; /* Keep from matching the above "pciserial" */
early_pci_serial_init(buf);
early_console_register(&early_serial_console, keep);
buf += 9; /* Keep from matching the above "serial" */
}
#endif
if (!strncmp(buf, "vga", 3) &&

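The reworked parser matches the longer "mmio32" token and advances past it before handing the address to early_mmio_serial_init(). Option strings along these lines are then handled as expected — the values below are illustrative; Documentation/admin-guide/kernel-parameters.txt has the authoritative syntax:

earlyprintk=mmio32,0xfedc2000,115200,keep
earlyprintk=serial,ttyS0,115200
earlyprintk=pciserial,00:18.1,115200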
@@ -667,9 +667,9 @@ static void cond_mitigation(struct task_struct *next)
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);

/*
* Avoid user/user BTB poisoning by flushing the branch predictor
* when switching between processes. This stops one process from
* doing Spectre-v2 attacks on another.
* Avoid user->user BTB/RSB poisoning by flushing them when switching
* between processes. This stops one process from doing Spectre-v2
* attacks on another.
*
* Both, the conditional and the always IBPB mode use the mm
* pointer to avoid the IBPB when switching between tasks of the

@@ -26,7 +26,7 @@
/* code below belongs to the image kernel */
.align PAGE_SIZE
SYM_FUNC_START(restore_registers)
ANNOTATE_NOENDBR
ENDBR
/* go back to the original page tables */
movq %r9, %cr3

@@ -120,7 +120,7 @@ SYM_FUNC_END(restore_image)

/* code below has been relocated to a safe page */
SYM_FUNC_START(core_restore_code)
ANNOTATE_NOENDBR
ENDBR
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */

@@ -54,14 +54,20 @@ struct mc_debug_data {

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
static struct mc_debug_data mc_debug_data_early __initdata;
static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
&mc_debug_data_early;
static struct mc_debug_data __percpu *mc_debug_data_ptr;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

static struct static_key mc_debug __ro_after_init;
static bool mc_debug_enabled __initdata;

static struct mc_debug_data * __ref get_mc_debug(void)
{
if (!mc_debug_data_ptr)
return &mc_debug_data_early;

return this_cpu_ptr(mc_debug_data_ptr);
}

static int __init xen_parse_mc_debug(char *arg)
{
mc_debug_enabled = true;
@@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg)
}
early_param("xen_mc_debug", xen_parse_mc_debug);

void mc_percpu_init(unsigned int cpu)
{
per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
}

static int __init mc_debug_enable(void)
{
unsigned long flags;
struct mc_debug_data __percpu *mcdb;

if (!mc_debug_enabled)
return 0;

mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
if (!mc_debug_data_ptr) {
mcdb = alloc_percpu(struct mc_debug_data);
if (!mcdb) {
pr_err("xen_mc_debug inactive\n");
static_key_slow_dec(&mc_debug);
return -ENOMEM;
@@ -93,7 +95,7 @@ static int __init mc_debug_enable(void)
/* Be careful when switching to percpu debug data. */
local_irq_save(flags);
xen_mc_flush();
mc_percpu_init(0);
mc_debug_data_ptr = mcdb;
local_irq_restore(flags);

pr_info("xen_mc_debug active\n");
@@ -155,7 +157,7 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

if (static_key_false(&mc_debug)) {
mcdb = __this_cpu_read(mc_debug_data);
mcdb = get_mc_debug();
memcpy(mcdb->entries, b->entries,
b->mcidx * sizeof(struct multicall_entry));
}
@@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args)

ret.mc = &b->entries[b->mcidx];
if (static_key_false(&mc_debug)) {
struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
struct mc_debug_data *mcdb = get_mc_debug();

mcdb->caller[b->mcidx] = __builtin_return_address(0);
mcdb->argsz[b->mcidx] = args;

@@ -305,7 +305,6 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
return rc;

xen_pmu_init(cpu);
mc_percpu_init(cpu);

/*
* Why is this a BUG? If the hypercall fails then everything can be

@@ -226,9 +226,7 @@ SYM_CODE_END(xen_early_idt_handler_array)
push %rax
mov $__HYPERVISOR_iret, %eax
syscall /* Do the IRET. */
#ifdef CONFIG_MITIGATION_SLS
int3
#endif
ud2 /* The SYSCALL should never return. */
.endm

SYM_CODE_START(xen_iret)

@@ -261,9 +261,6 @@ void xen_mc_callback(void (*fn)(void *), void *data);
*/
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);

/* Do percpu data initialization for multicalls. */
void mc_percpu_init(unsigned int cpu);

extern bool is_xen_pmu;

irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);

@@ -315,16 +315,7 @@ EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static bool ahash_request_hasvirt(struct ahash_request *req)
{
struct ahash_request *r2;

if (ahash_request_isvirt(req))
return true;

list_for_each_entry(r2, &req->base.list, base.list)
if (ahash_request_isvirt(r2))
return true;

return false;
return ahash_request_isvirt(req);
}

static int ahash_reqchain_virt(struct ahash_save_req_state *state,
@@ -472,7 +463,6 @@ static int ahash_do_req_chain(struct ahash_request *req,
bool update = op == crypto_ahash_alg(tfm)->update;
struct ahash_save_req_state *state;
struct ahash_save_req_state state0;
struct ahash_request *r2;
u8 *page = NULL;
int err;

@@ -509,7 +499,6 @@ static int ahash_do_req_chain(struct ahash_request *req,
state->offset = 0;
state->nbytes = 0;
INIT_LIST_HEAD(&state->head);
list_splice_init(&req->base.list, &state->head);

if (page)
sg_init_one(&state->sg, page, PAGE_SIZE);
@@ -540,9 +529,6 @@ static int ahash_do_req_chain(struct ahash_request *req,

out_set_chain:
req->base.err = err;
list_for_each_entry(r2, &req->base.list, base.list)
r2->base.err = err;

return err;
}

@@ -551,19 +537,10 @@ int crypto_ahash_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

if (likely(tfm->using_shash)) {
struct ahash_request *r2;
int err;

err = crypto_shash_init(prepare_shash_desc(req, tfm));
req->base.err = err;

list_for_each_entry(r2, &req->base.list, base.list) {
struct shash_desc *desc;

desc = prepare_shash_desc(r2, tfm);
r2->base.err = crypto_shash_init(desc);
}

return err;
}

@@ -620,19 +597,10 @@ int crypto_ahash_update(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

if (likely(tfm->using_shash)) {
struct ahash_request *r2;
int err;

err = shash_ahash_update(req, ahash_request_ctx(req));
req->base.err = err;

list_for_each_entry(r2, &req->base.list, base.list) {
struct shash_desc *desc;

desc = ahash_request_ctx(r2);
r2->base.err = shash_ahash_update(r2, desc);
}

return err;
}

@@ -645,19 +613,10 @@ int crypto_ahash_final(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

if (likely(tfm->using_shash)) {
struct ahash_request *r2;
int err;

err = crypto_shash_final(ahash_request_ctx(req), req->result);
req->base.err = err;

list_for_each_entry(r2, &req->base.list, base.list) {
struct shash_desc *desc;

desc = ahash_request_ctx(r2);
r2->base.err = crypto_shash_final(desc, r2->result);
}

return err;
}

@@ -670,19 +629,10 @@ int crypto_ahash_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

if (likely(tfm->using_shash)) {
struct ahash_request *r2;
int err;

err = shash_ahash_finup(req, ahash_request_ctx(req));
req->base.err = err;

list_for_each_entry(r2, &req->base.list, base.list) {
struct shash_desc *desc;

desc = ahash_request_ctx(r2);
r2->base.err = shash_ahash_finup(r2, desc);
}

return err;
}

@@ -757,19 +707,10 @@ int crypto_ahash_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

if (likely(tfm->using_shash)) {
struct ahash_request *r2;
int err;

err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
req->base.err = err;

list_for_each_entry(r2, &req->base.list, base.list) {
struct shash_desc *desc;

desc = prepare_shash_desc(r2, tfm);
r2->base.err = shash_ahash_digest(r2, desc);
}

return err;
}

@@ -1133,20 +1074,5 @@ int ahash_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_request_free(struct ahash_request *req)
{
struct ahash_request *tmp;
struct ahash_request *r2;

if (unlikely(!req))
return;

list_for_each_entry_safe(r2, tmp, &req->base.list, base.list)
kfree_sensitive(r2);

kfree_sensitive(req);
}
EXPORT_SYMBOL_GPL(ahash_request_free);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");

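With request chaining removed, each ahash request stands alone again. For orientation, here is a minimal sketch of the remaining single-request API — simplified error handling, and the algorithm name "sha256" is just an example:

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int sha256_oneshot(void *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* crypto_ahash_digest() may complete asynchronously */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}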
@@ -111,10 +111,14 @@ static void scomp_free_streams(struct scomp_alg *alg)
struct crypto_acomp_stream __percpu *stream = alg->stream;
int i;

alg->stream = NULL;
if (!stream)
return;

for_each_possible_cpu(i) {
struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

if (!ps->ctx)
if (IS_ERR_OR_NULL(ps->ctx))
break;

alg->free_ctx(ps->ctx);
@@ -132,6 +136,8 @@ static int scomp_alloc_streams(struct scomp_alg *alg)
if (!stream)
return -ENOMEM;

alg->stream = stream;

for_each_possible_cpu(i) {
struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

@@ -143,8 +149,6 @@ static int scomp_alloc_streams(struct scomp_alg *alg)

spin_lock_init(&ps->lock);
}

alg->stream = stream;
return 0;
}

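The scomp fix also shows the usual per-CPU allocation idiom: allocate with alloc_percpu(), initialize every possible CPU's slot, and only then publish the pointer. A generic sketch, with illustrative names:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>

struct pcpu_slot {
	spinlock_t lock;
	void *ctx;
};

static struct pcpu_slot __percpu *slots;

static int init_slots(void)
{
	struct pcpu_slot __percpu *tmp;
	int cpu;

	tmp = alloc_percpu(struct pcpu_slot);
	if (!tmp)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(tmp, cpu)->lock);

	slots = tmp;	/* publish only after every slot is initialized */
	return 0;
}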
@@ -332,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
return -EINVAL;

ret = ivpu_rpm_get(vdev);
if (ret)
if (ret < 0)
return ret;

ivpu_pm_trigger_recovery(vdev, "debugfs");
@@ -383,7 +383,7 @@ static int dct_active_set(void *data, u64 active_percent)
return -EINVAL;

ret = ivpu_rpm_get(vdev);
if (ret)
if (ret < 0)
return ret;

if (active_percent)

@@ -302,7 +302,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
struct ivpu_ipc_consumer cons;
int ret;

drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
pm_runtime_enabled(vdev->drm.dev));

ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

@@ -4,6 +4,7 @@
*/

#include <drm/drm_file.h>
#include <linux/pm_runtime.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
@@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
return -EINVAL;

ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;

mutex_lock(&file_priv->ms_lock);

if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
@@ -96,6 +101,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
kfree(ms);
unlock:
mutex_unlock(&file_priv->ms_lock);

ivpu_rpm_put(vdev);
return ret;
}

@@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (!args->metric_group_mask)
return -EINVAL;

ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;

mutex_lock(&file_priv->ms_lock);

ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
unlock:
mutex_unlock(&file_priv->ms_lock);

ivpu_rpm_put(vdev);
return ret;
}

@@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_stop *args = data;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
int ret;

if (!args->metric_group_mask)
return -EINVAL;

ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;

mutex_lock(&file_priv->ms_lock);

ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file

mutex_unlock(&file_priv->ms_lock);

ivpu_rpm_put(vdev);
return ms ? 0 : -EINVAL;
}

@@ -281,6 +300,9 @@ int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
struct ivpu_ms_instance *ms, *tmp;
struct ivpu_device *vdev = file_priv->vdev;

pm_runtime_get_sync(vdev->drm.dev);

mutex_lock(&file_priv->ms_lock);

@@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
free_instance(file_priv, ms);

mutex_unlock(&file_priv->ms_lock);

pm_runtime_put_autosuspend(vdev->drm.dev);
}

void ivpu_ms_cleanup_all(struct ivpu_device *vdev)

@@ -458,7 +458,7 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
acpi_pm_wakeup_event(&device->dev);

button = acpi_driver_data(device);
if (button->suspended)
if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE)
return;

input = button->input;

@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
/*
* Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
* https://gitlab.freedesktop.org/drm/amd/-/issues/3929
*/
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
}
},
{ },
};

@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor *);
proc_sz = sizeof(struct acpi_pptt_processor);

while ((unsigned long)entry + proc_sz < table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor *);
proc_sz = sizeof(struct acpi_pptt_processor);

/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {

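The PPTT change above is a classic sizeof bug: sizeof(struct acpi_pptt_processor *) is the size of a pointer, while sizeof(struct acpi_pptt_processor) is the size of the structure itself, so the old bound check could accept a truncated entry near the end of the table. A standalone demonstration of the difference — the struct layout here is made up, not the real ACPI one:

#include <stdio.h>
#include <stdint.h>

struct node {			/* stand-in, not the real ACPI layout */
	uint8_t  type;
	uint8_t  length;
	uint16_t reserved;
	uint32_t flags;
	uint64_t parent;
};

int main(void)
{
	/* On LP64: the pointer is 8 bytes, the struct is 16 bytes, so
	 * using the pointer size as the entry-size bound under-counts.
	 */
	printf("sizeof(struct node *) = %zu\n", sizeof(struct node *));
	printf("sizeof(struct node)   = %zu\n", sizeof(struct node));
	return 0;
}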
@@ -1510,6 +1510,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
u64 sense_valid, val;
u16 extended_sense;
bool aux_icc_valid;
int ret = 0;

err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1529,6 +1531,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)

sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
((u64)buf[10] << 16) | ((u64)buf[11] << 24);
extended_sense = get_unaligned_le16(&buf[14]);
aux_icc_valid = extended_sense & BIT(15);

ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
@@ -1556,6 +1560,17 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
continue;
}

qc->result_tf.nsect = sense[6];
qc->result_tf.hob_nsect = sense[7];
qc->result_tf.lbal = sense[8];
qc->result_tf.lbam = sense[9];
qc->result_tf.lbah = sense[10];
qc->result_tf.hob_lbal = sense[11];
qc->result_tf.hob_lbam = sense[12];
qc->result_tf.hob_lbah = sense[13];
if (aux_icc_valid)
qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);

/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
qc->scsicmd->sense_buffer, sk,

@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)

ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
if (!ap->ioaddr.cmd_addr)
return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
if (!ap->ioaddr.ctl_addr)
return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
if (!ap->ioaddr.bmdma_addr)
return -ENOMEM;

/*
* Adjust register offsets

@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;

for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
pdc_i2c_read_data[i].reg,
&spd0[pdc_i2c_read_data[i].ofs]);
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
pdc_i2c_read_data[i].reg,
&spd0[pdc_i2c_read_data[i].ofs])) {
dev_err(host->dev,
"Failed in i2c read at index %d: device=%#x, reg=%#x\n",
i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
return -EIO;
}

data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)

/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
if (size < 0)
return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);

/* Programming DIMM Module Global Control Register (index_CID0:88h) */

@@ -2031,7 +2031,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb->disk->minors = 1;
nullb->disk->fops = &null_ops;
nullb->disk->private_data = nullb;
strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
strscpy(nullb->disk->disk_name, nullb->disk_name);

if (nullb->dev->zoned) {
rv = null_register_zoned_dev(nullb);

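On the null_blk change: strscpy() copies and NUL-terminates without touching the rest of the destination, while strscpy_pad() additionally zero-fills the tail — the padding only matters when the whole buffer is later copied out verbatim. A small hedged sketch of both, using the newer two-argument forms that infer the size from the array type:

#include <linux/string.h>

static void copy_name_example(void)
{
	char buf[16];

	strscpy(buf, "nullb0");	/* "nullb0\0", bytes 7..15 untouched */
	strscpy_pad(buf, "nullb0");	/* "nullb0\0" plus a zeroed tail */
}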
@@ -1286,7 +1286,9 @@ static void nxp_coredump(struct hci_dev *hdev)
u8 pcmd = 2;

skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd);
if (!IS_ERR(skb))
if (IS_ERR(skb))
bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb));
else
kfree_skb(skb);
}

@@ -1445,9 +1447,6 @@ static int nxp_shutdown(struct hci_dev *hdev)
/* HCI_NXP_IND_RESET command may not return any response */
if (!IS_ERR(skb))
kfree_skb(skb);
} else if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
}

return 0;
@@ -1799,13 +1798,15 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
}

if (test_bit(HCI_RUNNING, &hdev->flags)) {
/* Ensure shutdown callback is executed before unregistering, so
* that baudrate is reset to initial value.
} else {
/* Restore FW baudrate to fw_init_baudrate if changed.
* This will ensure FW baudrate is in sync with
* driver baudrate in case this driver is re-inserted.
*/
nxp_shutdown(hdev);
if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
}
}

ps_cleanup(nxpdev);

@@ -889,7 +889,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T)
variant = "t";
else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S)
variant = "u";
variant = "s";

snprintf(config.fwname, sizeof(config.fwname),
"qca/cmnv%02x%s.bin", rom_ver, variant);

@@ -1215,6 +1215,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
rtl_dev_err(hdev, "mandatory config file %s not found",
btrtl_dev->ic_info->cfg_name);
ret = btrtl_dev->cfg_len;
if (!ret)
ret = -EINVAL;
goto err_free;
}
}

@@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev)

static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
char buf[80];
const char *buf;

snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
buf = "Controller Name: vhci_ctrl\n";
skb_put_data(skb, buf, strlen(buf));

snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
buf = "Firmware Version: vhci_fw\n";
skb_put_data(skb, buf, strlen(buf));

snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
buf = "Driver: vhci_drv\n";
skb_put_data(skb, buf, strlen(buf));

snprintf(buf, sizeof(buf), "Vendor: vhci\n");
buf = "Vendor: vhci\n";
skb_put_data(skb, buf, strlen(buf));
}

@@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
qm_fd_addr_set64(&fd, addr);

do {
refcount_inc(&req->drv_ctx->refcnt);
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
if (likely(!ret)) {
refcount_inc(&req->drv_ctx->refcnt);
if (likely(!ret))
return 0;
}

refcount_dec(&req->drv_ctx->refcnt);
if (ret != -EBUSY)
break;
num_retries++;

@@ -269,7 +269,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
unsigned int cmdlen, key1_id, key2_id;
int ret;

rctx->iv = (u32 *)req->iv;
rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
rctx->len = req->cryptlen;
key1_id = ctx->key1_id;
key2_id = ctx->key2_id;
@@ -498,9 +498,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
if (!req->cryptlen)
return 0;

if (ctx->alg == SE_ALG_ECB)
req->iv = NULL;

rctx->encrypt = encrypt;

return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);

@@ -393,7 +393,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;

pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;

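The udmabuf fix is a 32-bit overflow: size_limit_mb * 1024 * 1024 is evaluated in 32-bit arithmetic before the shift, so limits at or above 4 GiB wrapped. Promoting one operand to u64 performs the whole multiply in 64 bits. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t size_limit_mb = 8192;	/* 8 GiB expressed in MiB */

	uint32_t wrapped = size_limit_mb * 1024 * 1024;	/* truncated */
	uint64_t correct = (uint64_t)size_limit_mb * 1024 * 1024;

	printf("32-bit: %u\n", wrapped);	/* prints 0 — wrapped */
	printf("64-bit: %llu\n", (unsigned long long)correct);
	return 0;
}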
@@ -461,36 +461,6 @@ unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *pri
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");

/**
* cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
*
* @priv: Pointer to struct cs_dsp_test.
*
* Return: Firmware version word value.
*/
unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
{
unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
union {
struct wmfw_id_hdr adsp2;
struct wmfw_v3_id_hdr halo;
} hdr;

switch (priv->dsp->type) {
case WMFW_ADSP2:
regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
return be32_to_cpu(hdr.adsp2.ver);
case WMFW_HALO:
regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
return be32_to_cpu(hdr.halo.ver);
default:
KUNIT_FAIL(priv->test, NULL);
return 0;
}
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
"FW_CS_DSP_KUNIT_TEST_UTILS");

/**
* cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
*

@@ -2198,7 +2198,7 @@ static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)

priv->local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
cs_dsp_mock_xm_header_get_fw_version(xm_hdr));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);

/* We must provide a dummy wmfw to load */

@@ -451,7 +451,7 @@ static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *ds

local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
cs_dsp_mock_xm_header_get_fw_version(local->xm_header));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);

/* Init cs_dsp */

@@ -170,4 +170,5 @@ static struct kunit_suite fpga_bridge_suite = {

kunit_test_suite(fpga_bridge_suite);

MODULE_DESCRIPTION("KUnit test for the FPGA Bridge");
MODULE_LICENSE("GPL");

@@ -330,4 +330,5 @@ static struct kunit_suite fpga_mgr_suite = {

kunit_test_suite(fpga_mgr_suite);

MODULE_DESCRIPTION("KUnit test for the FPGA Manager");
MODULE_LICENSE("GPL");

@@ -214,4 +214,5 @@ static struct kunit_suite fpga_region_suite = {

kunit_test_suite(fpga_region_suite);

MODULE_DESCRIPTION("KUnit test for the FPGA Region");
MODULE_LICENSE("GPL");

@@ -105,7 +105,7 @@ static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)
if (!test_and_set_bit(0, &fwctl_tainted)) {
dev_warn(
&fwctl->dev,
"%s(%d): has requested full access to the physical device device",
"%s(%d): has requested full access to the physical device",
current->comm, task_pid_nr(current));
add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK);
}

@@ -105,12 +105,14 @@ static int pdsfc_identify(struct pdsfc_dev *pdsfc)
static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
{
struct device *dev = &pdsfc->fwctl.dev;
u32 num_endpoints;
int i;

if (!pdsfc->endpoints)
return;

for (i = 0; pdsfc->endpoint_info && i < pdsfc->endpoints->num_entries; i++)
num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
mutex_destroy(&pdsfc->endpoint_info[i].lock);
vfree(pdsfc->endpoint_info);
pdsfc->endpoint_info = NULL;
@@ -199,7 +201,7 @@ static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
for (i = 0; i < num_endpoints; i++) {
mutex_init(&pdsfc->endpoint_info[i].lock);
pdsfc->endpoint_info[i].endpoint = ep_entry[i].id;
pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
}

return 0;
@@ -214,6 +216,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
struct pds_fwctl_query_data *data;
union pds_core_adminq_cmd cmd;
dma_addr_t data_pa;
u32 num_entries;
int err;
int i;

@@ -246,8 +249,9 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
*pa = data_pa;

entries = (struct pds_fwctl_query_data_operation *)data->entries;
dev_dbg(dev, "num_entries %d\n", data->num_entries);
for (i = 0; i < data->num_entries; i++) {
num_entries = le32_to_cpu(data->num_entries);
dev_dbg(dev, "num_entries %d\n", num_entries);
for (i = 0; i < num_entries; i++) {

/* Translate FW command attribute to fwctl scope */
switch (entries[i].scope) {
@@ -267,7 +271,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
break;
}
dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
ep, entries[i].id, entries[i].scope);
ep, le32_to_cpu(entries[i].id), entries[i].scope);
}

return data;
@@ -280,24 +284,26 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
struct pds_fwctl_query_data_operation *op_entry;
struct pdsfc_rpc_endpoint_info *ep_info = NULL;
struct device *dev = &pdsfc->fwctl.dev;
u32 num_entries;
int i;

/* validate rpc in_len & out_len based
* on ident.max_req_sz & max_resp_sz
*/
if (rpc->in.len > pdsfc->ident.max_req_sz) {
if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
dev_dbg(dev, "Invalid request size %u, max %u\n",
rpc->in.len, pdsfc->ident.max_req_sz);
rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
return -EINVAL;
}

if (rpc->out.len > pdsfc->ident.max_resp_sz) {
if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
dev_dbg(dev, "Invalid response size %u, max %u\n",
rpc->out.len, pdsfc->ident.max_resp_sz);
rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
return -EINVAL;
}

for (i = 0; i < pdsfc->endpoints->num_entries; i++) {
num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
for (i = 0; i < num_entries; i++) {
if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
ep_info = &pdsfc->endpoint_info[i];
break;
@@ -326,8 +332,9 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,

/* reject unsupported and/or out of scope commands */
op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
for (i = 0; i < ep_info->operations->num_entries; i++) {
if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, op_entry[i].id)) {
num_entries = le32_to_cpu(ep_info->operations->num_entries);
for (i = 0; i < num_entries; i++) {
if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) {
if (scope < op_entry[i].scope)
return -EPERM;
return 0;
@@ -402,7 +409,7 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
cmd = (union pds_core_adminq_cmd) {
.fwctl_rpc = {
.opcode = PDS_FWCTL_CMD_RPC,
.flags = PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP,
.flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP),
.ep = cpu_to_le32(rpc->in.ep),
.op = cpu_to_le32(rpc->in.op),
.req_pa = cpu_to_le64(in_payload_dma_addr),

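The pds_fwctl fixes are all endianness conversions: fields shared with device firmware are declared __le16/__le32 and must pass through cpu_to_le*() on the way out and le*_to_cpu() on the way in, which sparse can enforce. A minimal sketch of the convention, with an illustrative wire format:

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_msg {			/* illustrative, not the real layout */
	__le16 flags;
	__le32 num_entries;
};

static u32 read_count(const struct fw_msg *msg)
{
	/* no-op on little-endian hosts, byte swap on big-endian ones */
	return le32_to_cpu(msg->num_entries);
}

static void set_flags(struct fw_msg *msg, u16 flags)
{
	msg->flags = cpu_to_le16(flags);
}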
@@ -353,7 +353,6 @@ enum amdgpu_kiq_irq {
|
||||
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
|
||||
AMDGPU_CP_KIQ_IRQ_LAST
|
||||
};
|
||||
#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
|
||||
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
|
||||
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
|
||||
#define MAX_KIQ_REG_TRY 1000
|
||||
|
||||
@@ -3643,6 +3643,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
				adev, adev->ip_blocks[i].version->type))
			continue;

		/* Since we skip suspend for S0i3, we need to cancel the delayed
		 * idle work here as the suspend callback never gets called.
		 */
		if (adev->in_s0ix &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
			cancel_delayed_work_sync(&adev->gfx.idle_work);
		/* skip suspend of gfx/mes and psp for S0ix
		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
		 * like at runtime. PSP is also part of the always on hardware

@@ -120,6 +120,8 @@ MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");

#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3

@@ -75,11 +75,25 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct dma_buf *dmabuf = attach->dmabuf;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
	u32 domains = bo->preferred_domains;

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	dma_resv_assert_held(dmabuf->resv);

	/*
	 * Try pinning into VRAM to allow P2P with RDMA NICs without ODP
	 * support if all attachments can do P2P. If any attachment can't do
	 * P2P just pin into GTT instead.
	 */
	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (!attach->peer2peer)
			domains &= ~AMDGPU_GEM_DOMAIN_VRAM;

	if (domains & AMDGPU_GEM_DOMAIN_VRAM)
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return amdgpu_bo_pin(bo, domains);
}

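The reworked pin callback computes the strictest memory domain acceptable to every importer before pinning: start from the buffer's preferred domains and drop VRAM as soon as any attachment cannot do peer-to-peer. The same capability-intersection idea, reduced to a standalone sketch with made-up flag and type names:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical domain flags mirroring AMDGPU_GEM_DOMAIN_* */
#define DOM_GTT  (1u << 0)
#define DOM_VRAM (1u << 1)

struct attachment { bool peer2peer; };

/* Intersect the buffer's preferred domains with what every importer
 * can handle; one non-P2P importer forces the GTT fallback. */
static uint32_t pick_pin_domains(uint32_t preferred,
				 const struct attachment *att, size_t n)
{
	uint32_t domains = preferred;

	for (size_t i = 0; i < n; i++)
		if (!att[i].peer2peer)
			domains &= ~DOM_VRAM;
	return domains;
}

int main(void)
{
	struct attachment atts[] = { { true }, { false } };
	uint32_t d = pick_pin_domains(DOM_VRAM | DOM_GTT, atts, 2);

	printf("pin into %s\n", (d & DOM_VRAM) ? "VRAM" : "GTT"); /* GTT here */
	return 0;
}
```
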
/**
@@ -134,9 +148,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.resource->mem_type) {
@@ -184,7 +195,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	if (sgt->sgl->page_link) {
	if (sg_page(sgt->sgl)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);

@@ -699,12 +699,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
				   uint32_t flush_type, bool all_hub,
				   uint32_t inst)
{
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
		adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
	unsigned int ndw;
	int r;
	int r, cnt = 0;
	uint32_t seq;

	/*
@@ -761,10 +759,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,

	amdgpu_ring_commit(ring);
	spin_unlock(&adev->gfx.kiq[inst].ring_lock);
	if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
	       !amdgpu_reset_pending(adev->reset_domain)) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY) {
		dev_err(adev->dev, "timeout waiting for kiq fence\n");
		r = -ETIME;
	}
	} else
		r = 0;
}

error_unlock_reset:

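This rework replaces one long blocking poll with a bounded retry loop: a short poll, then sleep-and-retry up to a fixed cap, bailing out early if a reset is pending, and only then declaring a timeout. A userspace sketch of the same shape; the constants and the fence stub are stand-ins, not the driver's API:

```c
#include <stdio.h>
#include <unistd.h>		/* usleep() */

#define POLL_WAIT_US		5000	/* per-attempt wait, like MAX_KIQ_REG_WAIT */
#define MAX_TRIES		1000	/* like MAX_KIQ_REG_TRY */
#define BAILOUT_SLEEP_MS	5	/* like MAX_KIQ_REG_BAILOUT_INTERVAL */

/* Stand-in for amdgpu_fence_wait_polling(): returns >= 1 once the
 * fence has signalled, < 1 otherwise. */
static int wait_fence_once(int *state)
{
	usleep(POLL_WAIT_US);
	return ++(*state) >= 3;	/* pretend the fence signals on the 3rd poll */
}

int main(void)
{
	int state = 0, cnt = 0;
	int r = wait_fence_once(&state);

	/* Short poll first, then sleep between retries so a slow
	 * (e.g. virtualized) backend gets time without busy-waiting. */
	while (r < 1 && cnt++ < MAX_TRIES) {
		usleep(BAILOUT_SLEEP_MS * 1000);
		r = wait_fence_once(&state);
	}

	if (cnt > MAX_TRIES) {
		fprintf(stderr, "timeout waiting for fence\n");
		return 1;
	}
	printf("fence signalled after %d retries\n", cnt);
	return 0;
}
```
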
@@ -163,8 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
		 * When GTT is just an alternative to VRAM make sure that we
		 * only use it as fallback and still try to fill up VRAM first.
		 */
		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    !(adev->flags & AMD_IS_APU))
		if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
		    domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
			places[c].flags |= TTM_PL_FLAG_FALLBACK;
		c++;
	}

@@ -24,6 +24,7 @@

#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -907,6 +908,9 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
	if (IS_ERR(man->cg))
		return PTR_ERR(man->cg);
	ttm_resource_manager_init(man, &adev->mman.bdev,
				  adev->gmc.real_vram_size);

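drmm_cgroup_register_region() reports failure through its return value, which the new code unwraps with the kernel's IS_ERR()/PTR_ERR() convention: small negative errno values live in the pointer itself, so one return carries either a valid object or an error code. A userspace approximation, assuming the usual errno-in-pointer encoding:

```c
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }

/* A pointer in the top MAX_ERRNO bytes of the address space is an error. */
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator: returns either a valid object or an error. */
static void *make_region(int fail)
{
	static int region = 42;

	return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)&region;
}

int main(void)
{
	void *cg = make_region(1);

	if (IS_ERR(cg)) {
		printf("register failed: %ld\n", PTR_ERR(cg));
		return 1;
	}
	return 0;
}
```
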
@@ -894,6 +894,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
	int pipe;

	/* return early if we have already fetched these */
	if (adev->mes.sched_version && adev->mes.kiq_version)
		return;

	/* get MES scheduler/KIQ versions */
	mutex_lock(&adev->srbm_mutex);

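The early return turns the version query into a fetch-once cache: a zero value means "not read yet", and once both versions are populated the expensive register access is skipped. A sketch of the pattern with invented register values:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the cached firmware version fields. */
static uint32_t sched_version, kiq_version;

static uint32_t read_hw_register(int which)
{
	/* pretend this is an expensive MMIO read done under a lock */
	return which ? 0x4b0000 : 0x640000;
}

/* Same shape as the hunk above: treat zero as "not fetched yet" and
 * return early once both values are cached. */
static void get_fw_version(void)
{
	if (sched_version && kiq_version)
		return;

	sched_version = read_hw_register(0);
	kiq_version = read_hw_register(1);
}

int main(void)
{
	get_fw_version();	/* reads "hardware" */
	get_fw_version();	/* early-returns from the cache */
	printf("sched %#x kiq %#x\n", sched_version, kiq_version);
	return 0;
}
```
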
@@ -1392,17 +1392,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
		mes_v12_0_queue_init_register(ring);
	}

	/* get MES scheduler/KIQ versions */
	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, pipe, 0, 0);
	if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
	    ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
		/* get MES scheduler/KIQ versions */
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, 3, pipe, 0, 0);

	if (pipe == AMDGPU_MES_SCHED_PIPE)
		adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
	else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
		adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
		if (pipe == AMDGPU_MES_SCHED_PIPE)
			adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
			adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	return 0;
}

@@ -1983,9 +1983,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
	if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
		dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;

	if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
		dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;

	if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
		if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) ||
		    KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4))
@@ -2001,7 +1998,11 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
			dev->node_props.capability |=
				HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;

		dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
		if (!amdgpu_sriov_vf(dev->gpu->adev))
			dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;

		if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
			dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
	} else {
		dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
			HSA_DBG_WATCH_ADDR_MASK_HI_BIT;

@@ -1722,6 +1722,13 @@ static const struct dmi_system_id dmi_quirk_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
		},
	},
	{
		.callback = edp0_on_dp1_callback,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
		},
	},
	{
		.callback = edp0_on_dp1_callback,
		.matches = {
@@ -1729,6 +1736,20 @@ static const struct dmi_system_id dmi_quirk_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
		},
	},
	{
		.callback = edp0_on_dp1_callback,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
		},
	},
	{
		.callback = edp0_on_dp1_callback,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
		},
	},
	{}
	/* TODO: refactor this from a fixed table to a dynamic option */
};

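All four additions are rows in a sentinel-terminated DMI match table: each entry pairs matching criteria with a quirk callback, and the empty {} entry marks the end so the table needs no length field. The same table-plus-callback shape in a standalone sketch (the struct and names here are hypothetical):

```c
#include <stdio.h>
#include <string.h>

/* Userspace sketch of a sentinel-terminated quirk table, the same
 * shape as the dmi_system_id table above. */
struct quirk {
	const char *vendor;
	const char *product;
	void (*callback)(void);
};

static void edp0_on_dp1(void) { puts("applying eDP0-on-DP1 quirk"); }

static const struct quirk quirk_table[] = {
	{ "HP", "HP EliteBook 645 14 inch G11 Notebook PC", edp0_on_dp1 },
	{ "HP", "HP ProBook 445 14 inch G11 Notebook PC",   edp0_on_dp1 },
	{ 0 }	/* sentinel ends the table, like the {} entry above */
};

static void apply_quirks(const char *vendor, const char *product)
{
	for (const struct quirk *q = quirk_table; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			q->callback();
}

int main(void)
{
	apply_quirks("HP", "HP ProBook 445 14 inch G11 Notebook PC");
	return 0;
}
```
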
@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
 *
 * Panel Replay and PSR SU
 * - Enable when:
 *      - VRR is disabled
 *      - vblank counter is disabled
 *      - entry is allowed: usermode demonstrates an adequate number of fast
 *        commits)
@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
	bool is_sr_active = (link->replay_settings.replay_allow_active ||
			     link->psr_settings.psr_allow_active);
	bool is_crc_window_active = false;
	bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
	is_crc_window_active =
		amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
#endif

	if (link->replay_settings.replay_feature_enabled &&
	if (link->replay_settings.replay_feature_enabled && !vrr_active &&
	    allow_sr_entry && !is_sr_active && !is_crc_window_active) {
		amdgpu_dm_replay_enable(vblank_work->stream, true);
	} else if (vblank_enabled) {
		if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
			amdgpu_dm_psr_disable(vblank_work->stream, false);
	} else if (link->psr_settings.psr_feature_enabled &&
	} else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
	    allow_sr_entry && !is_sr_active && !is_crc_window_active) {

		struct amdgpu_dm_connector *aconn =

@@ -244,6 +246,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
	int r;

	mutex_lock(&dm->dc_lock);

@@ -271,8 +275,15 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
			vblank_work->acrtc->dm_irq_params.allow_sr_entry);
	}

	if (dm->active_vblank_irq_count == 0)
	if (dm->active_vblank_irq_count == 0) {
		r = amdgpu_dpm_pause_power_profile(adev, true);
		if (r)
			dev_warn(adev->dev, "failed to set default power profile mode\n");
		dc_allow_idle_optimizations(dm->dc, true);
		r = amdgpu_dpm_pause_power_profile(adev, false);
		if (r)
			dev_warn(adev->dev, "failed to restore the power profile mode\n");
	}

	mutex_unlock(&dm->dc_lock);

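The worker now brackets the idle-optimization transition with a pause/restore pair and only warns on failure, since failing the vblank path outright would be worse than running with the wrong profile. Reduced to a sketch with stand-in functions:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the DPM calls used above. */
static int pause_power_profile(int pause)
{
	printf("power profile %s\n", pause ? "paused (bootup default)" : "restored");
	return 0;	/* 0 on success, negative errno on failure */
}

static void allow_idle_optimizations(void) { puts("idle optimizations enabled"); }

/* The bracket pattern from the hunk above: pause the workload profile,
 * perform the transition, then restore it, warning (not failing) on
 * either side so the display path keeps going. */
static void enter_display_idle(void)
{
	if (pause_power_profile(1))
		fprintf(stderr, "warning: failed to set default power profile mode\n");

	allow_idle_optimizations();

	if (pause_power_profile(0))
		fprintf(stderr, "warning: failed to restore the power profile mode\n");
}

int main(void)
{
	enter_display_idle();
	return 0;
}
```
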
@@ -86,6 +86,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
	/* Store configuration options */
	(*dml_ctx)->config = *config;

	DC_FP_START();

	/*Initialize SOCBB and DCNIP params */
	dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
	dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
@@ -96,6 +98,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co

	/*Initialize DML21 instance */
	dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);

	DC_FP_END();
}

bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
@@ -283,11 +287,16 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
	bool out = false;

	DC_FP_START();

	/* Use dml_validate_only for fast_validate path */
	if (fast_validate) {
	if (fast_validate)
		out = dml21_check_mode_support(in_dc, context, dml_ctx);
	} else
	else
		out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);

	DC_FP_END();

	return out;
}

@@ -426,8 +435,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,

	dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;

	DC_FP_START();

	/* need to initialize copied instance for internal references to be correct */
	dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);

	DC_FP_END();
}

bool dml21_create_copy(struct dml2_context **dst_dml_ctx,

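Each of these hunks wraps the DML's floating-point math in a DC_FP_START()/DC_FP_END() pair, the bracket the kernel requires around FPU use in this code path. A toy model of the bracket discipline, with the guard reduced to a depth counter so the balance can be checked:

```c
#include <stdio.h>

/* Userspace sketch of the DC_FP_START()/DC_FP_END() discipline above:
 * every floating-point region is bracketed by a begin/end pair, modelled
 * here as a depth counter where a real kernel would save and restore
 * the FPU state. */
static int fp_depth;

static void fp_begin(void) { fp_depth++; }
static void fp_end(void)   { fp_depth--; }

static double compute_bandwidth(double clock_mhz, double lanes)
{
	double out;

	fp_begin();
	out = clock_mhz * lanes / 8.0;	/* FP math only inside the bracket */
	fp_end();

	return out;
}

int main(void)
{
	printf("bw = %.1f MB/s\n", compute_bandwidth(8100.0, 4.0));
	printf("balanced: %s\n", fp_depth == 0 ? "yes" : "no");
	return 0;
}
```
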
@@ -732,11 +732,16 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
		return out;
	}

	DC_FP_START();

	/* Use dml_validate_only for fast_validate path */
	if (fast_validate)
		out = dml2_validate_only(context);
	else
		out = dml2_validate_and_build_resource(in_dc, context);

	DC_FP_END();

	return out;
}

@@ -779,11 +784,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
		break;
	}

	DC_FP_START();

	initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);

	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);

	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);

	DC_FP_END();
}

bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)

@@ -429,6 +429,7 @@ struct amd_pm_funcs {
	int (*set_pp_table)(void *handle, const char *buf, size_t size);
	void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
	int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
	int (*pause_power_profile)(void *handle, bool pause);
	/* export to amdgpu */
	struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
	int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,

@@ -349,6 +349,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
	return ret;
}

int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
				   bool pause)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->pause_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->pause_power_profile(
			adev->powerplay.pp_handle, pause);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{

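The new wrapper dispatches to an optional backend hook under the PM mutex and treats an absent hook as success. The same guarded-dispatch shape as a standalone sketch; the types and names are stand-ins, and the program builds with -lpthread:

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical ops table: a backend may or may not provide the hook,
 * mirroring how pp_funcs->pause_power_profile is probed above. */
struct pm_funcs {
	int (*pause_power_profile)(void *handle, int pause);
};

static int smu_pause(void *handle, int pause)
{
	(void)handle;
	printf("profile %s\n", pause ? "paused" : "resumed");
	return 0;
}

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Dispatch only if the hook exists, and only while holding the PM lock;
 * a missing hook is silently treated as success, as in the hunk above. */
static int pause_power_profile(const struct pm_funcs *funcs, void *handle,
			       int pause)
{
	int ret = 0;

	if (funcs && funcs->pause_power_profile) {
		pthread_mutex_lock(&pm_mutex);
		ret = funcs->pause_power_profile(handle, pause);
		pthread_mutex_unlock(&pm_mutex);
	}
	return ret;
}

int main(void)
{
	struct pm_funcs funcs = { .pause_power_profile = smu_pause };

	return pause_power_profile(&funcs, NULL, 1);
}
```
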
@@ -410,6 +410,8 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en);
int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
				   bool pause);

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);

@@ -2398,7 +2398,11 @@ static int smu_switch_power_profile(void *handle,
		smu_power_profile_mode_get(smu, type);
	else
		smu_power_profile_mode_put(smu, type);
	ret = smu_bump_power_profile_mode(smu, NULL, 0);
	/* don't switch the active workload when paused */
	if (smu->pause_workload)
		ret = 0;
	else
		ret = smu_bump_power_profile_mode(smu, NULL, 0);
	if (ret) {
		if (enable)
			smu_power_profile_mode_put(smu, type);
@@ -2411,6 +2415,35 @@ static int smu_switch_power_profile(void *handle,
	return 0;
}

static int smu_pause_power_profile(void *handle,
				   bool pause)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	int ret;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		smu->pause_workload = pause;

		/* force to bootup default profile */
		if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
			ret = smu->ppt_funcs->set_power_profile_mode(smu,
								     workload_mask,
								     NULL,
								     0);
		else
			ret = smu_bump_power_profile_mode(smu, NULL, 0);
		return ret;
	}

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
@@ -3733,6 +3766,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	.pause_power_profile = smu_pause_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,

@@ -558,6 +558,7 @@ struct smu_context {

	/* asic agnostic workload mask */
	uint32_t workload_mask;
	bool pause_workload;
	/* default/user workload preference */
	uint32_t power_profile_mode;
	uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];

@@ -1204,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;

	if (speed == 0)
	if (!speed || speed > UINT_MAX/8)
		return -EINVAL;
	/*
	 * To prevent from possible overheat, some ASICs may have requirement

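The strengthened check rejects speeds that would overflow the subsequent multiply-by-8 in the tachometer-period math. The generic form: before computing a * b in unsigned arithmetic, require a <= UINT_MAX / b. A sketch with a hypothetical period formula (the 60-seconds and 8-pulses constants are assumptions, not the SMU's real values):

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Reject a zero divisor and any rpm where rpm * 8 would wrap, the same
 * guard as the hunk above, then compute a made-up tach period. */
static int tach_period_from_rpm(uint32_t crystal_khz, uint32_t rpm,
				uint32_t *out)
{
	if (!rpm || rpm > UINT_MAX / 8)
		return -1;	/* zero divisor or guaranteed overflow: -EINVAL */

	/* 60 seconds per minute, 8 pulses per revolution (assumed) */
	*out = crystal_khz * 60 / (rpm * 8);
	return 0;
}

int main(void)
{
	uint32_t period;

	if (tach_period_from_rpm(2500, 0x30000000u, &period))
		puts("rejected: rpm * 8 would overflow");
	if (!tach_period_from_rpm(2500, 1200, &period))
		printf("tach period: %u\n", period);
	return 0;
}
```
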
@@ -244,6 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
		qi->deinterleave = 4;
		break;
	case INTEL_DRAM_GDDR:
	case INTEL_DRAM_GDDR_ECC:
		qi->channel_width = 32;
		break;
	default:
@@ -398,6 +399,12 @@ static const struct intel_sa_info xe2_hpd_sa_info = {
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
	.derating = 45,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
@@ -740,10 +747,15 @@ static unsigned int icl_qgv_bw(struct drm_i915_private *i915,

void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	const struct dram_info *dram_info = &dev_priv->dram_info;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
	if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) &&
	    dram_info->type == INTEL_DRAM_GDDR_ECC)
		xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info);
	else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
		xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
	else if (DISPLAY_VER(dev_priv) >= 14)
		tgl_get_bw_info(dev_priv, &mtl_sa_info);

@@ -968,7 +968,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
		old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
		old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}

static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,