mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-08 14:02:37 -04:00
Merge branch 'master' into mm-nonmm-stable
@@ -1,4 +1,4 @@
-What: /sys/kernel/oops_count
+What: /sys/kernel/warn_count
 Date: November 2022
 KernelVersion: 6.2.0
 Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>

@@ -34,13 +34,12 @@ bpf_sk_storage_get()

 void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)

-Socket-local storage can be retrieved using the ``bpf_sk_storage_get()``
-helper. The helper gets the storage from ``sk`` that is associated with ``map``.
-If the ``BPF_LOCAL_STORAGE_GET_F_CREATE`` flag is used then
-``bpf_sk_storage_get()`` will create the storage for ``sk`` if it does not
-already exist. ``value`` can be used together with
-``BPF_LOCAL_STORAGE_GET_F_CREATE`` to initialize the storage value, otherwise it
-will be zero initialized. Returns a pointer to the storage on success, or
+Socket-local storage for ``map`` can be retrieved from socket ``sk`` using the
+``bpf_sk_storage_get()`` helper. If the ``BPF_LOCAL_STORAGE_GET_F_CREATE``
+flag is used then ``bpf_sk_storage_get()`` will create the storage for ``sk``
+if it does not already exist. ``value`` can be used together with
+``BPF_LOCAL_STORAGE_GET_F_CREATE`` to initialize the storage value, otherwise
+it will be zero initialized. Returns a pointer to the storage on success, or
 ``NULL`` in case of failure.

 .. note::

@@ -54,9 +53,9 @@ bpf_sk_storage_delete()

 long bpf_sk_storage_delete(struct bpf_map *map, void *sk)

-Socket-local storage can be deleted using the ``bpf_sk_storage_delete()``
-helper. The helper deletes the storage from ``sk`` that is identified by
-``map``. Returns ``0`` on success, or negative error in case of failure.
+Socket-local storage for ``map`` can be deleted from socket ``sk`` using the
+``bpf_sk_storage_delete()`` helper. Returns ``0`` on success, or negative
+error in case of failure.
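For illustration, a minimal in-kernel user of these helpers could look like the
sketch below; the map name, the ``__u64`` counter value type, and the
``sockops`` attach point are assumptions for the example, not part of this
patch::

    /* Sketch: count sockops events per socket in socket-local storage. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, __u64);
    } sk_event_count SEC(".maps");

    SEC("sockops")
    int count_events(struct bpf_sock_ops *ctx)
    {
        struct bpf_sock *sk = ctx->sk;
        __u64 *cnt;

        if (!sk)
            return 0;

        /* value == NULL: storage is created zero-initialized on first use */
        cnt = bpf_sk_storage_get(&sk_event_count, sk, NULL,
                                 BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (cnt)
            (*cnt)++;
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";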
User space
----------

@@ -68,16 +67,20 @@ bpf_map_update_elem()

 int bpf_map_update_elem(int map_fd, const void *key, const void *value, __u64 flags)

-Socket-local storage for the socket identified by ``key`` belonging to
-``map_fd`` can be added or updated using the ``bpf_map_update_elem()`` libbpf
-function. ``key`` must be a pointer to a valid ``fd`` in the user space
-program. The ``flags`` parameter can be used to control the update behaviour:
+Socket-local storage for map ``map_fd`` can be added or updated locally to a
+socket using the ``bpf_map_update_elem()`` libbpf function. The socket is
+identified by a `socket` ``fd`` stored in the pointer ``key``. The pointer
+``value`` has the data to be added or updated to the socket ``fd``. The type
+and size of ``value`` should be the same as the value type of the map
+definition.

-- ``BPF_ANY`` will create storage for ``fd`` or update existing storage.
-- ``BPF_NOEXIST`` will create storage for ``fd`` only if it did not already
-  exist, otherwise the call will fail with ``-EEXIST``.
-- ``BPF_EXIST`` will update existing storage for ``fd`` if it already exists,
-  otherwise the call will fail with ``-ENOENT``.
+The ``flags`` parameter can be used to control the update behaviour:

+- ``BPF_ANY`` will create storage for `socket` ``fd`` or update existing storage.
+- ``BPF_NOEXIST`` will create storage for `socket` ``fd`` only if it did not
+  already exist, otherwise the call will fail with ``-EEXIST``.
+- ``BPF_EXIST`` will update existing storage for `socket` ``fd`` if it already
+  exists, otherwise the call will fail with ``-ENOENT``.

 Returns ``0`` on success, or negative error in case of failure.
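For illustration, a user-space sketch might look as follows (socket setup
elided; ``map_fd`` is assumed to refer to a ``BPF_MAP_TYPE_SK_STORAGE`` map
whose value type is ``__u64``, carried over from the example above)::

    #include <bpf/bpf.h>
    #include <linux/types.h>

    /* Sketch: create or update the storage attached to sock_fd. */
    int set_counter(int map_fd, int sock_fd, __u64 start)
    {
        return bpf_map_update_elem(map_fd, &sock_fd, &start, BPF_ANY);
    }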
@@ -88,10 +91,10 @@ bpf_map_lookup_elem()

 int bpf_map_lookup_elem(int map_fd, const void *key, void *value)

-Socket-local storage for the socket identified by ``key`` belonging to
-``map_fd`` can be retrieved using the ``bpf_map_lookup_elem()`` libbpf
-function. ``key`` must be a pointer to a valid ``fd`` in the user space
-program. Returns ``0`` on success, or negative error in case of failure.
+Socket-local storage for map ``map_fd`` can be retrieved from a socket using
+the ``bpf_map_lookup_elem()`` libbpf function. The storage is retrieved from
+the socket identified by a `socket` ``fd`` stored in the pointer
+``key``. Returns ``0`` on success, or negative error in case of failure.
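A matching lookup sketch, under the same assumptions::

    #include <bpf/bpf.h>
    #include <linux/types.h>

    /* Sketch: read the storage back; fills *val and returns 0 on success. */
    int get_counter(int map_fd, int sock_fd, __u64 *val)
    {
        return bpf_map_lookup_elem(map_fd, &sock_fd, val);
    }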
bpf_map_delete_elem()
~~~~~~~~~~~~~~~~~~~~~

@@ -100,9 +103,10 @@ bpf_map_delete_elem()

 int bpf_map_delete_elem(int map_fd, const void *key)

-Socket-local storage for the socket identified by ``key`` belonging to
-``map_fd`` can be deleted using the ``bpf_map_delete_elem()`` libbpf
-function. Returns ``0`` on success, or negative error in case of failure.
+Socket-local storage for map ``map_fd`` can be deleted from a socket using the
+``bpf_map_delete_elem()`` libbpf function. The storage is deleted from the
+socket identified by a `socket` ``fd`` stored in the pointer ``key``. Returns
+``0`` on success, or negative error in case of failure.
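And a deletion sketch, again with the same assumed names::

    #include <bpf/bpf.h>

    /* Sketch: drop the storage attached to sock_fd in this map. */
    int drop_counter(int map_fd, int sock_fd)
    {
        return bpf_map_delete_elem(map_fd, &sock_fd);
    }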
Examples
========

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71828 Power Management Integrated Circuit LED driver

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   This module is part of the ROHM BD71828 MFD device. For more details

@@ -21,6 +21,7 @@ properties:
           - mediatek,mt8173-gce
           - mediatek,mt8183-gce
           - mediatek,mt8186-gce
+          - mediatek,mt8188-gce
           - mediatek,mt8192-gce
           - mediatek,mt8195-gce

@@ -28,6 +28,7 @@ properties:
       - qcom,sc8180x-apss-shared
       - qcom,sdm660-apcs-hmss-global
       - qcom,sdm845-apss-shared
+      - qcom,sm4250-apcs-hmss-global
       - qcom,sm6125-apcs-hmss-global
       - qcom,sm6115-apcs-hmss-global
       - qcom,sm8150-apss-shared

@@ -24,12 +24,14 @@ properties:
   compatible:
     items:
       - enum:
+          - qcom,sc7280-ipcc
+          - qcom,sc8280xp-ipcc
           - qcom,sm6350-ipcc
           - qcom,sm6375-ipcc
           - qcom,sm8250-ipcc
           - qcom,sm8350-ipcc
           - qcom,sm8450-ipcc
-          - qcom,sc7280-ipcc
+          - qcom,sm8550-ipcc
       - const: qcom,ipcc

   reg:
Documentation/devicetree/bindings/mfd/ampere,smpro.yaml (new file, 42 lines)
@@ -0,0 +1,42 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/ampere,smpro.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Ampere Altra SMPro firmware driver

maintainers:
  - Quan Nguyen <quan@os.amperecomputing.com>

description: |
  Ampere Altra SMPro firmware may contain different blocks like hardware
  monitoring, error monitoring and other miscellaneous features.

properties:
  compatible:
    enum:
      - ampere,smpro

  reg:
    description:
      I2C device address.
    maxItems: 1

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        smpro@4f {
            compatible = "ampere,smpro";
            reg = <0x4f>;
        };
    };
@@ -36,6 +36,9 @@ properties:
     const: 1

 patternProperties:
   '^timer@[a-f0-9]+$':
     $ref: /schemas/timer/brcm,bcmbca-timer.yaml

+  '^watchdog@[a-f0-9]+$':
+    $ref: /schemas/watchdog/brcm,bcm7038-wdt.yaml

@@ -54,6 +57,11 @@ examples:
     #address-cells = <1>;
     #size-cells = <1>;

     timer@0 {
         compatible = "brcm,bcm63138-timer";
         reg = <0x0 0x28>;
     };

+    watchdog@28 {
+        compatible = "brcm,bcm7038-wdt";
+        reg = <0x28 0x8>;

@@ -33,11 +33,6 @@ Required properties:
  "dlg,da9061" for DA9061
- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
  modified to match the chip's OTP settings).
- interrupts : IRQ line information.
- interrupt-controller

See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
further information on IRQ bindings.

Optional properties:

@@ -48,6 +43,12 @@ Optional properties:
See Documentation/devicetree/bindings/gpio/gpio.txt for further information on
GPIO bindings.

- interrupts : IRQ line information.
- interrupt-controller

See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
further information on IRQ bindings.

Sub-nodes:

- regulators : This node defines the settings for the LDOs and BUCKs.

@@ -85,7 +86,7 @@ Sub-nodes:

- onkey : See ../input/da9062-onkey.txt

-- watchdog: See ../watchdog/da9062-watchdog.txt
+- watchdog: See ../watchdog/da9062-wdt.txt

- thermal : See ../thermal/da9062-thermal.txt

@@ -12,7 +12,8 @@ maintainers:
 description: |
   The Ocelot ethernet switch family contains chips that have an internal CPU
   (VSC7513, VSC7514) and chips that don't (VSC7511, VSC7512). All switches have
-  the option to be controlled externally, which is the purpose of this driver.
+  the option to be controlled externally via external interfaces like SPI or
+  PCIe.

   The switch family is a multi-port networking switch that supports many
   interfaces. Additionally, the device can perform pin control, MDIO buses, and

@@ -61,7 +62,6 @@ required:
   - reg
   - '#address-cells'
   - '#size-cells'
-  - spi-max-frequency

 additionalProperties: false

@@ -21,6 +21,7 @@ Required properties:
 compatible:
   "mediatek,mt6323" for PMIC MT6323
   "mediatek,mt6331" for PMIC MT6331 and MT6332
+  "mediatek,mt6357" for PMIC MT6357
   "mediatek,mt6358" for PMIC MT6358 and MT6366
   "mediatek,mt6359" for PMIC MT6359
   "mediatek,mt6397" for PMIC MT6397
@@ -99,10 +99,16 @@ properties:
     type: object
     $ref: /schemas/regulator/qcom,spmi-regulator.yaml#

+  pwm:
+    type: object
+    $ref: /schemas/leds/leds-qcom-lpg.yaml#

 patternProperties:
   "^adc@[0-9a-f]+$":
     type: object
-    $ref: /schemas/iio/adc/qcom,spmi-vadc.yaml#
+    oneOf:
+      - $ref: /schemas/iio/adc/qcom,spmi-iadc.yaml#
+      - $ref: /schemas/iio/adc/qcom,spmi-vadc.yaml#

   "^adc-tm@[0-9a-f]+$":
     type: object

@@ -112,11 +118,13 @@ patternProperties:
     type: object
     additionalProperties: true # FIXME qcom,pm8916-wcd-analog-codec binding not converted yet

-  "extcon@[0-9a-f]+$":
+  "^charger@[0-9a-f]+$":
     type: object
-    $ref: /schemas/extcon/qcom,pm8941-misc.yaml#
+    oneOf:
+      - $ref: /schemas/power/supply/qcom,pm8941-charger.yaml#
+      - $ref: /schemas/power/supply/qcom,pm8941-coincell.yaml#

-  "gpio(s)?@[0-9a-f]+$":
+  "gpio@[0-9a-f]+$":
     type: object
     $ref: /schemas/pinctrl/qcom,pmic-gpio.yaml#

@@ -124,10 +132,6 @@ patternProperties:
     type: object
     $ref: /schemas/power/reset/qcom,pon.yaml#

-  "pwm@[0-9a-f]+$":
-    type: object
-    $ref: /schemas/leds/leds-qcom-lpg.yaml#

   "^rtc@[0-9a-f]+$":
     type: object
     $ref: /schemas/rtc/qcom-pm8xxx-rtc.yaml#

@@ -136,9 +140,17 @@ patternProperties:
     type: object
     $ref: /schemas/thermal/qcom,spmi-temp-alarm.yaml#

+  "^usb-detect@[0-9a-f]+$":
+    type: object
+    $ref: /schemas/extcon/qcom,pm8941-misc.yaml#

+  "^usb-vbus-regulator@[0-9a-f]+$":
+    type: object
+    $ref: /schemas/regulator/qcom,usb-vbus-regulator.yaml#

   "^vibrator@[0-9a-f]+$":
     type: object
-    additionalProperties: true # FIXME qcom,pm8916-vib binding not converted yet
+    $ref: /schemas/input/qcom,pm8xxx-vib.yaml#

   "^mpps@[0-9a-f]+$":
     type: object

@@ -200,7 +212,7 @@ examples:
     #address-cells = <1>;
     #size-cells = <0>;

-    pmi8998_gpio: gpios@c000 {
+    pmi8998_gpio: gpio@c000 {
         compatible = "qcom,pmi8998-gpio", "qcom,spmi-gpio";
         reg = <0xc000>;
         gpio-controller;

@@ -285,7 +297,7 @@ examples:
     };
 };

-    pm6150_gpio: gpios@c000 {
+    pm6150_gpio: gpio@c000 {
         compatible = "qcom,pm6150-gpio", "qcom,spmi-gpio";
         reg = <0xc000>;
         gpio-controller;
@@ -17,10 +17,12 @@ properties:
   compatible:
     items:
       - enum:
+          - qcom,msm8976-tcsr
           - qcom,msm8998-tcsr
           - qcom,qcs404-tcsr
           - qcom,sc7180-tcsr
           - qcom,sc7280-tcsr
+          - qcom,sc8280xp-tcsr
           - qcom,sdm630-tcsr
           - qcom,sdm845-tcsr
           - qcom,sm8150-tcsr

@@ -15,11 +15,15 @@ description: |

 properties:
   compatible:
-    enum:
-      - qcom,pm8018
-      - qcom,pm8058
-      - qcom,pm8821
-      - qcom,pm8921
+    oneOf:
+      - enum:
+          - qcom,pm8058
+          - qcom,pm8821
+          - qcom,pm8921
+      - items:
+          - enum:
+              - qcom,pm8018
+          - const: qcom,pm8921

   reg:
     maxItems: 1

@@ -56,4 +60,23 @@ required:
   - interrupt-controller

 additionalProperties: false

+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    ssbi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        pmic@0 {
+            compatible = "qcom,pm8921";
+            reg = <0>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+            interrupt-controller;
+            #interrupt-cells = <2>;
+
+            interrupt-parent = <&tlmm>;
+            interrupts = <32 IRQ_TYPE_EDGE_RISING>;
+        };
+    };
+...

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71815 Power Management Integrated Circuit

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   BD71815AGW is a single-chip power management ICs for battery-powered

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71828 Power Management Integrated Circuit

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   BD71828GW is a single-chip power management IC for battery-powered portable

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71837 Power Management Integrated Circuit

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   BD71837MWV is programmable Power Management ICs for powering single-core,

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71847 and BD71850 Power Management Integrated Circuit

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   BD71847AMWV and BD71850MWV are programmable Power Management ICs for powering

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD9576MUF and BD9573MUF Power Management Integrated Circuit

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   BD9576MUF and BD9573MUF are power management ICs primarily intended for

@@ -53,6 +53,7 @@ properties:
       - microchip,lan966x-cpu-syscon
       - microchip,sparx5-cpu-syscon
       - mstar,msc313-pmsleep
+      - nuvoton,wpcm450-shm
       - rockchip,px30-qos
       - rockchip,rk3036-qos
       - rockchip,rk3066-qos

@@ -52,6 +52,9 @@ properties:
     type: object
     description: Magnetic reader

+  power-domains:
+    maxItems: 1

 required:
   - compatible
   - reg

@@ -26,7 +26,9 @@ properties:
   compatible:
     items:
       - enum:
+          - ti,j7200-system-controller
           - ti,j721e-system-controller
+          - ti,j721s2-system-controller
       - const: syscon
       - const: simple-mfd

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD99954 Battery charger

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>
   - Markus Laine <markus.laine@fi.rohmeurope.com>
   - Mikko Mutanen <mikko.mutanen@fi.rohmeurope.com>

@@ -35,6 +35,7 @@ properties:
       - renesas,pwm-r8a77980 # R-Car V3H
       - renesas,pwm-r8a77990 # R-Car E3
       - renesas,pwm-r8a77995 # R-Car D3
+      - renesas,pwm-r8a779g0 # R-Car V4H
     - const: renesas,pwm-rcar

   reg:

@@ -40,6 +40,7 @@ properties:
       - renesas,tpu-r8a77970 # R-Car V3M
       - renesas,tpu-r8a77980 # R-Car V3H
       - renesas,tpu-r8a779a0 # R-Car V3U
+      - renesas,tpu-r8a779g0 # R-Car V4H
     - const: renesas,tpu

   reg:

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71815 Power Management Integrated Circuit regulators

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   This module is part of the ROHM BD718215 MFD device. For more details

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71828 Power Management Integrated Circuit regulators

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   This module is part of the ROHM BD71828 MFD device. For more details

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71837 Power Management Integrated Circuit regulators

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   List of regulators provided by this controller. BD71837 regulators node

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD71847 and BD71850 Power Management Integrated Circuit regulators

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   List of regulators provided by this controller. BD71847 regulators node

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ROHM BD9576 and BD9573 Power Management Integrated Circuit regulators

 maintainers:
-  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+  - Matti Vaittinen <mazziesaccount@gmail.com>

 description: |
   This module is part of the ROHM BD9576 MFD device. For more details
@@ -22,6 +22,8 @@ properties:
       - fsl,imx8mn-cm7
       - fsl,imx8mp-cm7
       - fsl,imx8mq-cm4
+      - fsl,imx8qm-cm4
+      - fsl,imx8qxp-cm4
       - fsl,imx8ulp-cm33
       - fsl,imx93-cm33

@@ -54,12 +56,26 @@ properties:
     minItems: 1
     maxItems: 32

+  power-domains:
+    maxItems: 8

   fsl,auto-boot:
     $ref: /schemas/types.yaml#/definitions/flag
     description:
       Indicate whether need to load the default firmware and start the remote
       processor automatically.

+  fsl,entry-address:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Specify CPU entry address for SCU enabled processor.
+
+  fsl,resource-id:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      This property is to specify the resource id of the remote processor in SoC
+      which supports SCFW

 required:
   - compatible
@@ -0,0 +1,135 @@
# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/remoteproc/xlnx,zynqmp-r5fss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Xilinx R5F processor subsystem

maintainers:
  - Ben Levinsky <ben.levinsky@amd.com>
  - Tanmay Shah <tanmay.shah@amd.com>

description: |
  The Xilinx platforms include a pair of Cortex-R5F processors (RPU) for
  real-time processing based on the Cortex-R5F processor core from ARM.
  The Cortex-R5F processor implements the Arm v7-R architecture and includes a
  floating-point unit that implements the Arm VFPv3 instruction set.

properties:
  compatible:
    const: xlnx,zynqmp-r5fss

  xlnx,cluster-mode:
    $ref: /schemas/types.yaml#/definitions/uint32
    enum: [0, 1, 2]
    description: |
      The RPU MPCore can operate in split mode (Dual-processor performance), Safety
      lock-step mode(Both RPU cores execute the same code in lock-step,
      clock-for-clock) or Single CPU mode (RPU core 0 is held in reset while
      core 1 runs normally). The processor does not support dynamic configuration.
      Switching between modes is only permitted immediately after a processor reset.
      If set to 1 then lockstep mode and if 0 then split mode.
      If set to 2 then single CPU mode. When not defined, default will be lockstep mode.
      In summary,
      0: split mode
      1: lockstep mode (default)
      2: single cpu mode

patternProperties:
  "^r5f-[a-f0-9]+$":
    type: object
    description: |
      The RPU is located in the Low Power Domain of the Processor Subsystem.
      Each processor includes separate L1 instruction and data caches and
      tightly coupled memories (TCM). System memory is cacheable, but the TCM
      memory space is non-cacheable.

      Each RPU contains one 64KB memory and two 32KB memories that
      are accessed via the TCM A and B port interfaces, for a total of 128KB
      per processor. In lock-step mode, the processor has access to 256KB of
      TCM memory.

    properties:
      compatible:
        const: xlnx,zynqmp-r5f

      power-domains:
        maxItems: 1

      mboxes:
        minItems: 1
        items:
          - description: mailbox channel to send data to RPU
          - description: mailbox channel to receive data from RPU

      mbox-names:
        minItems: 1
        items:
          - const: tx
          - const: rx

      sram:
        $ref: /schemas/types.yaml#/definitions/phandle-array
        minItems: 1
        maxItems: 8
        items:
          maxItems: 1
        description: |
          phandles to one or more reserved on-chip SRAM regions. Other than TCM,
          the RPU can execute instructions and access data from the OCM memory,
          the main DDR memory, and other system memories.

          The regions should be defined as child nodes of the respective SRAM
          node, and should be defined as per the generic bindings in
          Documentation/devicetree/bindings/sram/sram.yaml

      memory-region:
        description: |
          List of phandles to the reserved memory regions associated with the
          remoteproc device. This is variable and describes the memories shared with
          the remote processor (e.g. remoteproc firmware and carveouts, rpmsg
          vrings, ...). This reserved memory region will be allocated in DDR memory.
        minItems: 1
        maxItems: 8
        items:
          - description: region used for RPU firmware image section
          - description: vdev buffer
          - description: vring0
          - description: vring1
        additionalItems: true

    required:
      - compatible
      - power-domains

    unevaluatedProperties: false

required:
  - compatible

additionalProperties: false

examples:
  - |
    remoteproc {
        compatible = "xlnx,zynqmp-r5fss";
        xlnx,cluster-mode = <1>;

        r5f-0 {
            compatible = "xlnx,zynqmp-r5f";
            power-domains = <&zynqmp_firmware 0x7>;
            memory-region = <&rproc_0_fw_image>, <&rpu0vdev0buffer>, <&rpu0vdev0vring0>, <&rpu0vdev0vring1>;
            mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
            mbox-names = "tx", "rx";
        };

        r5f-1 {
            compatible = "xlnx,zynqmp-r5f";
            power-domains = <&zynqmp_firmware 0x8>;
            memory-region = <&rproc_1_fw_image>, <&rpu1vdev0buffer>, <&rpu1vdev0vring0>, <&rpu1vdev0vring1>;
            mboxes = <&ipi_mailbox_rpu1 0>, <&ipi_mailbox_rpu1 1>;
            mbox-names = "tx", "rx";
        };
    };
...
@@ -38,6 +38,7 @@ properties:
       - fsl,imx8mq-sai
       - fsl,imx8qm-sai
       - fsl,imx8ulp-sai
+      - fsl,imx93-sai
       - fsl,vf610-sai

   reg:
@@ -0,0 +1,40 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/timer/brcm,bcmbca-timer.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Broadcom Broadband SoC timer

maintainers:
  - Rafał Miłecki <rafal@milecki.pl>

properties:
  compatible:
    oneOf:
      - const: brcm,bcm6345-timer
        description: >
          An old block with 3 timers.

          It can be found in BCM6345, BCM6838 and BCM63268.
      - const: brcm,bcm63138-timer
        description: >
          Updated block with 4 timers and control regs at the beginning.

          It can be found in newer SoCs, e.g. BCM63138, BCM63148, BCM63381,
          BCM68360, BCM6848, BCM6858, BCM4908.

  reg:
    maxItems: 1

additionalProperties: false

required:
  - reg

examples:
  - |
    timer@fffe0200 {
        compatible = "brcm,bcm6345-timer";
        reg = <0xfffe0200 0x1c>;
    };
@@ -25,6 +25,11 @@ versions up to 3.1. File system type to use on mount is *ntfs3*.
   Note: Applied to empty files, this allows to switch type between
   sparse(0x200), compressed(0x800) and normal.

+- *system.ntfs_attrib_be* gets/sets ntfs file/dir attributes.
+
+  Same value as system.ntfs_attrib but always represent as big-endian
+  (endianness of system.ntfs_attrib is the same as of the CPU).
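For illustration, the big-endian attribute can be read from user space with
``getxattr()``; the mount point and file name below are assumptions::

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/xattr.h>

    /* Sketch: fetch the big-endian NTFS attribute word of one file. */
    int main(void)
    {
        uint32_t attrib_be;
        ssize_t n = getxattr("/mnt/ntfs/file.txt", "system.ntfs_attrib_be",
                             &attrib_be, sizeof(attrib_be));
        if (n != (ssize_t)sizeof(attrib_be))
            return 1;
        printf("attributes (big-endian): 0x%08x\n", (unsigned int)attrib_be);
        return 0;
    }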
Mount Options
=============

@@ -75,6 +80,20 @@ this table marked with no it means default is without **no**.
   - Files with the Windows-specific SYSTEM (FILE_ATTRIBUTE_SYSTEM) attribute
     will be marked as system immutable files.

+  * - hide_dot_files
+    - Updates the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN) attribute
+      when creating and moving or renaming files. Files whose names start
+      with a dot will have the HIDDEN attribute set and files whose names
+      do not start with a dot will have it unset.
+
+  * - windows_names
+    - Prevents the creation of files and directories with a name not allowed
+      by Windows, either because it contains some not allowed character (which
+      are the characters " * / : < > ? \\ | and those whose code is less than
+      0x20), because the name (with or without extension) is a reserved file
+      name (CON, AUX, NUL, PRN, LPT1-9, COM1-9) or because the last character
+      is a space or a dot. Existing such files can still be read and renamed.

   * - discard
     - Enable support of the TRIM command for improved performance on delete
       operations, which is recommended for use with the solid-state drives
@@ -50,6 +50,7 @@ parameters, info versions, and other features it supports.
   :maxdepth: 1

   bnxt
+   etas_es58x
   hns3
   ionic
   ice
@@ -163,6 +163,39 @@ nf_conntrack_timestamp - BOOLEAN

	Enable connection tracking flow timestamping.

+nf_conntrack_sctp_timeout_closed - INTEGER (seconds)
+	default 10
+
+nf_conntrack_sctp_timeout_cookie_wait - INTEGER (seconds)
+	default 3
+
+nf_conntrack_sctp_timeout_cookie_echoed - INTEGER (seconds)
+	default 3
+
+nf_conntrack_sctp_timeout_established - INTEGER (seconds)
+	default 432000 (5 days)
+
+nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
+	default 0.3
+
+nf_conntrack_sctp_timeout_shutdown_recd - INTEGER (seconds)
+	default 0.3
+
+nf_conntrack_sctp_timeout_shutdown_ack_sent - INTEGER (seconds)
+	default 3
+
+nf_conntrack_sctp_timeout_heartbeat_sent - INTEGER (seconds)
+	default 30
+
+	This timeout is used to setup conntrack entry on secondary paths.
+	Default is set to hb_interval.
+
+nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds)
+	default 210
+
+	This timeout is used to setup conntrack entry on secondary paths.
+	Default is set to (hb_interval * path_max_retrans + rto_max)

nf_conntrack_udp_timeout - INTEGER (seconds)
	default 30
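As a usage sketch only (the value and the choice of knob are illustrative, not
a recommendation), one of these timeouts can be tuned by writing the
corresponding file under /proc/sys::

    #include <stdio.h>

    /* Sketch: raise the SCTP ESTABLISHED conntrack timeout to one day. */
    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/netfilter/"
                        "nf_conntrack_sctp_timeout_established", "w");
        if (!f)
            return 1;
        fprintf(f, "86400\n");
        return fclose(f) ? 1 : 0;
    }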
@@ -131,8 +131,7 @@ For example, if the function is non-recursive and is called with a
spinlock held, maxactive = 1 should be enough. If the function is
non-recursive and can never relinquish the CPU (e.g., via a semaphore
or preemption), NR_CPUS should be enough. If maxactive <= 0, it is
-set to a default value. If CONFIG_PREEMPT is enabled, the default
-is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS.
+set to a default value: max(10, 2*NR_CPUS).

It's not a disaster if you set maxactive too low; you'll just miss
some probes. In the kretprobe struct, the nmissed field is set to
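As a sketch of how ``maxactive`` is supplied in practice (the probed symbol
``kernel_clone`` and the value 20 are illustrative assumptions; this mirrors
the pattern of samples/kprobes/kretprobe_example.c)::

    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <linux/module.h>

    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
        pr_info("target returned %lx\n", regs_return_value(regs));
        return 0;
    }

    static struct kretprobe my_kretprobe = {
        .handler        = ret_handler,
        .kp.symbol_name = "kernel_clone",  /* illustrative target */
        .maxactive      = 20,  /* bound on concurrently probed returns */
    };

    static int __init kretprobe_init(void)
    {
        return register_kretprobe(&my_kretprobe);
    }

    static void __exit kretprobe_exit(void)
    {
        unregister_kretprobe(&my_kretprobe);
        pr_info("missed %d probes\n", my_kretprobe.nmissed);
    }

    module_init(kretprobe_init);
    module_exit(kretprobe_exit);
    MODULE_LICENSE("GPL");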
@@ -58,8 +58,8 @@ Synopsis of kprobe_events
  NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
  FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
                  (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types
-                 (x8/x16/x32/x64), "string", "ustring" and bitfield
-                 are supported.
+                 (x8/x16/x32/x64), "string", "ustring", "symbol", "symstr"
+                 and bitfield are supported.

  (\*1) only for the probe on function entry (offs == 0).
  (\*2) only for return probe.

@@ -96,6 +96,10 @@ offset, and container-size (usually 32). The syntax is::

Symbol type('symbol') is an alias of u32 or u64 type (depends on BITS_PER_LONG)
which shows given pointer in "symbol+offset" style.
+On the other hand, symbol-string type ('symstr') converts the given address to
+"symbol+offset/symbolsize" style and stores it as a null-terminated string.
+With 'symstr' type, you can filter the event with wildcard pattern of the
+symbols, and you don't need to solve symbol name by yourself.
For $comm, the default type is "string"; any other type is invalid.
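For illustration, a 'symstr' argument might be installed from user space like
the sketch below; the probed function, the use of ``$stack0`` (the return
address, decoded to its symbol), and the tracefs mount point are assumptions::

    #include <stdio.h>

    /* Sketch: define and enable a kprobe event whose argument is stored
     * as a "symbol+offset/size" string via the symstr type. */
    int main(void)
    {
        FILE *f = fopen("/sys/kernel/tracing/kprobe_events", "w");
        if (!f)
            return 1;
        fprintf(f, "p:myprobe kernel_clone caller=$stack0:symstr\n");
        fclose(f);

        f = fopen("/sys/kernel/tracing/events/kprobes/myprobe/enable", "w");
        if (!f)
            return 1;
        fprintf(f, "1\n");
        return fclose(f) ? 1 : 0;
    }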
.. _user_mem_access:

@@ -10878,6 +10878,7 @@ T: git git://git.kernel.dk/liburing
F: io_uring/
F: include/linux/io_uring.h
+F: include/linux/io_uring_types.h
F: include/trace/events/io_uring.h
F: include/uapi/linux/io_uring.h
F: tools/io_uring/

@@ -15336,6 +15337,7 @@ F: drivers/mfd/menelaus.c
F: drivers/mfd/palmas.c
F: drivers/mfd/tps65217.c
F: drivers/mfd/tps65218.c
+F: drivers/mfd/tps65219.c
F: drivers/mfd/tps65910.c
F: drivers/mfd/twl-core.[ch]
F: drivers/mfd/twl4030*.c
Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 1
+PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -100,6 +100,22 @@ opp03 {
	};
};

+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		rproc_0_fw_image: memory@3ed00000 {
+			no-map;
+			reg = <0x0 0x3ed00000 0x0 0x40000>;
+		};
+
+		rproc_1_fw_image: memory@3ef00000 {
+			no-map;
+			reg = <0x0 0x3ef00000 0x0 0x40000>;
+		};
+	};

	zynqmp_ipi: zynqmp_ipi {
		compatible = "xlnx,zynqmp-ipi-mailbox";
		interrupt-parent = <&gic>;

@@ -203,6 +219,23 @@ fpga_full: fpga-full {
		ranges;
	};

+	remoteproc {
+		compatible = "xlnx,zynqmp-r5fss";
+		xlnx,cluster-mode = <1>;
+
+		r5f-0 {
+			compatible = "xlnx,zynqmp-r5f";
+			power-domains = <&zynqmp_firmware PD_RPU_0>;
+			memory-region = <&rproc_0_fw_image>;
+		};
+
+		r5f-1 {
+			compatible = "xlnx,zynqmp-r5f";
+			power-domains = <&zynqmp_firmware PD_RPU_1>;
+			memory-region = <&rproc_1_fw_image>;
+		};
+	};

	amba: axi {
		compatible = "simple-bus";
		#address-cells = <2>;
@@ -38,26 +38,6 @@ static inline char *strncpy(char *dest, const char *src, size_t n)
	return xdest;
}

-#ifndef CONFIG_COLDFIRE
-#define __HAVE_ARCH_STRCMP
-static inline int strcmp(const char *cs, const char *ct)
-{
-	char res;
-
-	asm ("\n"
-		"1:	move.b (%0)+,%2\n"	/* get *cs */
-		"	cmp.b (%1)+,%2\n"	/* compare a byte */
-		"	jne 2f\n"		/* not equal, break out */
-		"	tst.b %2\n"		/* at end of cs? */
-		"	jne 1b\n"		/* no, keep going */
-		"	jra 3f\n"		/* strings are equal */
-		"2:	sub.b -(%1),%2\n"	/* *cs - *ct */
-		"3:"
-		: "+a" (cs), "+a" (ct), "=d" (res));
-	return res;
-}
-#endif /* CONFIG_COLDFIRE */

#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
@@ -109,6 +109,8 @@ timer-mfd@10000080 {
	compatible = "brcm,bcm7038-twd", "simple-mfd", "syscon";
	reg = <0x10000080 0x30>;
	ranges = <0x0 0x10000080 0x30>;
+	#address-cells = <1>;
+	#size-cells = <1>;

	timer@0 {
		compatible = "brcm,bcm6345-timer";
@@ -21,6 +21,7 @@
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
#include <asm/prom.h>
+#include <asm/mach-ralink/ralink_regs.h>

#include "common.h"

@@ -81,7 +82,8 @@ static int __init plat_of_setup(void)
	__dt_register_buses(soc_info.compatible, "palmbus");

	/* make sure that the reset controller is setup early */
-	ralink_rst_init();
+	if (ralink_soc != MT762X_SOC_MT7621AT)
+		ralink_rst_init();

	return 0;
}
@@ -59,6 +59,7 @@
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
+#include <asm/archrandom.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
@@ -13,10 +13,10 @@
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
-#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
+#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>

#define KVM_MAX_VCPUS 1024

@@ -95,10 +95,6 @@ struct kvm_arch {
	struct kvm_guest_timer timer;
};

-struct kvm_sbi_context {
-	int return_handled;
-};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;

@@ -169,6 +165,11 @@ struct kvm_vcpu_arch {
	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

+	/* Vendor, Arch, and Implementation details */
+	unsigned long mvendorid;
+	unsigned long marchid;
+	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;

@@ -217,7 +218,7 @@ struct kvm_vcpu_arch {
	struct kvm_csr_decode csr_decode;

	/* SBI context */
-	struct kvm_sbi_context sbi_context;
+	struct kvm_vcpu_sbi_context sbi_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

@@ -327,7 +328,4 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

-int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */

@@ -14,6 +14,10 @@
#define KVM_SBI_VERSION_MAJOR 1
#define KVM_SBI_VERSION_MINOR 0

+struct kvm_vcpu_sbi_context {
+	int return_handled;
+};

struct kvm_vcpu_sbi_extension {
	unsigned long extid_start;
	unsigned long extid_end;

@@ -31,7 +35,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 flags);
+int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;

@@ -49,6 +49,9 @@ struct kvm_sregs {
struct kvm_riscv_config {
	unsigned long isa;
	unsigned long zicbom_block_size;
+	unsigned long mvendorid;
+	unsigned long marchid;
+	unsigned long mimpid;
};

/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -627,16 +627,19 @@ long sbi_get_mvendorid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
}
+EXPORT_SYMBOL_GPL(sbi_get_mvendorid);

long sbi_get_marchid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
}
+EXPORT_SYMBOL_GPL(sbi_get_marchid);

long sbi_get_mimpid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
}
+EXPORT_SYMBOL_GPL(sbi_get_mimpid);

static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
@@ -127,3 +127,9 @@ static int __init riscv_kvm_init(void)
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
module_init(riscv_kvm_init);
+
+static void __exit riscv_kvm_exit(void)
+{
+	kvm_exit();
+}
+module_exit(riscv_kvm_exit);
@@ -537,10 +537,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		gstage_unmap_range(kvm, base_gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
	kvm_riscv_gstage_iounmap(kvm, base_gpa, size);

out:
	mmap_read_unlock(current->mm);

@@ -632,7 +630,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,

	mmap_read_lock(current->mm);

-	vma = find_vma_intersection(current->mm, hva, hva + 1);
+	vma = vma_lookup(current->mm, hva);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
@@ -21,6 +21,7 @@
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
+#include <asm/sbi.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),

@@ -171,6 +172,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
		set_bit(host_isa, vcpu->arch.isa);
	}

+	/* Setup vendor, arch, and implementation details */
+	vcpu->arch.mvendorid = sbi_get_mvendorid();
+	vcpu->arch.marchid = sbi_get_marchid();
+	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);
@@ -270,6 +276,15 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
			return -EINVAL;
		reg_val = riscv_cbom_block_size;
		break;
+	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+		reg_val = vcpu->arch.mvendorid;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(marchid):
+		reg_val = vcpu->arch.marchid;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(mimpid):
+		reg_val = vcpu->arch.mimpid;
+		break;
	default:
		return -EINVAL;
	}

@@ -296,12 +311,15 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

-	/* This ONE REG interface is only defined for single letter extensions */
-	if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
-		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
+		/*
+		 * This ONE REG interface is only defined for
+		 * single letter extensions.
+		 */
+		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+			return -EINVAL;
+
		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {

@@ -329,6 +347,24 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return -EOPNOTSUPP;
+	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.mvendorid = reg_val;
+		else
+			return -EBUSY;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(marchid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.marchid = reg_val;
+		else
+			return -EBUSY;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(mimpid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.mimpid = reg_val;
+		else
+			return -EBUSY;
+		break;
	default:
		return -EINVAL;
	}
@@ -541,22 +577,26 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
-	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
+	default:
+		break;
+	}

	return -EINVAL;
}

@@ -564,22 +604,26 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
-	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
+	default:
+		break;
+	}

	return -EINVAL;
}

@@ -984,8 +1028,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
-		if (!ret)
-			ret = 1;
+		if (ret)
+			continue;
+		ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);
@@ -10,9 +10,7 @@
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/version.h>
-#include <asm/csr.h>
#include <asm/sbi.h>
-#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,

@@ -21,7 +19,6 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	struct sbiret ecall_ret;

	switch (cp->a6) {
	case SBI_EXT_BASE_GET_SPEC_VERSION:

@@ -50,13 +47,13 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
		*out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0;
		break;
	case SBI_EXT_BASE_GET_MVENDORID:
+		*out_val = vcpu->arch.mvendorid;
+		break;
	case SBI_EXT_BASE_GET_MARCHID:
+		*out_val = vcpu->arch.marchid;
+		break;
	case SBI_EXT_BASE_GET_MIMPID:
-		ecall_ret = sbi_ecall(SBI_EXT_BASE, cp->a6, 0, 0, 0, 0, 0, 0);
-		if (!ecall_ret.error)
-			*out_val = ecall_ret.value;
-		/*TODO: We are unnecessarily converting the error twice */
-		ret = sbi_err_map_linux_errno(ecall_ret.error);
+		*out_val = vcpu->arch.mimpid;
		break;
	default:
		ret = -EOPNOTSUPP;
@@ -9,7 +9,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
-#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

@@ -9,7 +9,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
-#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

@@ -9,7 +9,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
-#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

@@ -52,6 +52,7 @@
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

+#include <asm/archrandom.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
@@ -102,7 +102,7 @@ static int switch_drv_remove(struct platform_device *pdev)
|
||||
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
flush_work(&psw->work);
|
||||
del_timer_sync(&psw->debounce);
|
||||
timer_shutdown_sync(&psw->debounce);
|
||||
free_irq(irq, pdev);
|
||||
|
||||
kfree(psw);
|
||||
|
||||
@@ -36,7 +36,7 @@ static __init void init_cea_offsets(void)
|
||||
unsigned int cea;
|
||||
|
||||
again:
|
||||
cea = prandom_u32_max(max_cea);
|
||||
cea = get_random_u32_below(max_cea);
|
||||
|
||||
for_each_possible_cpu(j) {
|
||||
if (cea_offset(j) == cea)
|
||||
|
||||
@@ -724,19 +724,19 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
||||
* sure that the reference to cgroup is valid across the call (see
|
||||
* comments in bfq_bic_update_cgroup on this issue)
|
||||
*/
|
||||
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
|
||||
struct bfq_io_cq *bic,
|
||||
struct bfq_group *bfqg)
|
||||
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
|
||||
struct bfq_io_cq *bic,
|
||||
struct bfq_group *bfqg)
|
||||
{
|
||||
struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
|
||||
struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
|
||||
struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
|
||||
struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
|
||||
struct bfq_entity *entity;
|
||||
|
||||
if (async_bfqq) {
|
||||
entity = &async_bfqq->entity;
|
||||
|
||||
if (entity->sched_data != &bfqg->sched_data) {
|
||||
bic_set_bfqq(bic, NULL, 0);
|
||||
bic_set_bfqq(bic, NULL, false);
|
||||
bfq_release_process_ref(bfqd, async_bfqq);
|
||||
}
|
||||
}
|
||||
@@ -772,12 +772,10 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
|
||||
*/
|
||||
bfq_put_cooperator(sync_bfqq);
|
||||
bfq_release_process_ref(bfqd, sync_bfqq);
|
||||
bic_set_bfqq(bic, NULL, 1);
|
||||
bic_set_bfqq(bic, NULL, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return bfqg;
|
||||
}
|
||||
|
||||
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
|
||||
|
||||
@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq);
|
||||
|
||||
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
|
||||
{
|
||||
struct bfq_queue *old_bfqq = bic->bfqq[is_sync];
|
||||
|
||||
/* Clear bic pointer if bfqq is detached from this bic */
|
||||
if (old_bfqq && old_bfqq->bic == bic)
|
||||
old_bfqq->bic = NULL;
|
||||
|
||||
/*
|
||||
* If bfqq != NULL, then a non-stable queue merge between
|
||||
* bic->bfqq and bfqq is happening here. This causes troubles
|
||||
@@ -3108,7 +3114,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
|
||||
/*
|
||||
* Merge queues (that is, let bic redirect its requests to new_bfqq)
|
||||
*/
|
||||
bic_set_bfqq(bic, new_bfqq, 1);
|
||||
bic_set_bfqq(bic, new_bfqq, true);
|
||||
bfq_mark_bfqq_coop(new_bfqq);
|
||||
/*
|
||||
* new_bfqq now belongs to at least two bics (it is a shared queue):
|
||||
@@ -5311,7 +5317,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bfqd->lock, flags);
|
||||
bfqq->bic = NULL;
|
||||
bfq_exit_bfqq(bfqd, bfqq);
|
||||
bic_set_bfqq(bic, NULL, is_sync);
|
||||
spin_unlock_irqrestore(&bfqd->lock, flags);
|
||||
@@ -6557,7 +6562,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
|
||||
return bfqq;
|
||||
}
|
||||
|
||||
bic_set_bfqq(bic, NULL, 1);
|
||||
bic_set_bfqq(bic, NULL, true);
|
||||
|
||||
bfq_put_cooperator(bfqq);
|
||||
|
||||
@@ -7058,7 +7063,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
|
||||
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
|
||||
|
||||
bfqd->queue_weights_tree = RB_ROOT_CACHED;
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
bfqd->num_groups_with_pending_reqs = 0;
|
||||
#endif
|
||||
|
||||
INIT_LIST_HEAD(&bfqd->active_list);
|
||||
INIT_LIST_HEAD(&bfqd->idle_list);
|
||||
|
||||
@@ -197,8 +197,10 @@ struct bfq_entity {
|
||||
/* flag, set to request a weight, ioprio or ioprio_class change */
|
||||
int prio_changed;
|
||||
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
/* flag, set if the entity is counted in groups_with_pending_reqs */
|
||||
bool in_groups_with_pending_reqs;
|
||||
#endif
|
||||
|
||||
/* last child queue of entity created (for non-leaf entities) */
|
||||
struct bfq_queue *last_bfqq_created;
|
||||
@@ -491,6 +493,7 @@ struct bfq_data {
|
||||
*/
|
||||
struct rb_root_cached queue_weights_tree;
|
||||
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
/*
|
||||
* Number of groups with at least one process that
|
||||
* has at least one request waiting for completion. Note that
|
||||
@@ -538,6 +541,7 @@ struct bfq_data {
|
||||
* with no request waiting for completion.
|
||||
*/
|
||||
unsigned int num_groups_with_pending_reqs;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Per-class (RT, BE, IDLE) number of bfq_queues containing
|
||||
|
||||
@@ -1612,28 +1612,28 @@ void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
||||
|
||||
void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
|
||||
{
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
struct bfq_entity *entity = &bfqq->entity;
|
||||
|
||||
if (!entity->in_groups_with_pending_reqs) {
|
||||
entity->in_groups_with_pending_reqs = true;
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
if (!(bfqq_group(bfqq)->num_queues_with_pending_reqs++))
|
||||
bfqq->bfqd->num_groups_with_pending_reqs++;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
|
||||
{
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
struct bfq_entity *entity = &bfqq->entity;
|
||||
|
||||
if (entity->in_groups_with_pending_reqs) {
|
||||
entity->in_groups_with_pending_reqs = false;
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
if (!(--bfqq_group(bfqq)->num_queues_with_pending_reqs))
|
||||
bfqq->bfqd->num_groups_with_pending_reqs--;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*

@@ -33,6 +33,7 @@
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"
#include "blk-rq-qos.h"

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.

@@ -1322,6 +1323,7 @@ int blkcg_init_disk(struct gendisk *disk)
void blkcg_exit_disk(struct gendisk *disk)
{
	blkg_destroy_all(disk);
	rq_qos_exit(disk->queue);
	blk_throtl_exit(disk);
}

@@ -254,14 +254,15 @@ EXPORT_SYMBOL_GPL(blk_clear_pm_only);

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	kmem_cache_free(blk_requestq_cachep,
			container_of(rcu_head, struct request_queue, rcu_head));
	struct request_queue *q = container_of(rcu_head,
			struct request_queue, rcu_head);

	percpu_ref_exit(&q->q_usage_counter);
	kmem_cache_free(blk_requestq_cachep, q);
}

static void blk_free_queue(struct request_queue *q)
{
	percpu_ref_exit(&q->q_usage_counter);

	if (q->poll_stat)
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);
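The blk-core hunk moves percpu_ref_exit() out of blk_free_queue() and into the RCU callback, so the q_usage_counter is torn down only after a grace period, immediately before the queue memory is freed. A sketch of that ordering with hypothetical names (only the percpu_ref/RCU calls mirror the hunk above):

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct myobj {
	struct percpu_ref usage;
	struct rcu_head rcu;
};

static void myobj_free_rcu(struct rcu_head *head)
{
	struct myobj *obj = container_of(head, struct myobj, rcu);

	/* Safe: a grace period has elapsed, no reader can still be
	 * inside a percpu_ref_tryget() on this object. */
	percpu_ref_exit(&obj->usage);
	kfree(obj);
}

static void myobj_release(struct myobj *obj)
{
	call_rcu(&obj->rcu, myobj_free_rcu);
}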
@@ -232,7 +232,9 @@ enum {

	/* 1/64k is granular enough and can easily be handled w/ u32 */
	WEIGHT_ONE = 1 << 16,
};

enum {
	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision. For example, it should be able to

@@ -2818,7 +2820,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos)
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	del_timer_sync(&ioc->timer);
	timer_shutdown_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
}

@@ -644,7 +644,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	timer_shutdown_sync(&blkiolat->timer);
	flush_work(&blkiolat->enable_work);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);

@@ -434,7 +434,7 @@ static void kyber_exit_sched(struct elevator_queue *e)
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	del_timer_sync(&kqd->timer);
	timer_shutdown_sync(&kqd->timer);
	blk_stat_disable_accounting(kqd->q);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
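The three block-layer hunks above (iocost, iolatency, kyber), like several driver hunks further below, convert teardown paths from del_timer_sync() to the new timer_shutdown_sync() API. A minimal sketch of the pattern, using a hypothetical driver structure rather than any of the code above:

#include <linux/timer.h>
#include <linux/slab.h>

struct mydev {
	struct timer_list poll_timer;
};

static void mydev_destroy(struct mydev *dev)
{
	/*
	 * timer_shutdown_sync() waits for a running callback just like
	 * del_timer_sync(), but additionally marks the timer as shut
	 * down, so a concurrent re-arm attempt becomes a no-op. That
	 * makes the kfree() below safe even if some other path still
	 * holds a pointer to the timer.
	 */
	timer_shutdown_sync(&dev->poll_timer);
	kfree(dev);
}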
@@ -1431,7 +1431,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		timer_shutdown_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);

@@ -2213,7 +2213,7 @@ idt77252_init_ubr(struct idt77252_dev *card, struct vc_map *vc,
	}
	spin_unlock_irqrestore(&vc->lock, flags);
	if (est) {
		del_timer_sync(&est->timer);
		timer_shutdown_sync(&est->timer);
		kfree(est);
	}

@@ -2530,7 +2530,7 @@ idt77252_close(struct atm_vcc *vcc)
		vc->tx_vcc = NULL;

		if (vc->estimator) {
			del_timer(&vc->estimator->timer);
			timer_shutdown(&vc->estimator->timer);
			kfree(vc->estimator);
			vc->estimator = NULL;
		}
@@ -3752,7 +3752,7 @@ static void __exit idt77252_exit(void)
		card = idt77252_chain;
		dev = card->atmdev;
		idt77252_chain = card->next;
		del_timer_sync(&card->tst_timer);
		timer_shutdown_sync(&card->tst_timer);

		if (dev->phy->stop)
			dev->phy->stop(dev);

@@ -2184,7 +2184,7 @@ void drbd_destroy_device(struct kref *kref)
	struct drbd_resource *resource = device->resource;
	struct drbd_peer_device *peer_device, *tmp_peer_device;

	del_timer_sync(&device->request_timer);
	timer_shutdown_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device, device->open_cnt == 0);

@@ -1030,6 +1030,9 @@ static int conn_connect(struct drbd_connection *connection)
	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_use_task_frag = false;
	msock.socket->sk->sk_use_task_frag = false;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

@@ -1755,7 +1755,7 @@ static void lo_free_disk(struct gendisk *disk)
	if (lo->workqueue)
		destroy_workqueue(lo->workqueue);
	loop_free_idle_workers(lo, true);
	del_timer_sync(&lo->timer);
	timer_shutdown_sync(&lo->timer);
	mutex_destroy(&lo->lo_mutex);
	kfree(lo);
}
@@ -1773,7 +1773,16 @@ static const struct block_device_operations lo_fops = {
/*
 * And now the modules code and kernel interface.
 */
static int max_loop;

/*
 * If max_loop is specified, create that many devices upfront.
 * This also becomes a hard limit. If max_loop is not specified,
 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
 * init time. Loop devices can be requested on-demand with the
 * /dev/loop-control interface, or be instantiated by accessing
 * a 'dead' device node.
 */
static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0444);
@@ -2181,7 +2190,7 @@ MODULE_ALIAS("devname:loop-control");

static int __init loop_init(void)
{
	int i, nr;
	int i;
	int err;

	part_shift = 0;
@@ -2209,19 +2218,6 @@ static int __init loop_init(void)
		goto err_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop)
		nr = max_loop;
	else
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;

	err = misc_register(&loop_misc);
	if (err < 0)
		goto err_out;
@@ -2233,7 +2229,7 @@ static int __init loop_init(void)
	}

	/* pre-create number of devices given by config or max_loop */
	for (i = 0; i < nr; i++)
	for (i = 0; i < max_loop; i++)
		loop_add(i);

	printk(KERN_INFO "loop: module loaded\n");

@@ -512,6 +512,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		sock->sk->sk_use_task_frag = false;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
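The drbd and nbd hunks above apply the same new socket flag: kernel sockets that transmit from kernel threads, or from a task context that does not own the data, opt out of borrowing current->task_frag for sendmsg(). A reduced sketch for a hypothetical protocol driver; only the two sk fields shown are taken from the hunks:

#include <net/sock.h>

static void myproto_prepare_socket(struct socket *sock)
{
	/* Allocations from this socket must not trigger reclaim that
	 * could recurse back into our own I/O path. */
	sock->sk->sk_allocation = GFP_NOIO;
	/* Do not borrow current->task_frag: this socket transmits from
	 * kernel threads and from foreign task contexts, so the task's
	 * own page fragment must stay untouched. */
	sock->sk->sk_use_task_frag = false;
}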
@@ -737,7 +737,7 @@ static int bcsp_close(struct hci_uart *hu)
{
	struct bcsp_struct *bcsp = hu->priv;

	del_timer_sync(&bcsp->tbcsp);
	timer_shutdown_sync(&bcsp->tbcsp);

	hu->priv = NULL;

@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/hw_random.h>
#include <asm/archrandom.h>

static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{

@@ -23,6 +23,7 @@
#include <linux/sched/signal.h>
#include <asm/debug.h>
#include <asm/cpacf.h>
#include <asm/archrandom.h>

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("IBM Corporation");

@@ -56,6 +56,7 @@
#include <linux/sched/isolation.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/archrandom.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>

@@ -429,7 +429,9 @@ static int __init efisubsys_init(void)
	platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	execute_with_initialized_rng(&refresh_nv_rng_seed_nb);
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
		execute_with_initialized_rng(&refresh_nv_rng_seed_nb);

	return 0;

err_remove_group:

@@ -1166,6 +1166,103 @@ int zynqmp_pm_release_node(const u32 node)
}
EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);

/**
 * zynqmp_pm_get_rpu_mode() - Get RPU mode
 * @node_id:	Node ID of the device
 * @rpu_mode:	return by reference value
 *		either split or lockstep
 *
 * Return:	return 0 on success or error+reason.
 *		if success, then rpu_mode will be set
 *		to current rpu mode.
 */
int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	ret = zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
				  IOCTL_GET_RPU_OPER_MODE, 0, 0, ret_payload);

	/* only set rpu_mode if no error */
	if (ret == XST_PM_SUCCESS)
		*rpu_mode = ret_payload[0];

	return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_rpu_mode);

/**
 * zynqmp_pm_set_rpu_mode() - Set RPU mode
 * @node_id:	Node ID of the device
 * @rpu_mode:	Argument 1 to requested IOCTL call. either split or lockstep
 *
 * This function is used to set RPU mode to split or
 * lockstep
 *
 * Return:	Returns status, either success or error+reason
 */
int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
{
	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
				   IOCTL_SET_RPU_OPER_MODE, (u32)rpu_mode,
				   0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode);

/**
 * zynqmp_pm_set_tcm_config - configure TCM
 * @node_id:	Firmware specific TCM subsystem ID
 * @tcm_mode:	Argument 1 to requested IOCTL call
 *		either PM_RPU_TCM_COMB or PM_RPU_TCM_SPLIT
 *
 * This function is used to set RPU mode to split or combined
 *
 * Return: status: 0 for success, else failure
 */
int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
{
	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
				   IOCTL_TCM_COMB_CONFIG, (u32)tcm_mode, 0,
				   NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);

/**
 * zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to
 *			    be powered down forcefully
 * @node:	Node ID of the targeted PU or subsystem
 * @ack:	Flag to specify whether acknowledge is requested
 *
 * Return: status, either success or error+reason
 */
int zynqmp_pm_force_pwrdwn(const u32 node,
			   const enum zynqmp_pm_request_ack ack)
{
	return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, node, ack, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_force_pwrdwn);

/**
 * zynqmp_pm_request_wake - PM call to wake up selected master or subsystem
 * @node:	Node ID of the master or subsystem
 * @set_addr:	Specifies whether the address argument is relevant
 * @address:	Address from which to resume when woken up
 * @ack:	Flag to specify whether acknowledge requested
 *
 * Return: status, either success or error+reason
 */
int zynqmp_pm_request_wake(const u32 node,
			   const bool set_addr,
			   const u64 address,
			   const enum zynqmp_pm_request_ack ack)
{
	/* set_addr flag is encoded into 1st bit of address */
	return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr,
				   address >> 32, ack, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_request_wake);

/**
 * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
 * @node:	Node ID of the slave
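The kernel-doc above describes the newly exported EEMI wrappers. As a rough illustration of how a remoteproc-style caller might combine them, here is a hypothetical sequence; the node IDs, function name, and error handling are illustrative only, not taken from a real driver:

#include <linux/firmware/xlnx-zynqmp.h>

static int start_rpu_lockstep(u32 rpu_node, u32 tcm_node, u64 boot_addr)
{
	int ret;

	ret = zynqmp_pm_set_rpu_mode(rpu_node, PM_RPU_MODE_LOCKSTEP);
	if (ret)
		return ret;

	/* In lockstep mode both TCM banks are combined into one. */
	ret = zynqmp_pm_set_tcm_config(tcm_node, PM_RPU_TCM_COMB);
	if (ret)
		return ret;

	/* Resume the RPU from boot_addr; set_addr marks it as valid. */
	return zynqmp_pm_request_wake(rpu_node, true, boot_addr,
				      ZYNQMP_PM_REQUEST_ACK_NO);
}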
@@ -657,9 +657,10 @@ static void mvebu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
	spin_unlock_irqrestore(&mvpwm->lock, flags);
}

static void mvebu_pwm_get_state(struct pwm_chip *chip,
				struct pwm_device *pwm,
				struct pwm_state *state) {
static int mvebu_pwm_get_state(struct pwm_chip *chip,
			       struct pwm_device *pwm,
			       struct pwm_state *state)
{
	struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
	struct mvebu_gpio_chip *mvchip = mvpwm->mvchip;
@@ -693,6 +694,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
		state->enabled = false;

	spin_unlock_irqrestore(&mvpwm->lock, flags);

	return 0;
}

static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
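The mvebu hunk adapts to the change, visible above, that makes the pwm_ops ->get_state() callback return an error code instead of void, so hardware read failures can propagate to the PWM core. The new callback shape, sketched for a hypothetical driver (all values are placeholders):

#include <linux/pwm.h>

static int mydrv_pwm_get_state(struct pwm_chip *chip,
			       struct pwm_device *pwm,
			       struct pwm_state *state)
{
	/* Read back hardware state; values here are illustrative. */
	state->enabled = false;
	state->period = 1000000;		/* ns */
	state->duty_cycle = 0;
	state->polarity = PWM_POLARITY_NORMAL;
	return 0;	/* or a negative errno if the read fails */
}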
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
@@ -65,6 +66,7 @@ struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
@@ -75,7 +77,7 @@ struct kgd_mem {

	uint32_t alloc_flags;

	atomic_t invalid;
	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;
@@ -131,7 +133,8 @@ struct amdkfd_process_info {
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	atomic_t evicted_bos;
	struct mutex notifier_lock;
	uint32_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
@@ -180,7 +183,8 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -201,7 +205,8 @@ int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
}

static inline
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}
@@ -265,8 +270,10 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_
	(&((struct amdgpu_fpriv *) \
		((struct drm_file *)(drm_priv))->driver_priv)->vm)

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct file *filp, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct file *filp, u32 pasid,
					   struct file *filp,
					   void **process_info,
					   struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,

@@ -964,7 +964,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
	 * later stage when it is scheduled by another ioctl called by
	 * CRIU master process for the target pid for restore.
	 */
	atomic_inc(&mem->invalid);
	mutex_lock(&process_info->notifier_lock);
	mem->invalid++;
	mutex_unlock(&process_info->notifier_lock);
	mutex_unlock(&process_info->lock);
	return 0;
}
@@ -1301,6 +1303,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		return -ENOMEM;

	mutex_init(&info->lock);
	mutex_init(&info->notifier_lock);
	INIT_LIST_HEAD(&info->vm_list_head);
	INIT_LIST_HEAD(&info->kfd_bo_list);
	INIT_LIST_HEAD(&info->userptr_valid_list);
@@ -1317,7 +1320,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
	}

	info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	atomic_set(&info->evicted_bos, 0);
	INIT_DELAYED_WORK(&info->restore_userptr_work,
			  amdgpu_amdkfd_restore_userptr_worker);

@@ -1372,6 +1374,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		mutex_destroy(&info->notifier_lock);
		kfree(info);
	}
	return ret;
@@ -1426,8 +1429,36 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
	amdgpu_bo_unreserve(bo);
}

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct file *filp, u32 pasid)
{
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	int ret;

	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	/* Free the original amdgpu allocated pasid,
	 * will be replaced with kfd allocated pasid.
	 */
	if (avm->pasid) {
		amdgpu_pasid_free(avm->pasid);
		amdgpu_vm_set_pasid(adev, avm, 0);
	}

	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
	if (ret)
		return ret;

	return 0;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct file *filp, u32 pasid,
					   struct file *filp,
					   void **process_info,
					   struct dma_fence **ef)
{
@@ -1444,22 +1475,11 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
	if (avm->process_info)
		return -EINVAL;

	/* Free the original amdgpu allocated pasid,
	 * will be replaced with kfd allocated pasid.
	 */
	if (avm->pasid) {
		amdgpu_pasid_free(avm->pasid);
		amdgpu_vm_set_pasid(adev, avm, 0);
	}

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
	if (ret)
		return ret;
	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
@@ -1496,6 +1516,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		mutex_destroy(&process_info->notifier_lock);
		kfree(process_info);
	}
}
@@ -1548,7 +1569,9 @@ int amdgpu_amdkfd_criu_resume(void *p)

	mutex_lock(&pinfo->lock);
	pr_debug("scheduling work\n");
	atomic_inc(&pinfo->evicted_bos);
	mutex_lock(&pinfo->notifier_lock);
	pinfo->evicted_bos++;
	mutex_unlock(&pinfo->notifier_lock);
	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
		ret = -EINVAL;
		goto out_unlock;
@@ -1773,8 +1796,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* No more MMU notifiers */
	amdgpu_hmm_unregister(mem->bo);
	/* Cleanup user pages and MMU notifiers */
	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
		amdgpu_hmm_unregister(mem->bo);
		mutex_lock(&process_info->notifier_lock);
		amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
		mutex_unlock(&process_info->notifier_lock);
	}

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
@@ -1864,6 +1892,16 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock notifier lock. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mutex_lock(&mem->process_info->notifier_lock);
		is_invalid_userptr = !!mem->invalid;
		mutex_unlock(&mem->process_info->notifier_lock);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
@@ -2241,34 +2279,38 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 * are held elsewhere while allocating memory.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	/* Do not process MMU notifications until stage-4 IOCTL is received */
	/* Do not process MMU notifications during CRIU restore until
	 * KFD_CRIU_OP_RESUME IOCTL is received
	 */
	if (READ_ONCE(process_info->block_mmu_notifications))
		return 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
	mutex_lock(&process_info->notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	mem->invalid++;
	if (++process_info->evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
		r = kgd2kfd_quiesce_mm(mni->mm,
				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}
	mutex_unlock(&process_info->notifier_lock);

	return r;
}
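The rewritten handler above is now called straight from the mmu_interval_notifier invalidate path and replaces the old atomic counters with the process-wide notifier_lock; the interval sequence number is bumped under that same lock so that readers using mmu_interval_read_retry() observe a consistent state. The generic shape of this pattern, sketched with hypothetical names (only the mmu_notifier calls mirror the hunk):

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_userptr {
	struct mmu_interval_notifier notifier;
	struct mutex lock;		/* protects "invalid" */
	unsigned int invalid;
};

static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_userptr *up = container_of(mni, struct my_userptr,
					     notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&up->lock);
	/* Publish the new sequence before marking the range invalid,
	 * all under the same lock the restore path takes. */
	mmu_interval_set_seq(mni, cur_seq);
	up->invalid++;
	mutex_unlock(&up->lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_mni_ops = {
	.invalidate = my_invalidate,
};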
@@ -2285,54 +2327,58 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;
	uint32_t invalid;
	int ret = 0;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	mutex_lock(&process_info->notifier_lock);

	/* Move all invalidated BOs to the userptr_inval_list */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */
				 validate_list.head)
		if (mem->invalid)
			list_move_tail(&mem->validate_list.head,
				       &process_info->userptr_inval_list);

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		struct hmm_range *range;

		invalid = atomic_read(&mem->invalid);
		invalid = mem->invalid;
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 * revalidation attempt. Keep its page list.
			 */
			continue;

		bo = mem->bo;

		amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
		mem->range = NULL;

		/* BO reservations and getting user pages (hmm_range_fault)
		 * must happen outside the notifier lock
		 */
		mutex_unlock(&process_info->notifier_lock);

		/* Move the BO to system (CPU) domain if necessary to unmap
		 * and free the SG table
		 */
		if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
			if (amdgpu_bo_reserve(bo, true))
				return -EAGAIN;
			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			amdgpu_bo_unreserve(bo);
			if (ret) {
				pr_err("%s: Failed to invalidate userptr BO\n",
				       __func__);
				return -EAGAIN;
			}
		}

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						   &range);
						   &mem->range);
		if (ret) {
			pr_debug("Failed %d to get user pages\n", ret);

@@ -2345,30 +2391,32 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
			 */
			if (ret != -EFAULT)
				return ret;
		} else {

			/*
			 * FIXME: Cannot ignore the return code, must hold
			 * notifier_lock
			 */
			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
			ret = 0;
		}

		mutex_lock(&process_info->notifier_lock);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
		if (mem->invalid != invalid) {
			ret = -EAGAIN;
			goto unlock_out;
		}
		mem->invalid = 0;
	}

	return 0;
unlock_out:
	mutex_unlock(&process_info->notifier_lock);

	return ret;
}

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
 * with new page addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
@@ -2439,9 +2487,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
@@ -2457,7 +2502,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				mutex_lock(&process_info->notifier_lock);
				mem->invalid++;
				mutex_unlock(&process_info->notifier_lock);
				goto unreserve_out;
			}
		}
@@ -2477,6 +2524,36 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
	return ret;
}

/* Confirm that all user pages are valid while holding the notifier lock
 *
 * Moves valid BOs from the userptr_inval_list back to userptr_val_list.
 */
static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
{
	struct kgd_mem *mem, *tmp_mem;
	int ret = 0;

	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		bool valid = amdgpu_ttm_tt_get_user_pages_done(
				mem->bo->tbo.ttm, mem->range);

		mem->range = NULL;
		if (!valid) {
			WARN(!mem->invalid, "Invalid BO not marked invalid");
			ret = -EAGAIN;
			continue;
		}
		WARN(mem->invalid, "Valid BO is marked invalid");

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);
	}

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
@@ -2491,9 +2568,11 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
					     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;
	uint32_t evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	mutex_lock(&process_info->notifier_lock);
	evicted_bos = process_info->evicted_bos;
	mutex_unlock(&process_info->notifier_lock);
	if (!evicted_bos)
		return;

@@ -2516,9 +2595,6 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
@@ -2527,10 +2603,17 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	mutex_lock(&process_info->notifier_lock);
	if (process_info->evicted_bos != evicted_bos)
		goto unlock_notifier_out;

	if (confirm_valid_user_pages_locked(process_info)) {
		WARN(1, "User pages unexpectedly invalid");
		goto unlock_notifier_out;
	}

	process_info->evicted_bos = evicted_bos = 0;

	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
@@ -2538,6 +2621,8 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
		 */
	}

unlock_notifier_out:
	mutex_unlock(&process_info->notifier_lock);
unlock_out:
	mutex_unlock(&process_info->lock);

@@ -3016,14 +3016,15 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
			continue;
		}

		/* skip suspend of gfx and psp for S0ix
		/* skip suspend of gfx/mes and psp for S0ix
		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
		 * like at runtime. PSP is also part of the always on hardware
		 * so no need to suspend it.
		 */
		if (adev->in_s0ix &&
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
			continue;

		/* XXX handle errors */
@@ -4112,6 +4113,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)

	adev->in_suspend = true;

	/* Evict the majority of BOs before grabbing the full access */
	r = amdgpu_device_evict_resources(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_fini_data_exchange(adev);
		r = amdgpu_virt_request_full_gpu(adev, false);

@@ -2039,6 +2039,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
			 "See modparam exp_hw_support\n");
		return -ENODEV;
	}
	/* differentiate between P10 and P11 asics with the same DID */
	if (pdev->device == 0x67FF &&
	    (pdev->revision == 0xE3 ||
	     pdev->revision == 0xE7 ||
	     pdev->revision == 0xF3 ||
	     pdev->revision == 0xF7)) {
		flags &= ~AMD_ASIC_MASK;
		flags |= CHIP_POLARIS10;
	}

	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
	 * however, SME requires an indirect IOMMU mapping because the encryption
@@ -2108,12 +2117,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,

	pci_set_drvdata(pdev, ddev);

	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
	ret = amdgpu_driver_load_kms(adev, flags);
	if (ret)
		goto err_pci;

retry_init:
	ret = drm_dev_register(ddev, ent->driver_data);
	ret = drm_dev_register(ddev, flags);
	if (ret == -EAGAIN && ++retry <= 3) {
		DRM_INFO("retry init %d\n", retry);
		/* Don't request EX mode too frequently which is attacking */

@@ -64,7 +64,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
			    sizeof(atom_ctx->vbios_version)) ||
		    strnstr(atom_ctx->vbios_version, "D163",
			    sizeof(atom_ctx->vbios_version))) {
			*fru_addr = FRU_EEPROM_MADDR_6;
			if (fru_addr)
				*fru_addr = FRU_EEPROM_MADDR_6;
			return true;
		} else {
			return false;
@@ -83,7 +84,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
			    sizeof(atom_ctx->vbios_version))) {
			return false;
		} else {
			*fru_addr = FRU_EEPROM_MADDR_6;
			if (fru_addr)
				*fru_addr = FRU_EEPROM_MADDR_6;
			return true;
		}
	} else {

@@ -113,7 +113,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain | AMDGPU_GEM_DOMAIN_CPU;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
@@ -332,10 +332,20 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain, flags, ttm_bo_type_device,
				     resv, &gobj);
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
			  size, initial_domain, args->in.alignment, r);
	}

@@ -105,17 +105,11 @@ static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
				      unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&adev->notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
	mutex_unlock(&adev->notifier_lock);
	amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);

	return true;
}
@@ -244,9 +238,9 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
	return r;
}

int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
{
	int r;
	bool r;

	r = mmu_interval_read_retry(hmm_range->notifier,
				    hmm_range->notifier_seq);

@@ -29,12 +29,13 @@
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>

int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
			       uint64_t start, uint64_t npages, bool readonly,
			       void *owner, struct page **pages,
			       struct hmm_range **phmm_range);
int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);

#if defined(CONFIG_HMM_MIRROR)
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);

@@ -165,6 +165,26 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
	       id->gds_size != job->gds_size ||
	       id->gws_base != job->gws_base ||
	       id->gws_size != job->gws_size ||
	       id->oa_base != job->oa_base ||
	       id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
	       !amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
@@ -258,14 +278,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = vm->reserved_vmid[vmhub];
	*id = id_mgr->reserved;
	if ((*id)->owner != vm->immediate.fence_context ||
	    (*id)->pd_gpu_addr != job->vm_pd_addr ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
@@ -294,8 +315,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
	if (r)
		return r;

	(*id)->flushed_updates = updates;
	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

@@ -333,7 +354,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
@@ -355,7 +376,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
		if (r)
			return r;

		(*id)->flushed_updates = updates;
		job->vm_needs_flush |= needs_flush;
		return 0;
	}
@@ -408,22 +428,30 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
			if (r)
				goto error;

			id->flushed_updates = amdgpu_vm_tlb_seq(vm);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
@@ -435,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limitation of reserved vmid\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}
	vm->reserved_vmid[vmhub] = true;

unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
	return 0;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
@@ -469,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	if (vm->reserved_vmid[vmhub] &&
	    !--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
	}
	vm->reserved_vmid[vmhub] = false;
	mutex_unlock(&id_mgr->lock);
}

@@ -541,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

@@ -67,7 +67,8 @@ struct amdgpu_vmid_mgr {
	unsigned num_ids;
	struct list_head ids_lru;
	struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
	atomic_t reserved_vmid_num;
	struct amdgpu_vmid *reserved;
	unsigned int reserved_use_count;
};

int amdgpu_pasid_alloc(unsigned int bits);
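The amdgpu_ids.c hunks above replace the per-VM reserved-VMID pointer and the atomic reservation counter with a single per-manager reserved ID handed out under the manager lock by use count. A reduced model of that scheme, with hypothetical names (only the take-once/give-back-on-last-put logic mirrors the hunks):

#include <linux/mutex.h>

struct id_mgr {
	struct mutex lock;
	void *reserved;			/* first LRU entry, once taken */
	unsigned int reserved_use_count;
};

static void mgr_get_reserved(struct id_mgr *m, void *first_lru_entry)
{
	mutex_lock(&m->lock);
	if (!m->reserved_use_count++)
		m->reserved = first_lru_entry;	/* take it only once */
	mutex_unlock(&m->lock);
}

static void mgr_put_reserved(struct id_mgr *m)
{
	mutex_lock(&m->lock);
	if (!--m->reserved_use_count)
		m->reserved = NULL;	/* back to normal round robin */
	mutex_unlock(&m->lock);
}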
@@ -53,6 +53,8 @@ struct amdgpu_job {
	uint32_t preamble_status;
	uint32_t preemption_status;
	bool vm_needs_flush;
	bool gds_switch_needed;
	bool spm_update_needed;
	uint64_t vm_pd_addr;
	unsigned vmid;
	unsigned pasid;

@@ -346,17 +346,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
@@ -366,8 +365,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

@@ -422,6 +422,8 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);
@@ -446,27 +448,24 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		if (size < man->size)
		if (man && size < man->size)
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		else if (!man)
			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
		goto fail;
	} else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

		if (size < man->size)
		if (man && size < man->size)
			return true;
		else
			goto fail;
		goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

@@ -581,7 +580,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

@@ -1506,7 +1509,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;

@@ -284,7 +284,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,

@@ -695,8 +695,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
	return r;
}

/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
 */
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt && gtt->userptr && range)
		amdgpu_hmm_range_get_pages_done(range);
}

/*
 * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
 * Check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
@@ -714,10 +725,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,

	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");

	/*
	 * FIXME: Must always hold notifier_lock for this, and must
	 * not ignore the return code.
	 */
	return !amdgpu_hmm_range_get_pages_done(range);
}
#endif
@@ -1569,7 +1576,6 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  AMDGPU_GEM_DOMAIN_VRAM,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}
@@ -1595,7 +1601,6 @@ static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.drv_vram_usage_start_offset,
					  adev->mman.drv_vram_usage_size,
					  AMDGPU_GEM_DOMAIN_VRAM,
					  &adev->mman.drv_vram_usage_reserved_bo,
					  &adev->mman.drv_vram_usage_va);
}
@@ -1676,7 +1681,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
		ret = amdgpu_bo_create_kernel_at(adev,
						 ctx->c2p_train_data_offset,
						 ctx->train_data_size,
						 AMDGPU_GEM_DOMAIN_VRAM,
						 &ctx->c2p_bo,
						 NULL);
		if (ret) {
@@ -1690,7 +1694,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
	ret = amdgpu_bo_create_kernel_at(adev,
					 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
					 adev->mman.discovery_tmr_size,
					 AMDGPU_GEM_DOMAIN_VRAM,
					 &adev->mman.discovery_memory,
					 NULL);
	if (ret) {
@@ -1791,21 +1794,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver. */
	r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_vga_memory,
				       NULL);
	if (r)
		return r;
	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
				       adev->mman.stolen_extended_size,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_extended_memory,
				       NULL);
	if (r)
		return r;
	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
				       adev->mman.stolen_reserved_size,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_reserved_memory,
				       NULL);
	if (r)

@@ -159,6 +159,8 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
				 struct hmm_range **range);
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
				       struct hmm_range *range);
#else
@@ -168,6 +170,10 @@ static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
{
	return -EPERM;
}
static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
						    struct hmm_range *range)
{
}
static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
						     struct hmm_range *range)
{

@@ -395,7 +395,6 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

@@ -484,25 +484,20 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vmid == 0)
		return false;
	id = &id_mgr->ids[job->vmid];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vmid_had_gpu_reset(adev, id))
	if (job->vm_needs_flush || ring->has_compute_vm_bug)
		return true;

	return vm_flush_needed || gds_switch_needed;
	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
		return true;

	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
		return true;

	return false;
}

/**
@@ -524,27 +519,20 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool spm_update_needed = job->spm_update_needed;
	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
		job->gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush;
	struct dma_fence *fence = NULL;
	bool pasid_mapping_needed = false;
	unsigned patch_offset = 0;
	bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
	int r;

	if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
		spm_update_needed = true;
	}

	mutex_lock(&id_mgr->lock);
@@ -577,6 +565,17 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);

	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
	    gds_switch_needed) {
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
		if (r)
@@ -601,20 +600,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
	}
	dma_fence_put(fence);

	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
	    gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

@@ -2383,7 +2368,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	long timeout = msecs_to_jiffies(2000);
	int r;

	switch (args->in.op) {
@@ -2395,21 +2379,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		if (amdgpu_sriov_runtime(adev))
			timeout = 8 * timeout;

		/* Wait vm idle to make sure the vmid set in SPM_VMID is
		 * not referenced anymore.
		 */
		r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
		if (r)
			return r;

		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
		if (r < 0)
			return r;

		amdgpu_bo_unreserve(fpriv->vm.root.bo);
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
		break;
	default:

@@ -119,9 +119,6 @@ struct amdgpu_bo_vm;
/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE			(2ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
@@ -298,8 +295,7 @@ struct amdgpu_vm {
	struct dma_fence *last_unlocked;

	unsigned int pasid;
	/* dedicated to vm */
	struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
	bool reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool use_cpu_for_update;
Some files were not shown because too many files have changed in this diff.