Merge tag 'drm-misc-next-2026-02-26' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for v7.1:

UAPI Changes:

connector:
- Add panel_type property

fourcc:
- Add ARM interleaved 64k modifier

nouveau:
- Query Z-Cull info with DRM_IOCTL_NOUVEAU_GET_ZCULL_INFO

Cross-subsystem Changes:

coreboot:
- Clean up coreboot framebuffer support

dma-buf:
- Provide revoke mechanism for shared buffers
- Rename move_notify callback to invalidate_mappings and update users.
- Always enable move_notify
- Support dma_fence_was_initialized() test
- Protect dma_fence_ops by RCU and improve locking
- Fix sparse warnings

Core Changes:

atomic:
- Allocate drm_private_state via callback and convert drivers

atomic-helper:
- Use system_percpu_wq

buddy:
- Make buddy allocator available to all DRM drivers
- Document flags and structures

colorop:
- Add destroy helper and convert drivers

fbdev-emulation:
- Clean up

gem:
- Fix drm_gem_objects_lookup() error cleanup

Driver Changes:

amdgpu:
- Set panel_type to OELD for eDP

atmel-hlcdc:
- Support sama5d65 LCD controller

bridge:
- anx7625: Support USB-C plus DT bindings
- connector: Fix EDID detection
- dw-hdmi-qp: Support Vendor-Specific and SDP Infoframes; improve others
- fsl-ldb: Fix visual artifacts plus related DT property 'enable-termination-resistor'
- imx8qxp-pixel-link: Improve bridge reference handling
- lt9611: Support Port-B-only input plus DT bindings
- tda998x: Support DRM_BRIDGE_ATTACH_NO_CONNECTOR; Clean up
- Support TH1520 HDMI plus DT bindings
- Clean up

imagination:
- Clean up

komeda:
- Fix integer overflow in AFBC checks

mcde:
- Improve bridge handling

nouveau:
- Provide Z-cull info to user space
- gsp: Support GA100
- Shutdown on PCI device shutdown
- Clean up

panel:
- panel-jdi-lt070me05000: Use mipi-dsi multi functions
- panel-edp: Support AUO B116XAT04.1 (HW: 1A); Support CMN N116BCL-EAK (C2); Support FriendlyELEC plus DT changes
- Fix Kconfig dependencies

panthor:
- Add tracepoints for power and IRQs

rcar-du:
- dsi: fix VCLK calculation

rockchip:
- vop2: Use drm_ logging functions
- Support DisplayPort on RK3576

sysfb:
- corebootdrm: Support system framebuffer on coreboot firmware; detect orientation
- Clean up pixel-format lookup

sun4i:
- Clean up

tilcdc:
- Use DT bindings scheme
- Use managed DRM interfaces
- Support DRM_BRIDGE_ATTACH_NO_CONNECTOR
- Clean up a lot of obsolete code

v3d:
- Clean up

vc4:
- Use system_percpu_wq
- Clean up

verisilicon:
- Support DC8200 plus DT bindings

virtgpu:
- Support PRIME imports with enabled 3D

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patch.msgid.link/20260226143615.GA47200@linux.fritz.box
This commit is contained in:
Dave Airlie
2026-03-02 16:57:26 +10:00
234 changed files with 8540 additions and 4247 deletions

View File

@@ -85,6 +85,11 @@ properties:
aux-bus:
$ref: /schemas/display/dp-aux-bus.yaml#
connector:
type: object
$ref: /schemas/connector/usb-connector.yaml#
unevaluatedProperties: false
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -117,7 +122,6 @@ properties:
required:
- port@0
- port@1
required:
- compatible
@@ -127,6 +131,28 @@ required:
- vdd33-supply
- ports
allOf:
- if:
required:
- aux-bus
- connector
then:
false
- if:
required:
- connector
then:
properties:
ports:
properties:
port@1: false
else:
properties:
ports:
required:
- port@1
additionalProperties: false
examples:
@@ -185,3 +211,73 @@ examples:
};
};
};
- |
#include <dt-bindings/gpio/gpio.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
encoder@58 {
compatible = "analogix,anx7625";
reg = <0x58>;
enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
vdd10-supply = <&pp1000_mipibrdg>;
vdd18-supply = <&pp1800_mipibrdg>;
vdd33-supply = <&pp3300_mipibrdg>;
analogix,audio-enable;
analogix,lane0-swing = /bits/ 8 <0x14 0x54 0x64 0x74>;
analogix,lane1-swing = /bits/ 8 <0x14 0x54 0x64 0x74>;
connector {
compatible = "usb-c-connector";
power-role = "dual";
data-role = "dual";
vbus-supply = <&vbus_reg>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
endpoint {
remote-endpoint = <&usb_hs>;
};
};
port@1 {
reg = <1>;
endpoint {
remote-endpoint = <&usb_ss>;
};
};
port@2 {
reg = <2>;
endpoint {
remote-endpoint = <&usb_sbu>;
};
};
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
endpoint {
remote-endpoint = <&mipi_dsi>;
bus-type = <7>;
data-lanes = <0 1 2 3>;
};
};
};
};
};

View File

@@ -35,6 +35,15 @@ properties:
- const: ldb
- const: lvds
nxp,enable-termination-resistor:
type: boolean
description:
Indicates that the built-in 100 Ohm termination resistor on the LVDS
output is enabled. This property is optional and controlled via the
HS_EN bit in the LVDS_CTRL register. Enabling it can improve signal
quality and prevent visual artifacts on some boards, but increases
power consumption.
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -84,6 +93,15 @@ allOf:
required:
- reg-names
- if:
properties:
compatible:
contains:
const: fsl,imx6sx-ldb
then:
properties:
nxp,enable-termination-resistor: false
additionalProperties: false
examples:

View File

@@ -44,21 +44,28 @@ properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Primary MIPI port-1 for MIPI input
DSI Port A input. Directly drives the display, or works in
combination with Port B for higher resolution displays.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
Additional MIPI port-2 for MIPI input, used in combination
with primary MIPI port-1 to drive higher resolution displays
DSI Port B input. Can be used alone if DSI is physically
connected to Port B, or in combination with Port A for higher
resolution displays.
port@2:
$ref: /schemas/graph.yaml#/properties/port
description:
HDMI port for HDMI output
anyOf:
- required:
- port@0
- required:
- port@1
required:
- port@0
- port@2
required:

View File

@@ -0,0 +1,120 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/thead,th1520-dw-hdmi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: T-Head TH1520 DesignWare HDMI TX Encoder
maintainers:
- Icenowy Zheng <uwu@icenowy.me>
description:
The HDMI transmitter is a Synopsys DesignWare HDMI TX controller
paired with a DesignWare HDMI Gen2 TX PHY.
allOf:
- $ref: /schemas/display/bridge/synopsys,dw-hdmi.yaml#
properties:
compatible:
enum:
- thead,th1520-dw-hdmi
reg-io-width:
const: 4
clocks:
maxItems: 4
clock-names:
items:
- const: iahb
- const: isfr
- const: cec
- const: pix
resets:
items:
- description: Main reset
- description: Configuration APB reset
reset-names:
items:
- const: main
- const: apb
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: Input port connected to DC8200 DPU "DP" output
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: HDMI output port
required:
- port@0
- port@1
required:
- compatible
- reg
- reg-io-width
- clocks
- clock-names
- resets
- reset-names
- interrupts
- ports
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/thead,th1520-clk-ap.h>
#include <dt-bindings/reset/thead,th1520-reset.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
hdmi@ffef540000 {
compatible = "thead,th1520-dw-hdmi";
reg = <0xff 0xef540000 0x0 0x40000>;
reg-io-width = <4>;
interrupts = <111 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_vo CLK_HDMI_PCLK>,
<&clk_vo CLK_HDMI_SFR>,
<&clk_vo CLK_HDMI_CEC>,
<&clk_vo CLK_HDMI_PIXCLK>;
clock-names = "iahb", "isfr", "cec", "pix";
resets = <&rst_vo TH1520_RESET_ID_HDMI>,
<&rst_vo TH1520_RESET_ID_HDMI_APB>;
reset-names = "main", "apb";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
hdmi_in: endpoint {
remote-endpoint = <&dpu_out_dp1>;
};
};
port@1 {
reg = <1>;
hdmi_out_conn: endpoint {
remote-endpoint = <&hdmi_conn_in>;
};
};
};
};
};

View File

@@ -44,6 +44,8 @@ properties:
- boe,nv133fhm-n62
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
# FriendlyELEC HD702E 800x1280 LCD panel
- friendlyarm,hd702e
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bca-ea1
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel

View File

@@ -144,8 +144,6 @@ properties:
- foxlink,fl500wvr00-a0t
# Frida FRD350H54004 3.5" QVGA TFT LCD panel
- frida,frd350h54004
# FriendlyELEC HD702E 800x1280 LCD panel
- friendlyarm,hd702e
# GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
- giantplus,gpg48273qs5
# GiantPlus GPM940B0 3.0" QVGA TFT LCD panel

View File

@@ -27,12 +27,10 @@ description: |
* Pixel clock up to 594MHz
* I2S, SPDIF audio interface
allOf:
- $ref: /schemas/sound/dai-common.yaml#
properties:
compatible:
enum:
- rockchip,rk3576-dp
- rockchip,rk3588-dp
reg:
@@ -42,6 +40,7 @@ properties:
maxItems: 1
clocks:
minItems: 3
items:
- description: Peripheral/APB bus clock
- description: DisplayPort AUX clock
@@ -50,6 +49,7 @@ properties:
- description: SPDIF interface clock
clock-names:
minItems: 3
items:
- const: apb
- const: aux
@@ -95,6 +95,27 @@ required:
- ports
- resets
allOf:
- $ref: /schemas/sound/dai-common.yaml#
- if:
properties:
compatible:
contains:
enum:
- rockchip,rk3588-dp
then:
properties:
clocks:
minItems: 5
clock-names:
minItems: 5
else:
properties:
clocks:
maxItems: 3
clock-names:
maxItems: 3
unevaluatedProperties: false
examples:

View File

@@ -1,4 +1,5 @@
Device-Tree bindings for tilcdc DRM generic panel output driver
This binding is deprecated and should not be used.
Required properties:
- compatible: value should be "ti,tilcdc,panel".

View File

@@ -0,0 +1,100 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright 2025 Bootlin
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/tilcdc/ti,am33xx-tilcdc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI LCD Controller, found on AM335x, DA850, AM18x and OMAP-L138
maintainers:
- Kory Maincent <kory.maincent@bootlin.com>
properties:
compatible:
enum:
- ti,am33xx-tilcdc
- ti,da850-tilcdc
reg:
maxItems: 1
interrupts:
maxItems: 1
port:
$ref: /schemas/graph.yaml#/properties/port
ti,hwmods:
$ref: /schemas/types.yaml#/definitions/string
description:
Name of the hwmod associated to the LCDC
max-bandwidth:
$ref: /schemas/types.yaml#/definitions/uint32
description:
The maximum pixels per second that the memory interface / lcd
controller combination can sustain
# maximum: 2048*2048*60
maximum: 251658240
max-width:
$ref: /schemas/types.yaml#/definitions/uint32
description:
The maximum horizontal pixel width supported by the lcd controller.
maximum: 2048
max-pixelclock:
$ref: /schemas/types.yaml#/definitions/uint32
description:
The maximum pixel clock that can be supported by the lcd controller
in KHz.
blue-and-red-wiring:
enum: [straight, crossed]
description:
This property deals with the LCDC revision 2 (found on AM335x)
color errata [1].
- "straight" indicates normal wiring that supports RGB565,
BGR888, and XBGR8888 color formats.
- "crossed" indicates wiring that has blue and red wires
crossed. This setup supports BGR565, RGB888 and XRGB8888
formats.
- If the property is not present or its value is not recognized
the legacy mode is assumed. This configuration supports RGB565,
RGB888 and XRGB8888 formats. However, depending on wiring, the red
and blue colors are swapped in either 16 or 24-bit color modes.
[1] There is an errata about AM335x color wiring. For 16-bit color
mode the wires work as they should (LCD_DATA[0:4] is for Blue[3:7]),
but for 24 bit color modes the wiring of blue and red components is
crossed and LCD_DATA[0:4] is for Red[3:7] and LCD_DATA[11:15] is
for Blue[3-7]. For more details see section 3.1.1 in AM335x
Silicon Errata
https://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sprz360
required:
- compatible
- interrupts
- reg
- port
additionalProperties: false
examples:
- |
display-controller@4830e000 {
compatible = "ti,am33xx-tilcdc";
reg = <0x4830e000 0x1000>;
interrupt-parent = <&intc>;
interrupts = <36>;
ti,hwmods = "lcdc";
blue-and-red-wiring = "crossed";
port {
endpoint {
remote-endpoint = <&hdmi_0>;
};
};
};

View File

@@ -1,82 +0,0 @@
Device-Tree bindings for tilcdc DRM driver
Required properties:
- compatible: value should be one of the following:
- "ti,am33xx-tilcdc" for AM335x based boards
- "ti,da850-tilcdc" for DA850/AM18x/OMAP-L138 based boards
- interrupts: the interrupt number
- reg: base address and size of the LCDC device
Recommended properties:
- ti,hwmods: Name of the hwmod associated to the LCDC
Optional properties:
- max-bandwidth: The maximum pixels per second that the memory
interface / lcd controller combination can sustain
- max-width: The maximum horizontal pixel width supported by
the lcd controller.
- max-pixelclock: The maximum pixel clock that can be supported
by the lcd controller in KHz.
- blue-and-red-wiring: Recognized values "straight" or "crossed".
This property deals with the LCDC revision 2 (found on AM335x)
color errata [1].
- "straight" indicates normal wiring that supports RGB565,
BGR888, and XBGR8888 color formats.
- "crossed" indicates wiring that has blue and red wires
crossed. This setup supports BGR565, RGB888 and XRGB8888
formats.
- If the property is not present or its value is not recognized
the legacy mode is assumed. This configuration supports RGB565,
RGB888 and XRGB8888 formats. However, depending on wiring, the red
and blue colors are swapped in either 16 or 24-bit color modes.
Optional nodes:
- port/ports: to describe a connection to an external encoder. The
binding follows Documentation/devicetree/bindings/graph.txt and
supports a single port with a single endpoint.
- See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml for connecting
tfp410 DVI encoder or lcd panel to lcdc
[1] There is an errata about AM335x color wiring. For 16-bit color mode
the wires work as they should (LCD_DATA[0:4] is for Blue[3:7]),
but for 24 bit color modes the wiring of blue and red components is
crossed and LCD_DATA[0:4] is for Red[3:7] and LCD_DATA[11:15] is
for Blue[3-7]. For more details see section 3.1.1 in AM335x
Silicon Errata:
https://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sprz360
Example:
fb: fb@4830e000 {
compatible = "ti,am33xx-tilcdc", "ti,da850-tilcdc";
reg = <0x4830e000 0x1000>;
interrupt-parent = <&intc>;
interrupts = <36>;
ti,hwmods = "lcdc";
blue-and-red-wiring = "crossed";
port {
lcdc_0: endpoint {
remote-endpoint = <&hdmi_0>;
};
};
};
tda19988: tda19988 {
compatible = "nxp,tda998x";
reg = <0x70>;
pinctrl-names = "default", "off";
pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
port {
hdmi_0: endpoint {
remote-endpoint = <&lcdc_0>;
};
};
};

View File

@@ -0,0 +1,122 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/verisilicon,dc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Verisilicon DC-series display controllers
maintainers:
- Icenowy Zheng <uwu@icenowy.me>
properties:
$nodename:
pattern: "^display@[0-9a-f]+$"
compatible:
items:
- enum:
- thead,th1520-dc8200
- const: verisilicon,dc # DC IPs have discoverable ID/revision registers
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
items:
- description: DC Core clock
- description: DMA AXI bus clock
- description: Configuration AHB bus clock
- description: Pixel clock of output 0
- description: Pixel clock of output 1
clock-names:
items:
- const: core
- const: axi
- const: ahb
- const: pix0
- const: pix1
resets:
items:
- description: DC Core reset
- description: DMA AXI bus reset
- description: Configuration AHB bus reset
reset-names:
items:
- const: core
- const: axi
- const: ahb
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: The first output channel, endpoint 0 should be
used for DPI format output and endpoint 1 should be used
for DP format output.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: The second output channel if the DC variant
supports it. Follows the same endpoint addressing rule as
the first port.
required:
- compatible
- reg
- interrupts
- clocks
- clock-names
- ports
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/thead,th1520-clk-ap.h>
#include <dt-bindings/reset/thead,th1520-reset.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
display@ffef600000 {
compatible = "thead,th1520-dc8200", "verisilicon,dc";
reg = <0xff 0xef600000 0x0 0x100000>;
interrupts = <93 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_vo CLK_DPU_CCLK>,
<&clk_vo CLK_DPU_ACLK>,
<&clk_vo CLK_DPU_HCLK>,
<&clk_vo CLK_DPU_PIXELCLK0>,
<&clk_vo CLK_DPU_PIXELCLK1>;
clock-names = "core", "axi", "ahb", "pix0", "pix1";
resets = <&rst TH1520_RESET_ID_DPU_CORE>,
<&rst TH1520_RESET_ID_DPU_AXI>,
<&rst TH1520_RESET_ID_DPU_AHB>;
reset-names = "core", "axi", "ahb";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@1 {
reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
dpu_out_dp1: endpoint@1 {
reg = <1>;
remote-endpoint = <&hdmi_in>;
};
};
};
};
};

View File

@@ -1761,6 +1761,8 @@ patternProperties:
description: Variscite Ltd.
"^vdl,.*":
description: Van der Laan b.v.
"^verisilicon,.*":
description: VeriSilicon Microelectronics (Shanghai) Co., Ltd.
"^vertexcom,.*":
description: Vertexcom Technologies, Inc.
"^via,.*":

View File

@@ -526,8 +526,14 @@ DRM GPUVM Function References
DRM Buddy Allocator
===================
DRM Buddy Function References
-----------------------------
Buddy Allocator Function References (GPU buddy)
-----------------------------------------------
.. kernel-doc:: drivers/gpu/buddy.c
:export:
DRM Buddy Specific Logging Function References
----------------------------------------------
.. kernel-doc:: drivers/gpu/drm/drm_buddy.c
:export:

View File

@@ -8792,6 +8792,14 @@ F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
DRM DRIVERS FOR VERISILICON DISPLAY CONTROLLER IP
M: Icenowy Zheng <zhengxingda@iscas.ac.cn>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/verisilicon,dc.yaml
F: drivers/gpu/drm/verisilicon/
DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach <l.stach@pengutronix.de>
R: Russell King <linux+etnaviv@armlinux.org.uk>
@@ -8904,15 +8912,17 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
DRM BUDDY ALLOCATOR
GPU BUDDY ALLOCATOR
M: Matthew Auld <matthew.auld@intel.com>
M: Arun Pravin <arunpravin.paneerselvam@amd.com>
R: Christian Koenig <christian.koenig@amd.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_buddy.c
F: drivers/gpu/drm/tests/drm_buddy_test.c
F: drivers/gpu/drm_buddy.c
F: drivers/gpu/buddy.c
F: drivers/gpu/tests/gpu_buddy_test.c
F: include/linux/gpu_buddy.h
F: include/drm/drm_buddy.h
DRM AUTOMATED TESTING
@@ -10850,6 +10860,7 @@ L: chrome-platform@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
F: drivers/firmware/google/
F: include/linux/coreboot.h
GOOGLE TENSOR SoC SUPPORT
M: Peter Griffin <peter.griffin@linaro.org>
@@ -22829,6 +22840,7 @@ F: Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml
F: arch/riscv/boot/dts/thead/
F: drivers/clk/thead/clk-th1520-ap.c
F: drivers/firmware/thead,th1520-aon.c
F: drivers/gpu/drm/bridge/th1520-dw-hdmi.c
F: drivers/mailbox/mailbox-th1520.c
F: drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
F: drivers/pinctrl/pinctrl-th1520.c

View File

@@ -40,18 +40,6 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
config DMABUF_MOVE_NOTIFY
bool "Move notify between drivers (EXPERIMENTAL)"
default n
depends on DMA_SHARED_BUFFER
help
Don't pin buffers if the dynamic DMA-buf interface is available on
both the exporter as well as the importer. This fixes a security
problem where userspace is able to pin unrestricted amounts of memory
through DMA-buf.
This is marked experimental because we don't yet have a consistent
execution context and memory management between drivers.
config DMABUF_DEBUG
bool "DMA-BUF debug checks"
depends on DMA_SHARED_BUFFER

View File

@@ -916,8 +916,7 @@ static bool
dma_buf_pin_on_map(struct dma_buf_attachment *attach)
{
return attach->dmabuf->ops->pin &&
(!dma_buf_attachment_is_dynamic(attach) ||
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
!dma_buf_attachment_is_dynamic(attach);
}
/**
@@ -981,7 +980,7 @@ dma_buf_pin_on_map(struct dma_buf_attachment *attach)
* 3. Exporters must hold the dma-buf reservation lock when calling these
* functions:
*
* - dma_buf_move_notify()
* - dma_buf_invalidate_mappings()
*/
/**
@@ -1017,9 +1016,6 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
if (WARN_ON(importer_ops && !importer_ops->move_notify))
return ERR_PTR(-EINVAL);
attach = kzalloc_obj(*attach);
if (!attach)
return ERR_PTR(-ENOMEM);
@@ -1130,7 +1126,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
*
* This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
* any mapping of @attach again and inform the importer through
* &dma_buf_attach_ops.move_notify.
* &dma_buf_attach_ops.invalidate_mappings.
*/
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
@@ -1323,24 +1319,71 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
* dma_buf_attach_revocable - check if a DMA-buf importer implements
* revoke semantics.
* @attach: the DMA-buf attachment to check
*
* Returns true if the DMA-buf importer can support the revoke sequence
* explained in dma_buf_invalidate_mappings() within bounded time. Meaning the
* importer implements invalidate_mappings() and ensures that unmap is called as
* a result.
*/
bool dma_buf_attach_revocable(struct dma_buf_attachment *attach)
{
return attach->importer_ops &&
attach->importer_ops->invalidate_mappings;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach_revocable, "DMA_BUF");
/**
* dma_buf_invalidate_mappings - notify attachments that DMA-buf is moving
*
* @dmabuf: [in] buffer which is moving
*
* Informs all attachments that they need to destroy and recreate all their
* mappings.
* mappings. If the attachment is dynamic then the dynamic importer is expected
* to invalidate any caches it has of the mapping result and perform a new
* mapping request before allowing HW to do any further DMA.
*
* If the attachment is pinned then this informs the pinned importer that the
underlying mapping is no longer available. Pinned importers may take this
* as a permanent revocation and never establish new mappings so exporters
* should not trigger it lightly.
*
* Upon return importers may continue to access the DMA-buf memory. The caller
* must do two additional waits to ensure that the memory is no longer being
* accessed:
* 1) Until dma_resv_wait_timeout() retires fences the importer is allowed to
* fully access the memory.
* 2) Until the importer calls unmap it is allowed to speculatively
* read-and-discard the memory. It must not write to the memory.
*
* A caller wishing to use dma_buf_invalidate_mappings() to fully stop access to
* the DMA-buf must wait for both. Dynamic callers can often use just the first.
*
All importers providing an invalidate_mappings() op must ensure that unmap is
* called within bounded time after the op.
*
Pinned importers that do not support an invalidate_mappings() op will
eventually perform unmap when they are done with the buffer, which may be an
unbounded time from calling this function. dma_buf_attach_revocable() can be
* used to prevent such importers from attaching.
*
* Importers are free to request a new mapping in parallel as this function
* returns.
*/
void dma_buf_move_notify(struct dma_buf *dmabuf)
void dma_buf_invalidate_mappings(struct dma_buf *dmabuf)
{
struct dma_buf_attachment *attach;
dma_resv_assert_held(dmabuf->resv);
list_for_each_entry(attach, &dmabuf->attachments, node)
if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
if (attach->importer_ops &&
attach->importer_ops->invalidate_mappings)
attach->importer_ops->invalidate_mappings(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
EXPORT_SYMBOL_NS_GPL(dma_buf_invalidate_mappings, "DMA_BUF");
/**
* DOC: cpu access

View File

@@ -200,15 +200,28 @@ void dma_fence_array_init(struct dma_fence_array *array,
u64 context, unsigned seqno,
bool signal_on_any)
{
static struct lock_class_key dma_fence_array_lock_key;
WARN_ON(!num_fences || !fences);
array->num_fences = num_fences;
spin_lock_init(&array->lock);
dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
context, seqno);
dma_fence_init(&array->base, &dma_fence_array_ops, NULL, context,
seqno);
init_irq_work(&array->work, irq_dma_fence_array_work);
/*
* dma_fence_array_enable_signaling() is invoked while holding
* array->base.inline_lock and may call dma_fence_add_callback()
* on the underlying fences, which takes their inline_lock.
*
* Since both locks share the same lockdep class, this legitimate
* nesting confuses lockdep and triggers a recursive locking
* warning. Assign a separate lockdep class to the array lock
* to model this hierarchy correctly.
*/
lockdep_set_class(&array->base.inline_lock, &dma_fence_array_lock_key);
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
array->fences = fences;

View File

@@ -242,10 +242,10 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
struct dma_fence *fence,
uint64_t seqno)
{
static struct lock_class_key dma_fence_chain_lock_key;
struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
uint64_t context;
spin_lock_init(&chain->lock);
rcu_assign_pointer(chain->prev, prev);
chain->fence = fence;
chain->prev_seqno = 0;
@@ -261,9 +261,21 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
seqno = max(prev->seqno, seqno);
}
dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
dma_fence_init64(&chain->base, &dma_fence_chain_ops, NULL,
context, seqno);
/*
* dma_fence_chain_enable_signaling() is invoked while holding
* chain->base.inline_lock and may call dma_fence_add_callback()
* on the underlying fences, which takes their inline_lock.
*
* Since both locks share the same lockdep class, this legitimate
* nesting confuses lockdep and triggers a recursive locking
* warning. Assign a separate lockdep class to the chain lock
* to model this hierarchy correctly.
*/
lockdep_set_class(&chain->base.inline_lock, &dma_fence_chain_lock_key);
/*
* Chaining dma_fence_chain container together is only allowed through
* the prev fence and not through the contained fence.

View File

@@ -24,7 +24,6 @@ EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;
/*
@@ -123,12 +122,9 @@ static const struct dma_fence_ops dma_fence_stub_ops = {
static int __init dma_fence_init_stub(void)
{
dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
&dma_fence_stub_lock, 0, 0);
dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops, NULL, 0, 0);
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&dma_fence_stub.flags);
dma_fence_signal(&dma_fence_stub);
return 0;
}
@@ -160,11 +156,7 @@ struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
if (fence == NULL)
return NULL;
dma_fence_init(fence,
&dma_fence_stub_ops,
&dma_fence_stub_lock,
0, 0);
dma_fence_init(fence, &dma_fence_stub_ops, NULL, 0, 0);
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
@@ -343,7 +335,6 @@ void __dma_fence_might_wait(void)
}
#endif
/**
* dma_fence_signal_timestamp_locked - signal completion of a fence
* @fence: the fence to signal
@@ -362,15 +353,25 @@ void __dma_fence_might_wait(void)
void dma_fence_signal_timestamp_locked(struct dma_fence *fence,
ktime_t timestamp)
{
const struct dma_fence_ops *ops;
struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
lockdep_assert_held(fence->lock);
dma_fence_assert_held(fence);
if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags)))
return;
/*
* When neither a release nor a wait operation is specified, set the ops
* pointer to NULL to allow the fence structure to become independent
* from who originally issued it.
*/
ops = rcu_dereference_protected(fence->ops, true);
if (!ops->release && !ops->wait)
RCU_INIT_POINTER(fence->ops, NULL);
/* Stash the cb_list before replacing it with the timestamp */
list_replace(&fence->cb_list, &cb_list);
@@ -404,9 +405,9 @@ void dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
if (WARN_ON(!fence))
return;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
dma_fence_signal_timestamp_locked(fence, timestamp);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);
@@ -465,9 +466,9 @@ bool dma_fence_check_and_signal(struct dma_fence *fence)
unsigned long flags;
bool ret;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
ret = dma_fence_check_and_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
@@ -493,9 +494,9 @@ void dma_fence_signal(struct dma_fence *fence)
tmp = dma_fence_begin_signalling();
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
dma_fence_signal_timestamp_locked(fence, ktime_get());
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
dma_fence_end_signalling(tmp);
}
@@ -522,6 +523,7 @@ EXPORT_SYMBOL(dma_fence_signal);
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
const struct dma_fence_ops *ops;
signed long ret;
if (WARN_ON(timeout < 0))
@@ -533,15 +535,22 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
dma_fence_enable_sw_signaling(fence);
if (trace_dma_fence_wait_start_enabled()) {
rcu_read_lock();
trace_dma_fence_wait_start(fence);
rcu_read_lock();
ops = rcu_dereference(fence->ops);
trace_dma_fence_wait_start(fence);
if (ops && ops->wait) {
/*
* Implementing the wait ops is deprecated and not supported for
* issuers of fences who need their lifetime to be independent
* of their module after they signal, so it is ok to use the
* ops outside the RCU protected section.
*/
rcu_read_unlock();
ret = ops->wait(fence, intr, timeout);
} else {
rcu_read_unlock();
}
if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);
else
ret = dma_fence_default_wait(fence, intr, timeout);
}
if (trace_dma_fence_wait_end_enabled()) {
rcu_read_lock();
trace_dma_fence_wait_end(fence);
@@ -562,6 +571,7 @@ void dma_fence_release(struct kref *kref)
{
struct dma_fence *fence =
container_of(kref, struct dma_fence, refcount);
const struct dma_fence_ops *ops;
rcu_read_lock();
trace_dma_fence_destroy(fence);
@@ -587,18 +597,18 @@ void dma_fence_release(struct kref *kref)
* don't leave chains dangling. We set the error flag first
* so that the callbacks know this signal is due to an error.
*/
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
fence->error = -EDEADLK;
dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
}
rcu_read_unlock();
if (fence->ops->release)
fence->ops->release(fence);
ops = rcu_dereference(fence->ops);
if (ops && ops->release)
ops->release(fence);
else
dma_fence_free(fence);
rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_release);
@@ -617,9 +627,10 @@ EXPORT_SYMBOL(dma_fence_free);
static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
const struct dma_fence_ops *ops;
bool was_set;
lockdep_assert_held(fence->lock);
dma_fence_assert_held(fence);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
@@ -627,14 +638,18 @@ static bool __dma_fence_enable_signaling(struct dma_fence *fence)
if (dma_fence_test_signaled_flag(fence))
return false;
if (!was_set && fence->ops->enable_signaling) {
rcu_read_lock();
ops = rcu_dereference(fence->ops);
if (!was_set && ops && ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
if (!ops->enable_signaling(fence)) {
rcu_read_unlock();
dma_fence_signal_locked(fence);
return false;
}
}
rcu_read_unlock();
return true;
}
@@ -651,9 +666,9 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
__dma_fence_enable_signaling(fence);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -693,8 +708,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
return -ENOENT;
}
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
@@ -702,8 +716,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
INIT_LIST_HEAD(&cb->node);
ret = -ENOENT;
}
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
@@ -726,9 +739,9 @@ int dma_fence_get_status(struct dma_fence *fence)
unsigned long flags;
int status;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
status = dma_fence_get_status_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
return status;
}
@@ -758,13 +771,11 @@ dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
unsigned long flags;
bool ret;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
ret = !list_empty(&cb->node);
if (ret)
list_del_init(&cb->node);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
@@ -803,7 +814,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
unsigned long flags;
signed long ret = timeout ? timeout : 1;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (dma_fence_test_signaled_flag(fence))
goto out;
@@ -827,11 +838,11 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
__set_current_state(TASK_INTERRUPTIBLE);
else
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
ret = schedule_timeout(ret);
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (ret > 0 && intr && signal_pending(current))
ret = -ERESTARTSYS;
}
@@ -841,7 +852,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
__set_current_state(TASK_RUNNING);
out:
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
@@ -1007,8 +1018,13 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
*/
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
fence->ops->set_deadline(fence, deadline);
const struct dma_fence_ops *ops;
rcu_read_lock();
ops = rcu_dereference(fence->ops);
if (ops && ops->set_deadline && !dma_fence_is_signaled(fence))
ops->set_deadline(fence, deadline);
rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_set_deadline);
@@ -1045,16 +1061,26 @@ static void
__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
kref_init(&fence->refcount);
fence->ops = ops;
/*
* While it is counter intuitive to protect a constant function pointer
* table by RCU it allows modules to wait for an RCU grace period
* before they unload, to make sure that nobody is executing their
* functions any more.
*/
RCU_INIT_POINTER(fence->ops, ops);
INIT_LIST_HEAD(&fence->cb_list);
fence->lock = lock;
fence->context = context;
fence->seqno = seqno;
fence->flags = flags;
fence->flags = flags | BIT(DMA_FENCE_FLAG_INITIALIZED_BIT);
if (lock) {
fence->extern_lock = lock;
} else {
spin_lock_init(&fence->inline_lock);
fence->flags |= BIT(DMA_FENCE_FLAG_INLINE_LOCK_BIT);
}
fence->error = 0;
trace_dma_fence_init(fence);
@@ -1064,7 +1090,7 @@ __dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
* dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize
* @ops: the dma_fence_ops for operations on this fence
* @lock: the irqsafe spinlock to use for locking this fence
* @lock: optional irqsafe spinlock to use for locking this fence
* @context: the execution context this fence is run on
* @seqno: a linear increasing sequence number for this context
*
@@ -1074,6 +1100,10 @@ __dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
*
* context and seqno are used for easy comparison between fences, allowing
* to check which fence is later by simply using dma_fence_later().
*
* It is strongly discouraged to provide an external lock because this couples
* lock and fence life time. This is only allowed for legacy use cases when
* multiple fences need to be prevented from signaling out of order.
*/
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
@@ -1087,7 +1117,7 @@ EXPORT_SYMBOL(dma_fence_init);
* dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
* @fence: the fence to initialize
* @ops: the dma_fence_ops for operations on this fence
* @lock: the irqsafe spinlock to use for locking this fence
* @lock: optional irqsafe spinlock to use for locking this fence
* @context: the execution context this fence is run on
* @seqno: a linear increasing sequence number for this context
*
@@ -1097,6 +1127,10 @@ EXPORT_SYMBOL(dma_fence_init);
*
* Context and seqno are used for easy comparison between fences, allowing
* to check which fence is later by simply using dma_fence_later().
*
* It is strongly discouraged to provide an external lock because this couples
* lock and fence life time. This is only allowed for legacy use cases when
* multiple fences need to be prevented from signaling out of order.
*/
void
dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
@@ -1129,13 +1163,14 @@ EXPORT_SYMBOL(dma_fence_init64);
*/
const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"RCU protection is required for safe access to returned string");
const struct dma_fence_ops *ops;
/* RCU protection is required for safe access to returned string */
ops = rcu_dereference(fence->ops);
if (!dma_fence_test_signaled_flag(fence))
return fence->ops->get_driver_name(fence);
return (const char __rcu *)ops->get_driver_name(fence);
else
return "detached-driver";
return (const char __rcu *)"detached-driver";
}
EXPORT_SYMBOL(dma_fence_driver_name);
@@ -1161,12 +1196,13 @@ EXPORT_SYMBOL(dma_fence_driver_name);
*/
const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"RCU protection is required for safe access to returned string");
const struct dma_fence_ops *ops;
/* RCU protection is required for safe access to returned string */
ops = rcu_dereference(fence->ops);
if (!dma_fence_test_signaled_flag(fence))
return fence->ops->get_timeline_name(fence);
return (const char __rcu *)ops->get_driver_name(fence);
else
return "signaled-timeline";
return (const char __rcu *)"signaled-timeline";
}
EXPORT_SYMBOL(dma_fence_timeline_name);

View File

@@ -14,43 +14,26 @@
#include "selftest.h"
static struct kmem_cache *slab_fences;
static struct mock_fence {
struct dma_fence base;
struct spinlock lock;
} *to_mock_fence(struct dma_fence *f) {
return container_of(f, struct mock_fence, base);
}
static const char *mock_name(struct dma_fence *f)
{
return "mock";
}
static void mock_fence_release(struct dma_fence *f)
{
kmem_cache_free(slab_fences, to_mock_fence(f));
}
static const struct dma_fence_ops mock_ops = {
.get_driver_name = mock_name,
.get_timeline_name = mock_name,
.release = mock_fence_release,
};
static struct dma_fence *mock_fence(void)
{
struct mock_fence *f;
struct dma_fence *f;
f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
f = kmalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
spin_lock_init(&f->lock);
dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
return &f->base;
dma_fence_init(f, &mock_ops, NULL, 0, 0);
return f;
}
static int sanitycheck(void *arg)
@@ -100,6 +83,11 @@ static int test_signaling(void *arg)
goto err_free;
}
if (rcu_dereference_protected(f->ops, true)) {
pr_err("Fence ops not cleared on signal\n");
goto err_free;
}
err = 0;
err_free:
dma_fence_put(f);
@@ -410,8 +398,10 @@ struct race_thread {
static void __wait_for_callbacks(struct dma_fence *f)
{
spin_lock_irq(f->lock);
spin_unlock_irq(f->lock);
unsigned long flags;
dma_fence_lock_irqsave(f, flags);
dma_fence_unlock_irqrestore(f, flags);
}
static int thread_signal_callback(void *arg)
@@ -538,19 +528,7 @@ int dma_fence(void)
SUBTEST(test_stub),
SUBTEST(race_signal_callback),
};
int ret;
pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));
slab_fences = KMEM_CACHE(mock_fence,
SLAB_TYPESAFE_BY_RCU |
SLAB_HWCACHE_ALIGN);
if (!slab_fences)
return -ENOMEM;
ret = subtests(tests, NULL);
kmem_cache_destroy(slab_fences);
return ret;
return subtests(tests, NULL);
}

View File

@@ -156,12 +156,12 @@ static void timeline_fence_release(struct dma_fence *fence)
struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (!list_empty(&pt->link)) {
list_del(&pt->link);
rb_erase(&pt->node, &parent->pt_tree);
}
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
sync_timeline_put(parent);
dma_fence_free(fence);
@@ -179,7 +179,7 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
if (ktime_before(deadline, pt->deadline))
pt->deadline = deadline;
@@ -187,7 +187,7 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
pt->deadline = deadline;
__set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
}
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
}
static const struct dma_fence_ops timeline_fence_ops = {
@@ -431,13 +431,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
goto put_fence;
}
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
ret = -ENOENT;
goto unlock;
}
data.deadline_ns = ktime_to_ns(pt->deadline);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
dma_fence_put(fence);
@@ -450,7 +450,7 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return 0;
unlock:
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
put_fence:
dma_fence_put(fence);

View File

@@ -47,7 +47,7 @@ struct sync_timeline {
static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
return container_of(fence->lock, struct sync_timeline, lock);
return container_of(fence->extern_lock, struct sync_timeline, lock);
}
/**

View File

@@ -59,11 +59,12 @@ config GOOGLE_MEMCONSOLE_X86_LEGACY
config GOOGLE_FRAMEBUFFER_COREBOOT
tristate "Coreboot Framebuffer"
depends on FB_SIMPLE || DRM_SIMPLEDRM
depends on GOOGLE_COREBOOT_TABLE
help
This option enables the kernel to search for a framebuffer in
the coreboot table. If found, it is registered with simplefb.
the coreboot table. If found, it is registered with a platform
device of type coreboot-framebuffer. Using the old device of
type simple-framebuffer is deprecated.
config GOOGLE_MEMCONSOLE_COREBOOT
tristate "Firmware Memory Console"

View File

@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

View File

@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -21,6 +22,16 @@
#include "coreboot_table.h"
/* Coreboot table header structure */
struct coreboot_table_header {
char signature[4];
u32 header_bytes;
u32 header_checksum;
u32 table_bytes;
u32 table_checksum;
u32 table_entries;
};
#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
#define CB_DRV(d) container_of_const(d, struct coreboot_driver, drv)
@@ -251,7 +262,7 @@ static void __exit coreboot_table_driver_exit(void)
bus_unregister(&coreboot_bus_type);
}
module_init(coreboot_table_driver_init);
subsys_initcall(coreboot_table_driver_init);
module_exit(coreboot_table_driver_exit);
MODULE_AUTHOR("Google, Inc.");

View File

@@ -12,65 +12,8 @@
#ifndef __COREBOOT_TABLE_H
#define __COREBOOT_TABLE_H
#include <linux/coreboot.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
/* Coreboot table header structure */
struct coreboot_table_header {
char signature[4];
u32 header_bytes;
u32 header_checksum;
u32 table_bytes;
u32 table_checksum;
u32 table_entries;
};
/* List of coreboot entry structures that is used */
/* Generic */
struct coreboot_table_entry {
u32 tag;
u32 size;
};
/* Points to a CBMEM entry */
struct lb_cbmem_ref {
u32 tag;
u32 size;
u64 cbmem_addr;
};
#define LB_TAG_CBMEM_ENTRY 0x31
/* Corresponds to LB_TAG_CBMEM_ENTRY */
struct lb_cbmem_entry {
u32 tag;
u32 size;
u64 address;
u32 entry_size;
u32 id;
};
/* Describes framebuffer setup by coreboot */
struct lb_framebuffer {
u32 tag;
u32 size;
u64 physical_address;
u32 x_resolution;
u32 y_resolution;
u32 bytes_per_line;
u8 bits_per_pixel;
u8 red_mask_pos;
u8 red_mask_size;
u8 green_mask_pos;
u8 green_mask_size;
u8 blue_mask_pos;
u8 blue_mask_size;
u8 reserved_mask_pos;
u8 reserved_mask_size;
};
/* A device, additionally with information from coreboot. */
struct coreboot_device {

View File

@@ -12,30 +12,87 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/sysfb.h>
#include "coreboot_table.h"
#define CB_TAG_FRAMEBUFFER 0x12
#if defined(CONFIG_PCI)
static bool framebuffer_pci_dev_is_enabled(struct pci_dev *pdev)
{
/*
* TODO: Try to integrate this code into the PCI subsystem
*/
int ret;
u16 command;
static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
ret = pci_read_config_word(pdev, PCI_COMMAND, &command);
if (ret != PCIBIOS_SUCCESSFUL)
return false;
if (!(command & PCI_COMMAND_MEMORY))
return false;
return true;
}
static struct pci_dev *framebuffer_parent_pci_dev(struct resource *res)
{
struct pci_dev *pdev = NULL;
const struct resource *r = NULL;
while (!r && (pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev)))
r = pci_find_resource(pdev, res);
if (!r || !pdev)
return NULL; /* not found; not an error */
if (!framebuffer_pci_dev_is_enabled(pdev)) {
pci_dev_put(pdev);
return ERR_PTR(-ENODEV);
}
return pdev;
}
#else
static struct pci_dev *framebuffer_parent_pci_dev(struct resource *res)
{
return NULL;
}
#endif
static struct device *framebuffer_parent_dev(struct resource *res)
{
struct pci_dev *pdev;
pdev = framebuffer_parent_pci_dev(res);
if (IS_ERR(pdev))
return ERR_CAST(pdev);
else if (pdev)
return &pdev->dev;
return NULL;
}
static int framebuffer_probe(struct coreboot_device *dev)
{
int i;
u32 length;
struct lb_framebuffer *fb = &dev->framebuffer;
struct device *parent;
struct platform_device *pdev;
struct resource res;
int ret;
#if !IS_ENABLED(CONFIG_DRM_COREBOOTDRM)
struct simplefb_platform_data pdata = {
.width = fb->x_resolution,
.height = fb->y_resolution,
.stride = fb->bytes_per_line,
.format = NULL,
};
int i;
static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
#endif
/*
* On coreboot systems, the advertised LB_TAG_FRAMEBUFFER entry
@@ -53,6 +110,29 @@ static int framebuffer_probe(struct coreboot_device *dev)
if (!fb->physical_address)
return -ENODEV;
res = DEFINE_RES_MEM(fb->physical_address,
PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line));
if (res.end <= res.start)
return -EINVAL;
parent = framebuffer_parent_dev(&res);
if (IS_ERR(parent))
return PTR_ERR(parent);
#if IS_ENABLED(CONFIG_DRM_COREBOOTDRM)
pdev = platform_device_register_resndata(parent, "coreboot-framebuffer", 0,
&res, 1, fb, fb->size);
if (IS_ERR(pdev)) {
pr_warn("coreboot: could not register framebuffer\n");
ret = PTR_ERR(pdev);
goto out_put_device_parent;
}
#else
/*
* FIXME: Coreboot systems should use a driver that binds to
* coreboot-framebuffer devices. Remove support for
* simple-framebuffer at some point.
*/
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
if (fb->bits_per_pixel == formats[i].bits_per_pixel &&
fb->red_mask_pos == formats[i].red.offset &&
@@ -63,35 +143,28 @@ static int framebuffer_probe(struct coreboot_device *dev)
fb->blue_mask_size == formats[i].blue.length)
pdata.format = formats[i].name;
}
if (!pdata.format)
return -ENODEV;
if (!pdata.format) {
ret = -ENODEV;
goto out_put_device_parent;
}
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res.name = "Coreboot Framebuffer";
res.start = fb->physical_address;
length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line);
res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;
pdev = platform_device_register_resndata(&dev->dev,
pdev = platform_device_register_resndata(parent,
"simple-framebuffer", 0,
&res, 1, &pdata,
sizeof(pdata));
if (IS_ERR(pdev))
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
pr_warn("coreboot: could not register framebuffer\n");
else
dev_set_drvdata(&dev->dev, pdev);
goto out_put_device_parent;
}
#endif
return PTR_ERR_OR_ZERO(pdev);
}
ret = 0;
static void framebuffer_remove(struct coreboot_device *dev)
{
struct platform_device *pdev = dev_get_drvdata(&dev->dev);
platform_device_unregister(pdev);
out_put_device_parent:
if (parent)
put_device(parent);
return ret;
}
static const struct coreboot_device_id framebuffer_ids[] = {
@@ -102,7 +175,6 @@ MODULE_DEVICE_TABLE(coreboot, framebuffer_ids);
static struct coreboot_driver framebuffer_driver = {
.probe = framebuffer_probe,
.remove = framebuffer_remove,
.drv = {
.name = "framebuffer",
},

View File

@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "memconsole.h"

View File

@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

13
drivers/gpu/Kconfig Normal file
View File

@@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
config GPU_BUDDY
bool
help
A page based buddy allocator for GPU memory.
config GPU_BUDDY_KUNIT_TEST
tristate "KUnit tests for GPU buddy allocator" if !KUNIT_ALL_TESTS
depends on GPU_BUDDY && KUNIT
default KUNIT_ALL_TESTS
help
KUnit tests for the GPU buddy allocator.

View File

@@ -2,7 +2,9 @@
# drm/tegra depends on host1x, so if both drivers are built-in care must be
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
obj-y += host1x/ drm/ vga/
# Similarly, buddy must come first since it is used by other drivers.
obj-$(CONFIG_GPU_BUDDY) += buddy.o
obj-y += host1x/ drm/ vga/ tests/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_TRACE_GPU_MEM) += trace/
obj-$(CONFIG_NOVA_CORE) += nova-core/

1323
drivers/gpu/buddy.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -55,7 +55,7 @@ config DRM_DRAW
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
depends on DRM
depends on DRM && PRINTK
select FONT_SUPPORT
select DRM_DRAW
help
@@ -220,6 +220,7 @@ config DRM_GPUSVM
config DRM_BUDDY
tristate
depends on DRM
select GPU_BUDDY
help
A page based buddy allocator
@@ -269,10 +270,6 @@ config DRM_SCHED
config DRM_PANEL_BACKLIGHT_QUIRKS
tristate
config DRM_LIB_RANDOM
bool
default n
config DRM_PRIVACY_SCREEN
bool
default n
@@ -335,6 +332,7 @@ source "drivers/gpu/drm/udl/Kconfig"
source "drivers/gpu/drm/v3d/Kconfig"
source "drivers/gpu/drm/vboxvideo/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
source "drivers/gpu/drm/verisilicon/Kconfig"
source "drivers/gpu/drm/vgem/Kconfig"
source "drivers/gpu/drm/virtio/Kconfig"
source "drivers/gpu/drm/vkms/Kconfig"

View File

@@ -69,7 +69,6 @@ config DRM_KUNIT_TEST
select DRM_EXPORT_FOR_TESTS if m
select DRM_GEM_SHMEM_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
select DRM_SYSFB_HELPER
select PRIME_NUMBERS
default KUNIT_ALL_TESTS

View File

@@ -78,7 +78,6 @@ drm-$(CONFIG_DRM_CLIENT) += \
drm_client_event.o \
drm_client_modeset.o \
drm_client_sysrq.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
@@ -237,11 +236,12 @@ obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
obj-$(CONFIG_DRM_POWERVR) += imagination/
obj-$(CONFIG_DRM_VERISILICON_DC) += verisilicon/
# Ensure drm headers are self-contained and pass kernel-doc
hdrtest-files := \
$(shell cd $(src) && find . -maxdepth 1 -name 'drm_*.h') \
$(shell cd $(src) && find display lib -name '*.h')
$(shell cd $(src) && find display -name '*.h')
always-$(CONFIG_DRM_HEADER_TEST) += \
$(patsubst %.h,%.hdrtest, $(hdrtest-files))

View File

@@ -134,13 +134,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
* notifiers are disabled, only allow pinning in VRAM when move
* notiers are enabled.
*/
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
} else {
list_for_each_entry(attach, &dmabuf->attachments, node)
if (!attach->peer2peer)
domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
}
list_for_each_entry(attach, &dmabuf->attachments, node)
if (!attach->peer2peer)
domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
if (domains & AMDGPU_GEM_DOMAIN_VRAM)
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -456,7 +452,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
}
/**
* amdgpu_dma_buf_move_notify - &attach.move_notify implementation
* amdgpu_dma_buf_move_notify - &attach.invalidate_mappings implementation
*
* @attach: the DMA-buf attachment
*
@@ -534,7 +530,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
.allow_peer2peer = true,
.move_notify = amdgpu_dma_buf_move_notify
.invalidate_mappings = amdgpu_dma_buf_move_notify
};
/**

View File

@@ -289,9 +289,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
unsigned i;
/* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
if (job->base.s_fence &&
dma_fence_was_initialized(&job->base.s_fence->finished))
f = &job->base.s_fence->finished;
else if (job->hw_fence && job->hw_fence->base.ops)
else if (dma_fence_was_initialized(&job->hw_fence->base))
f = &job->hw_fence->base;
else
f = NULL;
@@ -308,11 +309,11 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
if (job->hw_fence->base.ops)
if (dma_fence_was_initialized(&job->hw_fence->base))
dma_fence_put(&job->hw_fence->base);
else
kfree(job->hw_fence);
if (job->hw_vm_fence->base.ops)
if (dma_fence_was_initialized(&job->hw_vm_fence->base))
dma_fence_put(&job->hw_vm_fence->base);
else
kfree(job->hw_vm_fence);
@@ -346,11 +347,11 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
if (job->hw_fence->base.ops)
if (dma_fence_was_initialized(&job->hw_fence->base))
dma_fence_put(&job->hw_fence->base);
else
kfree(job->hw_fence);
if (job->hw_vm_fence->base.ops)
if (dma_fence_was_initialized(&job->hw_vm_fence->base))
dma_fence_put(&job->hw_vm_fence->base);
else
kfree(job->hw_vm_fence);

View File

@@ -1274,7 +1274,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
dma_buf_invalidate_mappings(abo->tbo.base.dma_buf);
/* move_notify is called before move happens */
trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,

View File

@@ -5665,7 +5665,7 @@ int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct amdgpu_vram_mgr_resource *vres;
struct ras_critical_region *region;
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
int ret = 0;
if (!bo || !bo->tbo.resource)

View File

@@ -55,7 +55,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
uint64_t start, uint64_t size,
struct amdgpu_res_cursor *cur)
{
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
struct list_head *head, *next;
struct drm_mm_node *node;
@@ -71,7 +71,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
head = &to_amdgpu_vram_mgr_resource(res)->blocks;
block = list_first_entry_or_null(head,
struct drm_buddy_block,
struct gpu_buddy_block,
link);
if (!block)
goto fallback;
@@ -81,7 +81,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
next = block->link.next;
if (next != head)
block = list_entry(next, struct drm_buddy_block, link);
block = list_entry(next, struct gpu_buddy_block, link);
}
cur->start = amdgpu_vram_mgr_block_start(block) + start;
@@ -125,7 +125,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
*/
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
{
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
struct drm_mm_node *node;
struct list_head *next;
@@ -146,7 +146,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
block = cur->node;
next = block->link.next;
block = list_entry(next, struct drm_buddy_block, link);
block = list_entry(next, struct gpu_buddy_block, link);
cur->node = block;
cur->start = amdgpu_vram_mgr_block_start(block);
@@ -175,7 +175,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
*/
static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
{
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
switch (cur->mem_type) {
case TTM_PL_VRAM:

View File

@@ -479,10 +479,10 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (!dma_fence_is_signaled_locked(fence))
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)

View File

@@ -2785,8 +2785,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
/* Make sure that all fence callbacks have completed */
spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
dma_fence_lock_irqsave(vm->last_tlb_flush, flags);
dma_fence_unlock_irqrestore(vm->last_tlb_flush, flags);
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {

View File

@@ -639,7 +639,7 @@ static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
* sure that the dma_fence structure isn't freed up.
*/
rcu_read_lock();
lock = vm->last_tlb_flush->lock;
lock = dma_fence_spinlock(vm->last_tlb_flush);
rcu_read_unlock();
spin_lock_irqsave(lock, flags);

View File

@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/drm_drv.h>
#include <drm/drm_buddy.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -52,15 +53,15 @@ to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}
static inline struct drm_buddy_block *
static inline struct gpu_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
return list_first_entry_or_null(list, struct drm_buddy_block, link);
return list_first_entry_or_null(list, struct gpu_buddy_block, link);
}
static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
u64 start, size;
block = amdgpu_vram_mgr_first_block(head);
@@ -71,7 +72,7 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
start = amdgpu_vram_mgr_block_start(block);
size = amdgpu_vram_mgr_block_size(block);
block = list_entry(block->link.next, struct drm_buddy_block, link);
block = list_entry(block->link.next, struct gpu_buddy_block, link);
if (start + size != amdgpu_vram_mgr_block_start(block))
return false;
}
@@ -81,7 +82,7 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
{
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
u64 size = 0;
list_for_each_entry(block, head, link)
@@ -254,7 +255,7 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
* Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
*/
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
struct drm_buddy_block *block)
struct gpu_buddy_block *block)
{
u64 start = amdgpu_vram_mgr_block_start(block);
u64 end = start + amdgpu_vram_mgr_block_size(block);
@@ -279,7 +280,7 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
u64 usage = 0;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
@@ -299,15 +300,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct drm_buddy *mm = &mgr->mm;
struct gpu_buddy *mm = &mgr->mm;
struct amdgpu_vram_reservation *rsv, *temp;
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
uint64_t vis_usage;
list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
if (gpu_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
rsv->size, mm->chunk_size, &rsv->allocated,
DRM_BUDDY_RANGE_ALLOCATION))
GPU_BUDDY_RANGE_ALLOCATION))
continue;
block = amdgpu_vram_mgr_first_block(&rsv->allocated);
@@ -403,7 +404,7 @@ int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
uint64_t address, struct amdgpu_vram_block_info *info)
{
struct amdgpu_vram_mgr_resource *vres;
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
u64 start, size;
int ret = -ENOENT;
@@ -450,8 +451,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct amdgpu_vram_mgr_resource *vres;
u64 size, remaining_size, lpfn, fpfn;
unsigned int adjust_dcc_size = 0;
struct drm_buddy *mm = &mgr->mm;
struct drm_buddy_block *block;
struct gpu_buddy *mm = &mgr->mm;
struct gpu_buddy_block *block;
unsigned long pages_per_block;
int r;
@@ -493,17 +494,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
INIT_LIST_HEAD(&vres->blocks);
if (place->flags & TTM_PL_FLAG_TOPDOWN)
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
vres->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
vres->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
vres->flags |= GPU_BUDDY_CLEAR_ALLOCATION;
if (fpfn || lpfn != mgr->mm.size)
/* Allocate blocks in desired range */
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
vres->flags |= GPU_BUDDY_RANGE_ALLOCATION;
if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
adev->gmc.gmc_funcs->get_dcc_alignment)
@@ -516,7 +517,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
remaining_size = (u64)dcc_size;
vres->flags |= DRM_BUDDY_TRIM_DISABLE;
vres->flags |= GPU_BUDDY_TRIM_DISABLE;
}
mutex_lock(&mgr->lock);
@@ -536,7 +537,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
BUG_ON(min_block_size < mm->chunk_size);
r = drm_buddy_alloc_blocks(mm, fpfn,
r = gpu_buddy_alloc_blocks(mm, fpfn,
lpfn,
size,
min_block_size,
@@ -545,7 +546,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
!(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
vres->flags &= ~GPU_BUDDY_CONTIGUOUS_ALLOCATION;
pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
tbo->page_alignment);
@@ -566,7 +567,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
struct gpu_buddy_block *dcc_block;
unsigned long dcc_start;
u64 trim_start;
@@ -576,7 +577,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
adjust_dcc_size);
trim_start = (u64)dcc_start;
drm_buddy_block_trim(mm, &trim_start,
gpu_buddy_block_trim(mm, &trim_start,
(u64)vres->base.size,
&vres->blocks);
}
@@ -614,7 +615,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
return 0;
error_free_blocks:
drm_buddy_free_list(mm, &vres->blocks, 0);
gpu_buddy_free_list(mm, &vres->blocks, 0);
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -637,8 +638,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct drm_buddy *mm = &mgr->mm;
struct drm_buddy_block *block;
struct gpu_buddy *mm = &mgr->mm;
struct gpu_buddy_block *block;
uint64_t vis_usage = 0;
mutex_lock(&mgr->lock);
@@ -649,7 +650,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
gpu_buddy_free_list(mm, &vres->blocks, vres->flags);
amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
@@ -688,7 +689,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
if (!*sgt)
return -ENOMEM;
/* Determine the number of DRM_BUDDY blocks to export */
/* Determine the number of GPU_BUDDY blocks to export */
amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
@@ -704,10 +705,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
sg->length = 0;
/*
* Walk down DRM_BUDDY blocks to populate scatterlist nodes
* @note: Use iterator api to get first the DRM_BUDDY block
* Walk down GPU_BUDDY blocks to populate scatterlist nodes
* @note: Use iterator api to get first the GPU_BUDDY block
* and the number of bytes from it. Access the following
* DRM_BUDDY block(s) if more buffer needs to exported
* GPU_BUDDY block(s) if more buffer needs to exported
*/
amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
@@ -792,10 +793,10 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
{
struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
struct drm_buddy *mm = &mgr->mm;
struct gpu_buddy *mm = &mgr->mm;
mutex_lock(&mgr->lock);
drm_buddy_reset_clear(mm, false);
gpu_buddy_reset_clear(mm, false);
mutex_unlock(&mgr->lock);
}
@@ -815,7 +816,7 @@ static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
size_t size)
{
struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
/* Check each drm buddy block individually */
list_for_each_entry(block, &mgr->blocks, link) {
@@ -848,7 +849,7 @@ static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
size_t size)
{
struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
/* Check each drm buddy block individually */
list_for_each_entry(block, &mgr->blocks, link) {
@@ -877,7 +878,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct drm_buddy *mm = &mgr->mm;
struct gpu_buddy *mm = &mgr->mm;
struct amdgpu_vram_reservation *rsv;
drm_printf(printer, " vis usage:%llu\n",
@@ -930,7 +931,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
mgr->default_page_size = PAGE_SIZE;
man->func = &amdgpu_vram_mgr_func;
err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
err = gpu_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
if (err)
return err;
@@ -965,11 +966,11 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
kfree(rsv);
list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
gpu_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
kfree(rsv);
}
if (!adev->gmc.is_app_apu)
drm_buddy_fini(&mgr->mm);
gpu_buddy_fini(&mgr->mm);
mutex_unlock(&mgr->lock);
ttm_resource_manager_cleanup(man);

View File

@@ -24,11 +24,11 @@
#ifndef __AMDGPU_VRAM_MGR_H__
#define __AMDGPU_VRAM_MGR_H__
#include <drm/drm_buddy.h>
#include <linux/gpu_buddy.h>
struct amdgpu_vram_mgr {
struct ttm_resource_manager manager;
struct drm_buddy mm;
struct gpu_buddy mm;
/* protects access to buffer objects */
struct mutex lock;
struct list_head reservations_pending;
@@ -57,19 +57,19 @@ struct amdgpu_vram_mgr_resource {
struct amdgpu_vres_task task;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
static inline u64 amdgpu_vram_mgr_block_start(struct gpu_buddy_block *block)
{
return drm_buddy_block_offset(block);
return gpu_buddy_block_offset(block);
}
static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
static inline u64 amdgpu_vram_mgr_block_size(struct gpu_buddy_block *block)
{
return (u64)PAGE_SIZE << drm_buddy_block_order(block);
return (u64)PAGE_SIZE << gpu_buddy_block_order(block);
}
static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block)
static inline bool amdgpu_vram_mgr_is_cleared(struct gpu_buddy_block *block)
{
return drm_buddy_block_is_clear(block);
return gpu_buddy_block_is_clear(block);
}
static inline struct amdgpu_vram_mgr_resource *
@@ -82,8 +82,8 @@ static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
{
struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
ares->flags |= DRM_BUDDY_CLEARED;
WARN_ON(ares->flags & GPU_BUDDY_CLEARED);
ares->flags |= GPU_BUDDY_CLEARED;
}
int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,

View File

@@ -27,7 +27,7 @@ config HSA_AMD_SVM
config HSA_AMD_P2P
bool "HSA kernel driver support for peer-to-peer for AMD GPU devices"
depends on HSA_AMD && PCI_P2PDMA && DMABUF_MOVE_NOTIFY
depends on HSA_AMD && PCI_P2PDMA
help
Enable peer-to-peer (P2P) communication between AMD GPUs over
the PCIe bus. This can improve performance of multi-GPU compute

View File

@@ -91,6 +91,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_mode.h>
#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
@@ -3765,6 +3766,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
drm_object_property_set_value(&conn_base->base,
adev_to_drm(adev)->mode_config.panel_type_property,
caps->ext_caps->bits.oled ? DRM_MODE_PANEL_TYPE_OLED : DRM_MODE_PANEL_TYPE_UNKNOWN);
if (caps->ext_caps->bits.oled == 1
/*
* ||
@@ -9073,6 +9078,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (connector_type == DRM_MODE_CONNECTOR_eDP) {
struct drm_privacy_screen *privacy_screen;
drm_connector_attach_panel_type_property(&aconnector->base);
privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
if (!IS_ERR(privacy_screen)) {
drm_connector_attach_privacy_screen_provider(&aconnector->base,

View File

@@ -55,6 +55,10 @@ const u64 amdgpu_dm_supported_blnd_tfs =
#define LUT3D_SIZE 17
static const struct drm_colorop_funcs dm_colorop_funcs = {
.destroy = drm_colorop_destroy,
};
int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
{
struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
@@ -72,7 +76,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &dm_colorop_funcs,
amdgpu_dm_supported_degam_tfs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
@@ -89,7 +93,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
ret = drm_plane_colorop_mult_init(dev, ops[i], plane, &dm_colorop_funcs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
@@ -104,7 +109,9 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane,
&dm_colorop_funcs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
@@ -120,7 +127,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &dm_colorop_funcs,
amdgpu_dm_supported_shaper_tfs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
@@ -137,7 +144,9 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane,
&dm_colorop_funcs,
MAX_COLOR_LUT_ENTRIES,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
@@ -154,7 +163,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE,
ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane,
&dm_colorop_funcs, LUT3D_SIZE,
DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
@@ -172,7 +182,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &dm_colorop_funcs,
amdgpu_dm_supported_blnd_tfs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
@@ -189,7 +199,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, &dm_colorop_funcs,
MAX_COLOR_LUT_ENTRIES,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)

View File

@@ -4,6 +4,8 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
#include <linux/overflow.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem.h>
@@ -93,7 +95,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
kfb->afbc_size = kfb->offset_payload + n_blocks *
ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
AFBC_SUPERBLK_ALIGNMENT);
min_size = kfb->afbc_size + fb->offsets[0];
if (check_add_overflow(kfb->afbc_size, fb->offsets[0], &min_size)) {
goto check_failed;
}
if (min_size > obj->size) {
DRM_DEBUG_KMS("afbc size check failed, obj_size: 0x%zx. min_size 0x%llx.\n",
obj->size, min_size);

View File

@@ -128,6 +128,8 @@ struct komeda_component {
const struct komeda_component_funcs *funcs;
};
#define to_component(o) container_of(o, struct komeda_component, obj)
/**
* struct komeda_component_output
*

View File

@@ -40,7 +40,24 @@ komeda_layer_atomic_destroy_state(struct drm_private_obj *obj,
kfree(st);
}
static struct drm_private_state *
komeda_layer_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_layer_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
komeda_component_state_reset(&st->base);
st->base.component = to_component(obj);
return &st->base.obj;
}
static const struct drm_private_state_funcs komeda_layer_obj_funcs = {
.atomic_create_state = komeda_layer_atomic_create_state,
.atomic_duplicate_state = komeda_layer_atomic_duplicate_state,
.atomic_destroy_state = komeda_layer_atomic_destroy_state,
};
@@ -48,14 +65,7 @@ static const struct drm_private_state_funcs komeda_layer_obj_funcs = {
static int komeda_layer_obj_add(struct komeda_kms_dev *kms,
struct komeda_layer *layer)
{
struct komeda_layer_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->base.component = &layer->base;
drm_atomic_private_obj_init(&kms->base, &layer->base.obj, &st->base.obj,
drm_atomic_private_obj_init(&kms->base, &layer->base.obj, NULL,
&komeda_layer_obj_funcs);
return 0;
}
@@ -82,7 +92,24 @@ komeda_scaler_atomic_destroy_state(struct drm_private_obj *obj,
kfree(to_scaler_st(priv_to_comp_st(state)));
}
static struct drm_private_state *
komeda_scaler_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_scaler_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
komeda_component_state_reset(&st->base);
st->base.component = to_component(obj);
return &st->base.obj;
}
static const struct drm_private_state_funcs komeda_scaler_obj_funcs = {
.atomic_create_state = komeda_scaler_atomic_create_state,
.atomic_duplicate_state = komeda_scaler_atomic_duplicate_state,
.atomic_destroy_state = komeda_scaler_atomic_destroy_state,
};
@@ -90,15 +117,8 @@ static const struct drm_private_state_funcs komeda_scaler_obj_funcs = {
static int komeda_scaler_obj_add(struct komeda_kms_dev *kms,
struct komeda_scaler *scaler)
{
struct komeda_scaler_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->base.component = &scaler->base;
drm_atomic_private_obj_init(&kms->base,
&scaler->base.obj, &st->base.obj,
&scaler->base.obj, NULL,
&komeda_scaler_obj_funcs);
return 0;
}
@@ -125,7 +145,24 @@ komeda_compiz_atomic_destroy_state(struct drm_private_obj *obj,
kfree(to_compiz_st(priv_to_comp_st(state)));
}
static struct drm_private_state *
komeda_compiz_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_compiz_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
komeda_component_state_reset(&st->base);
st->base.component = to_component(obj);
return &st->base.obj;
}
static const struct drm_private_state_funcs komeda_compiz_obj_funcs = {
.atomic_create_state = komeda_compiz_atomic_create_state,
.atomic_duplicate_state = komeda_compiz_atomic_duplicate_state,
.atomic_destroy_state = komeda_compiz_atomic_destroy_state,
};
@@ -133,14 +170,7 @@ static const struct drm_private_state_funcs komeda_compiz_obj_funcs = {
static int komeda_compiz_obj_add(struct komeda_kms_dev *kms,
struct komeda_compiz *compiz)
{
struct komeda_compiz_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->base.component = &compiz->base;
drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, &st->base.obj,
drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, NULL,
&komeda_compiz_obj_funcs);
return 0;
@@ -168,7 +198,24 @@ komeda_splitter_atomic_destroy_state(struct drm_private_obj *obj,
kfree(to_splitter_st(priv_to_comp_st(state)));
}
static struct drm_private_state *
komeda_splitter_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_splitter_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
komeda_component_state_reset(&st->base);
st->base.component = to_component(obj);
return &st->base.obj;
}
static const struct drm_private_state_funcs komeda_splitter_obj_funcs = {
.atomic_create_state = komeda_splitter_atomic_create_state,
.atomic_duplicate_state = komeda_splitter_atomic_duplicate_state,
.atomic_destroy_state = komeda_splitter_atomic_destroy_state,
};
@@ -176,15 +223,8 @@ static const struct drm_private_state_funcs komeda_splitter_obj_funcs = {
static int komeda_splitter_obj_add(struct komeda_kms_dev *kms,
struct komeda_splitter *splitter)
{
struct komeda_splitter_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->base.component = &splitter->base;
drm_atomic_private_obj_init(&kms->base,
&splitter->base.obj, &st->base.obj,
&splitter->base.obj, NULL,
&komeda_splitter_obj_funcs);
return 0;
@@ -211,7 +251,24 @@ static void komeda_merger_atomic_destroy_state(struct drm_private_obj *obj,
kfree(to_merger_st(priv_to_comp_st(state)));
}
static struct drm_private_state *
komeda_merger_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_merger_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
komeda_component_state_reset(&st->base);
st->base.component = to_component(obj);
return &st->base.obj;
}
static const struct drm_private_state_funcs komeda_merger_obj_funcs = {
.atomic_create_state = komeda_merger_atomic_create_state,
.atomic_duplicate_state = komeda_merger_atomic_duplicate_state,
.atomic_destroy_state = komeda_merger_atomic_destroy_state,
};
@@ -219,15 +276,8 @@ static const struct drm_private_state_funcs komeda_merger_obj_funcs = {
static int komeda_merger_obj_add(struct komeda_kms_dev *kms,
struct komeda_merger *merger)
{
struct komeda_merger_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->base.component = &merger->base;
drm_atomic_private_obj_init(&kms->base,
&merger->base.obj, &st->base.obj,
&merger->base.obj, NULL,
&komeda_merger_obj_funcs);
return 0;
@@ -255,7 +305,24 @@ komeda_improc_atomic_destroy_state(struct drm_private_obj *obj,
kfree(to_improc_st(priv_to_comp_st(state)));
}
static struct drm_private_state *
komeda_improc_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_improc_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
komeda_component_state_reset(&st->base);
st->base.component = to_component(obj);
return &st->base.obj;
}
static const struct drm_private_state_funcs komeda_improc_obj_funcs = {
.atomic_create_state = komeda_improc_atomic_create_state,
.atomic_duplicate_state = komeda_improc_atomic_duplicate_state,
.atomic_destroy_state = komeda_improc_atomic_destroy_state,
};
@@ -263,14 +330,7 @@ static const struct drm_private_state_funcs komeda_improc_obj_funcs = {
static int komeda_improc_obj_add(struct komeda_kms_dev *kms,
struct komeda_improc *improc)
{
struct komeda_improc_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->base.component = &improc->base;
drm_atomic_private_obj_init(&kms->base, &improc->base.obj, &st->base.obj,
drm_atomic_private_obj_init(&kms->base, &improc->base.obj, NULL,
&komeda_improc_obj_funcs);
return 0;
@@ -298,7 +358,24 @@ komeda_timing_ctrlr_atomic_destroy_state(struct drm_private_obj *obj,
kfree(to_ctrlr_st(priv_to_comp_st(state)));
}
static struct drm_private_state *
komeda_timing_ctrlr_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_timing_ctrlr_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
komeda_component_state_reset(&st->base);
st->base.component = to_component(obj);
return &st->base.obj;
}
static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = {
.atomic_create_state = komeda_timing_ctrlr_atomic_create_state,
.atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state,
.atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state,
};
@@ -306,14 +383,7 @@ static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = {
static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms,
struct komeda_timing_ctrlr *ctrlr)
{
struct komeda_compiz_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->base.component = &ctrlr->base;
drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj,
drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, NULL,
&komeda_timing_ctrlr_obj_funcs);
return 0;
@@ -342,7 +412,24 @@ komeda_pipeline_atomic_destroy_state(struct drm_private_obj *obj,
kfree(priv_to_pipe_st(state));
}
static struct drm_private_state *
komeda_pipeline_atomic_create_state(struct drm_private_obj *obj)
{
struct komeda_pipeline_state *st;
st = kzalloc_obj(*st);
if (!st)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &st->obj);
st->active_comps = 0;
st->pipe = container_of(obj, struct komeda_pipeline, obj);
return &st->obj;
}
static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = {
.atomic_create_state = komeda_pipeline_atomic_create_state,
.atomic_duplicate_state = komeda_pipeline_atomic_duplicate_state,
.atomic_destroy_state = komeda_pipeline_atomic_destroy_state,
};
@@ -350,14 +437,7 @@ static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = {
static int komeda_pipeline_obj_add(struct komeda_kms_dev *kms,
struct komeda_pipeline *pipe)
{
struct komeda_pipeline_state *st;
st = kzalloc_obj(*st);
if (!st)
return -ENOMEM;
st->pipe = pipe;
drm_atomic_private_obj_init(&kms->base, &pipe->obj, &st->obj,
drm_atomic_private_obj_init(&kms->base, &pipe->obj, NULL,
&komeda_pipeline_obj_funcs);
return 0;

View File

@@ -566,6 +566,83 @@ static const struct atmel_hlcdc_dc_desc atmel_xlcdc_dc_sam9x75 = {
.ops = &atmel_xlcdc_ops,
};
static const struct atmel_hlcdc_layer_desc atmel_xlcdc_sama7d65_layers[] = {
{
.name = "base",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x60,
.id = 0,
.type = ATMEL_HLCDC_BASE_LAYER,
.cfgs_offset = 0x1c,
.layout = {
.xstride = { 2 },
.default_color = 3,
.general_config = 4,
.disc_pos = 5,
.disc_size = 6,
},
.clut_offset = 0x700,
},
{
.name = "overlay1",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x160,
.id = 1,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.cfgs_offset = 0x1c,
.layout = {
.pos = 2,
.size = 3,
.xstride = { 4 },
.pstride = { 5 },
.default_color = 6,
.chroma_key = 7,
.chroma_key_mask = 8,
.general_config = 9,
},
.clut_offset = 0xb00,
},
{
.name = "high-end-overlay",
.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
.regs_offset = 0x360,
.id = 2,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.cfgs_offset = 0x30,
.layout = {
.pos = 2,
.size = 3,
.memsize = 4,
.xstride = { 5, 7 },
.pstride = { 6, 8 },
.default_color = 9,
.chroma_key = 10,
.chroma_key_mask = 11,
.general_config = 12,
.csc = 16,
.scaler_config = 23,
.vxs_config = 30,
.hxs_config = 31,
},
.clut_offset = 0x1300,
},
};
static const struct atmel_hlcdc_dc_desc atmel_xlcdc_dc_sama7d65 = {
.min_width = 0,
.min_height = 0,
.max_width = 2048,
.max_height = 2048,
.max_spw = 0x3ff,
.max_vpw = 0x3ff,
.max_hpw = 0x3ff,
.fixed_clksrc = true,
.is_xlcdc = true,
.nlayers = ARRAY_SIZE(atmel_xlcdc_sama7d65_layers),
.layers = atmel_xlcdc_sama7d65_layers,
.ops = &atmel_xlcdc_ops,
};
static const struct of_device_id atmel_hlcdc_of_match[] = {
{
.compatible = "atmel,at91sam9n12-hlcdc",
@@ -595,6 +672,10 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
.compatible = "microchip,sam9x75-xlcdc",
.data = &atmel_xlcdc_dc_sam9x75,
},
{
.compatible = "microchip,sama7d65-xlcdc",
.data = &atmel_xlcdc_dc_sama7d65,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);

View File

@@ -342,6 +342,16 @@ config DRM_THINE_THC63LVD1024
help
Thine THC63LVD1024 LVDS/parallel converter driver.
config DRM_THEAD_TH1520_DW_HDMI
tristate "T-Head TH1520 DesignWare HDMI bridge"
depends on OF
depends on COMMON_CLK
depends on ARCH_THEAD || COMPILE_TEST
select DRM_DW_HDMI
help
Choose this to enable support for the internal HDMI bridge found
on the T-Head TH1520 SoC.
config DRM_TOSHIBA_TC358762
tristate "TC358762 DSI/DPI bridge"
depends on OF

View File

@@ -29,6 +29,7 @@ obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
obj-$(CONFIG_DRM_SOLOMON_SSD2825) += ssd2825.o
obj-$(CONFIG_DRM_THEAD_TH1520_DW_HDMI) += th1520-dw-hdmi.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o

View File

@@ -34,6 +34,7 @@ config DRM_ANALOGIX_ANX7625
tristate "Analogix Anx7625 MIPI to DP interface support"
depends on DRM
depends on OF
depends on TYPEC || !TYPEC
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER

View File

@@ -1177,12 +1177,88 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
return ret;
}
static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct analogix_dp_device *dp = to_dp(bridge);
struct drm_display_info *display_info = &dp->connector.display_info;
struct video_info *video = &dp->video_info;
struct device_node *dp_node = dp->dev->of_node;
int vic;
/* Input video interlaces & hsync pol & vsync pol */
video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
/* Input video dynamic_range & colorimetry */
vic = drm_match_cea_mode(mode);
if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) ||
(vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) {
video->dynamic_range = CEA;
video->ycbcr_coeff = COLOR_YCBCR601;
} else if (vic) {
video->dynamic_range = CEA;
video->ycbcr_coeff = COLOR_YCBCR709;
} else {
video->dynamic_range = VESA;
video->ycbcr_coeff = COLOR_YCBCR709;
}
/* Input vide bpc and color_formats */
switch (display_info->bpc) {
case 12:
video->color_depth = COLOR_12;
break;
case 10:
video->color_depth = COLOR_10;
break;
case 8:
video->color_depth = COLOR_8;
break;
case 6:
video->color_depth = COLOR_6;
break;
default:
video->color_depth = COLOR_8;
break;
}
if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
video->color_space = COLOR_YCBCR444;
else if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
video->color_space = COLOR_YCBCR422;
else
video->color_space = COLOR_RGB;
/*
* NOTE: those property parsing code is used for providing backward
* compatibility for samsung platform.
* Due to we used the "of_property_read_u32" interfaces, when this
* property isn't present, the "video_info" can keep the original
* values and wouldn't be modified.
*/
of_property_read_u32(dp_node, "samsung,color-space",
&video->color_space);
of_property_read_u32(dp_node, "samsung,dynamic-range",
&video->dynamic_range);
of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
&video->ycbcr_coeff);
of_property_read_u32(dp_node, "samsung,color-depth",
&video->color_depth);
if (of_property_read_bool(dp_node, "hsync-active-high"))
video->h_sync_polarity = true;
if (of_property_read_bool(dp_node, "vsync-active-high"))
video->v_sync_polarity = true;
if (of_property_read_bool(dp_node, "interlaced"))
video->interlaced = true;
}
static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int timeout_loop = 0;
int ret;
@@ -1190,6 +1266,11 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
if (!crtc)
return;
new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state)
return;
analogix_dp_bridge_mode_set(bridge, &new_crtc_state->adjusted_mode);
old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Not a full enable, just disable PSR and continue */
if (old_crtc_state && old_crtc_state->self_refresh_active) {
@@ -1296,83 +1377,6 @@ static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
DRM_ERROR("Failed to enable psr (%d)\n", ret);
}
static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
struct analogix_dp_device *dp = to_dp(bridge);
struct drm_display_info *display_info = &dp->connector.display_info;
struct video_info *video = &dp->video_info;
struct device_node *dp_node = dp->dev->of_node;
int vic;
/* Input video interlaces & hsync pol & vsync pol */
video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
/* Input video dynamic_range & colorimetry */
vic = drm_match_cea_mode(mode);
if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) ||
(vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) {
video->dynamic_range = CEA;
video->ycbcr_coeff = COLOR_YCBCR601;
} else if (vic) {
video->dynamic_range = CEA;
video->ycbcr_coeff = COLOR_YCBCR709;
} else {
video->dynamic_range = VESA;
video->ycbcr_coeff = COLOR_YCBCR709;
}
/* Input vide bpc and color_formats */
switch (display_info->bpc) {
case 12:
video->color_depth = COLOR_12;
break;
case 10:
video->color_depth = COLOR_10;
break;
case 8:
video->color_depth = COLOR_8;
break;
case 6:
video->color_depth = COLOR_6;
break;
default:
video->color_depth = COLOR_8;
break;
}
if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
video->color_space = COLOR_YCBCR444;
else if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
video->color_space = COLOR_YCBCR422;
else
video->color_space = COLOR_RGB;
/*
* NOTE: those property parsing code is used for providing backward
* compatibility for samsung platform.
* Due to we used the "of_property_read_u32" interfaces, when this
* property isn't present, the "video_info" can keep the original
* values and wouldn't be modified.
*/
of_property_read_u32(dp_node, "samsung,color-space",
&video->color_space);
of_property_read_u32(dp_node, "samsung,dynamic-range",
&video->dynamic_range);
of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
&video->ycbcr_coeff);
of_property_read_u32(dp_node, "samsung,color-depth",
&video->color_depth);
if (of_property_read_bool(dp_node, "hsync-active-high"))
video->h_sync_polarity = true;
if (of_property_read_bool(dp_node, "vsync-active-high"))
video->v_sync_polarity = true;
if (of_property_read_bool(dp_node, "interlaced"))
video->interlaced = true;
}
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -1381,7 +1385,6 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
.atomic_enable = analogix_dp_bridge_atomic_enable,
.atomic_disable = analogix_dp_bridge_atomic_disable,
.atomic_post_disable = analogix_dp_bridge_atomic_post_disable,
.mode_set = analogix_dp_bridge_mode_set,
.attach = analogix_dp_bridge_attach,
};

View File

@@ -156,7 +156,7 @@ struct analogix_dp_device {
struct drm_device *drm_dev;
struct drm_connector connector;
struct drm_bridge bridge;
struct drm_dp_aux aux;
struct drm_dp_aux aux;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -166,7 +166,7 @@ struct analogix_dp_device {
struct phy *phy;
int dpms_mode;
struct gpio_desc *hpd_gpiod;
bool force_hpd;
bool force_hpd;
bool fast_train_enable;
bool psr_supported;

View File

@@ -3,6 +3,7 @@
* Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
*
*/
#include <linux/cleanup.h>
#include <linux/gcd.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -15,6 +16,9 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/usb/pd.h>
#include <linux/usb/role.h>
#include <linux/workqueue.h>
#include <linux/of_graph.h>
@@ -1325,7 +1329,7 @@ static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx)
static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
{
struct device *dev = ctx->dev;
int ret, val;
int ret;
/* Reset main ocm */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
@@ -1339,6 +1343,11 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
else
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
}
static void anx7625_configure_hpd(struct anx7625_data *ctx)
{
int val;
/*
* Make sure the HPD GPIO already be configured after OCM release before
@@ -1369,7 +1378,9 @@ static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK)
return -ENODEV;
anx7625_disable_pd_protocol(ctx);
if (!ctx->typec_port)
anx7625_disable_pd_protocol(ctx);
anx7625_configure_hpd(ctx);
DRM_DEV_DEBUG_DRIVER(dev, "Firmware ver %02x%02x,",
anx7625_reg_read(ctx,
@@ -1472,6 +1483,175 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret);
}
#if IS_REACHABLE(CONFIG_TYPEC)
/* Simple additive checksum over @len bytes of @buf (mod 256). */
static u8 anx7625_checksum(u8 *buf, u8 len)
{
	u8 sum = 0;

	while (len--)
		sum += *buf++;

	return sum;
}
/*
 * Read the OCM message control/status byte (CMD_SEND_BUF register).
 * Returns the register value (>= 0) or a negative errno from the I2C read.
 */
static int anx7625_read_msg_ctrl_status(struct anx7625_data *ctx)
{
	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, CMD_SEND_BUF);
}
/*
 * Poll the OCM message control register until the previous message has
 * been consumed (register reads 0) or the read itself fails (negative).
 * Polls every 2ms for up to 300ms; returns 0 or -ETIMEDOUT.
 */
static int anx7625_wait_msg_empty(struct anx7625_data *ctx)
{
	int val;

	return readx_poll_timeout(anx7625_read_msg_ctrl_status, ctx,
				  val, val <= 0,
				  2000, 2000 * 150);
}
/*
 * Send a firmware message to the on-chip OCM.
 * @type: message type byte
 * @buf:  optional payload (may be NULL when @size is 0)
 * @size: payload length in bytes
 *
 * The message is laid out as { msg_len, msg_type, payload..., checksum }
 * and handed over by writing msg_len to CMD_SEND_BUF last.
 * Returns 0 on success or a negative errno.
 */
static int anx7625_send_msg(struct anx7625_data *ctx, u8 type, u8 *buf, u8 size)
{
	struct fw_msg *msg = &ctx->send_msg;
	u8 crc;
	int ret;

	/*
	 * Reserve one byte of the payload buffer for the trailing checksum:
	 * msg->buf[size] is written below, so size may be at most
	 * MAX_BUF_LEN - 1. Clamping to MAX_BUF_LEN allowed an out-of-bounds
	 * write when size == MAX_BUF_LEN.
	 */
	size = min_t(u8, size, (u8)(MAX_BUF_LEN - 1));
	if (size)
		memcpy(msg->buf, buf, size);
	msg->msg_type = type;
	/* msg len equals buffer length + msg_type */
	msg->msg_len = size + 1;
	crc = anx7625_checksum((u8 *)msg, size + HEADER_LEN);
	/* Two's-complement checksum so the bytes sum to zero. */
	msg->buf[size] = 0 - crc;

	ret = anx7625_wait_msg_empty(ctx);
	if (ret)
		return ret;

	ret = anx7625_reg_block_write(ctx, ctx->i2c.rx_p0_client,
				      CMD_SEND_BUF + 1, size + HEADER_LEN,
				      &msg->msg_type);
	if (ret < 0)
		return ret;

	/* Writing msg_len last hands the message over to the OCM. */
	return anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, CMD_SEND_BUF,
				 msg->msg_len);
}
/*
 * typec .dr_set callback: request a data-role swap from the OCM.
 * Message type 0x11 carries no payload; the role change is reported
 * back later via the DATA_ROLE_STATUS interrupt.
 */
static int anx7625_typec_dr_set(struct typec_port *port, enum typec_data_role role)
{
	struct anx7625_data *ctx = typec_get_drvdata(port);

	/* Nothing to do when the requested role is already active. */
	if (role != ctx->typec_data_role)
		return anx7625_send_msg(ctx, 0x11, NULL, 0);

	return 0;
}
/* Type-C port operations: only data-role swap is implemented. */
static const struct typec_operations anx7625_typec_ops = {
	.dr_set = anx7625_typec_dr_set,
};
/*
 * Report the current CC pin orientation to the typec framework.
 *
 * NOTE(review): the CC1_*/CC2_* bit masks are defined under the
 * NEW_CC_STATUS (0x46) register in the header, yet this reads
 * SYSTEM_STSTUS (0x45) — confirm the CC state is really mirrored there.
 * Also, anx7625_reg_read() returns a negative errno on failure, which is
 * stored into a u32 here and could alias the CC bit masks — verify the
 * error case cannot misreport an orientation.
 */
static void anx7625_typec_set_orientation(struct anx7625_data *ctx)
{
	u32 val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);

	if (val & (CC1_RP | CC1_RD))
		typec_set_orientation(ctx->typec_port, TYPEC_ORIENTATION_NORMAL);
	else if (val & (CC2_RP | CC2_RD))
		typec_set_orientation(ctx->typec_port, TYPEC_ORIENTATION_REVERSE);
	else
		typec_set_orientation(ctx->typec_port, TYPEC_ORIENTATION_NONE);
}
/*
 * Propagate changed status bits into the typec/usb-role frameworks.
 * @intr_status: raw SYSTEM_STSTUS register value (new state of each bit)
 * @intr_vector: mask of which status bits changed and should be acted on
 *
 * Left byte-identical: the notification order (usb_role_switch before
 * typec_set_data_role) is preserved as-is.
 */
static void anx7625_typec_set_status(struct anx7625_data *ctx,
				     unsigned int intr_status,
				     unsigned int intr_vector)
{
	/* CC line change: re-resolve the plug orientation. */
	if (intr_vector & CC_STATUS)
		anx7625_typec_set_orientation(ctx);

	/* Data role change: bit set means host, clear means device. */
	if (intr_vector & DATA_ROLE_STATUS) {
		enum typec_data_role data_role = (intr_status & DATA_ROLE_STATUS) ?
			TYPEC_HOST : TYPEC_DEVICE;

		usb_role_switch_set_role(ctx->role_sw,
					 (intr_status & DATA_ROLE_STATUS) ?
					 USB_ROLE_HOST : USB_ROLE_DEVICE);
		typec_set_data_role(ctx->typec_port, data_role);

		/* Cache the role so dr_set can skip redundant swaps. */
		ctx->typec_data_role = data_role;
	}

	/* VBUS present means we source power, otherwise we sink. */
	if (intr_vector & VBUS_STATUS)
		typec_set_pwr_role(ctx->typec_port,
				   (intr_status & VBUS_STATUS) ?
				   TYPEC_SOURCE : TYPEC_SINK);

	/* Same mapping for the VCONN supply role. */
	if (intr_vector & VCONN_STATUS)
		typec_set_vconn_role(ctx->typec_port,
				     (intr_status & VCONN_STATUS) ?
				     TYPEC_SOURCE : TYPEC_SINK);
}
static int anx7625_typec_register(struct anx7625_data *ctx)
{
struct typec_capability typec_cap = { };
struct fwnode_handle *fwnode __free(fwnode_handle) =
device_get_named_child_node(ctx->dev, "connector");
u32 val;
int ret;
if (!fwnode)
return 0;
ret = typec_get_fw_cap(&typec_cap, fwnode);
if (ret < 0)
return ret;
typec_cap.revision = 0x0120;
typec_cap.pd_revision = 0x0300;
typec_cap.usb_capability = USB_CAPABILITY_USB2 | USB_CAPABILITY_USB3;
typec_cap.orientation_aware = true;
typec_cap.driver_data = ctx;
typec_cap.ops = &anx7625_typec_ops;
ctx->typec_port = typec_register_port(ctx->dev, &typec_cap);
if (IS_ERR(ctx->typec_port))
return PTR_ERR(ctx->typec_port);
ctx->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(ctx->role_sw)) {
typec_unregister_port(ctx->typec_port);
return PTR_ERR(ctx->role_sw);
}
val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
anx7625_typec_set_status(ctx, val,
CC_STATUS | DATA_ROLE_STATUS |
VBUS_STATUS | VCONN_STATUS);
return 0;
}
/*
 * Undo anx7625_typec_register(). usb_role_switch_put() and
 * typec_unregister_port() tolerate NULL handles, so this is safe even
 * when no "connector" node was present and nothing was registered.
 */
static void anx7625_typec_unregister(struct anx7625_data *ctx)
{
	usb_role_switch_put(ctx->role_sw);
	typec_unregister_port(ctx->typec_port);
}
#else
/* No-op stubs used when CONFIG_TYPEC is not reachable. */
static void anx7625_typec_set_status(struct anx7625_data *ctx,
				     unsigned int intr_status,
				     unsigned int intr_vector)
{
}

static int anx7625_typec_register(struct anx7625_data *ctx)
{
	return 0;
}

static void anx7625_typec_unregister(struct anx7625_data *ctx)
{
}
#endif
/*
 * Read the system status register (0x7e:0x45), whose HPD_STATUS bit
 * reflects the current hot-plug state. Negative errno on I2C failure.
 */
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}
@@ -1566,7 +1746,7 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
}
}
static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
static int anx7625_intr_status(struct anx7625_data *ctx)
{
int intr_vector, status;
struct device *dev = ctx->dev;
@@ -1593,9 +1773,6 @@ static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
return status;
}
if (!(intr_vector & HPD_STATUS_CHANGE))
return -ENOENT;
status = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
SYSTEM_STSTUS);
if (status < 0) {
@@ -1604,6 +1781,12 @@ static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
}
DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x45=%x\n", status);
anx7625_typec_set_status(ctx, status, intr_vector);
if (!(intr_vector & HPD_STATUS))
return -ENOENT;
dp_hpd_change_handler(ctx, status & HPD_STATUS);
return 0;
@@ -1622,7 +1805,7 @@ static void anx7625_work_func(struct work_struct *work)
return;
}
event = anx7625_hpd_change_detect(ctx);
event = anx7625_intr_status(ctx);
mutex_unlock(&ctx->lock);
@@ -2741,11 +2924,29 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
if (!platform->pdata.low_power_mode) {
anx7625_disable_pd_protocol(platform);
struct fwnode_handle *fwnode;
fwnode = device_get_named_child_node(dev, "connector");
if (fwnode)
fwnode_handle_put(fwnode);
else
anx7625_disable_pd_protocol(platform);
anx7625_configure_hpd(platform);
pm_runtime_get_sync(dev);
_anx7625_hpd_polling(platform, 5000 * 100);
}
if (platform->pdata.intp_irq)
anx7625_reg_write(platform, platform->i2c.rx_p0_client,
INTERFACE_CHANGE_INT_MASK, 0);
/* After getting runtime handle */
ret = anx7625_typec_register(platform);
if (ret)
goto pm_suspend;
/* Add work function */
if (platform->pdata.intp_irq) {
enable_irq(platform->pdata.intp_irq);
@@ -2759,6 +2960,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
return 0;
pm_suspend:
if (!platform->pdata.low_power_mode)
pm_runtime_put_sync_suspend(&client->dev);
free_wq:
if (platform->workqueue)
destroy_workqueue(platform->workqueue);
@@ -2774,6 +2979,8 @@ static void anx7625_i2c_remove(struct i2c_client *client)
{
struct anx7625_data *platform = i2c_get_clientdata(client);
anx7625_typec_unregister(platform);
drm_bridge_remove(&platform->bridge);
if (platform->pdata.intp_irq)

View File

@@ -51,9 +51,24 @@
#define INTR_RECEIVED_MSG BIT(5)
#define SYSTEM_STSTUS 0x45
#define INTERFACE_CHANGE_INT_MASK 0x43
#define INTERFACE_CHANGE_INT 0x44
#define HPD_STATUS_CHANGE 0x80
#define HPD_STATUS 0x80
#define VCONN_STATUS BIT(2)
#define VBUS_STATUS BIT(3)
#define CC_STATUS BIT(4)
#define DATA_ROLE_STATUS BIT(5)
#define HPD_STATUS BIT(7)
#define NEW_CC_STATUS 0x46
#define CC1_RD BIT(0)
#define CC1_RA BIT(1)
#define CC1_RP (BIT(2) | BIT(3))
#define CC2_RD BIT(4)
#define CC2_RA BIT(5)
#define CC2_RP (BIT(6) | BIT(7))
#define CMD_SEND_BUF 0xC0
#define CMD_RECV_BUF 0xE0
/******** END of I2C Address 0x58 ********/
@@ -447,9 +462,23 @@ struct anx7625_i2c_client {
struct i2c_client *tcpc_client;
};
struct typec_port;
struct usb_role_switch;
#define MAX_BUF_LEN 30
struct fw_msg {
u8 msg_len;
u8 msg_type;
u8 buf[MAX_BUF_LEN];
} __packed;
#define HEADER_LEN 2
struct anx7625_data {
struct anx7625_platform_data pdata;
struct platform_device *audio_pdev;
struct typec_port *typec_port;
struct usb_role_switch *role_sw;
int typec_data_role;
int hpd_status;
int hpd_high_cnt;
int dp_en;
@@ -479,6 +508,7 @@ struct anx7625_data {
struct drm_connector *connector;
struct mipi_dsi_device *dsi;
struct drm_dp_aux aux;
struct fw_msg send_msg;
};
#endif /* __ANX7625_H__ */

View File

@@ -92,6 +92,7 @@ struct fsl_ldb {
const struct fsl_ldb_devdata *devdata;
bool ch0_enabled;
bool ch1_enabled;
bool use_termination_resistor;
};
static bool fsl_ldb_is_dual(const struct fsl_ldb *fsl_ldb)
@@ -212,6 +213,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
/* Program LVDS_CTRL */
reg = LVDS_CTRL_CC_ADJ(2) | LVDS_CTRL_PRE_EMPH_EN |
LVDS_CTRL_PRE_EMPH_ADJ(3) | LVDS_CTRL_VBG_EN;
if (fsl_ldb->use_termination_resistor)
reg |= LVDS_CTRL_HS_EN;
regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg);
/* Wait for VBG to stabilize. */
@@ -340,6 +344,9 @@ static int fsl_ldb_probe(struct platform_device *pdev)
if (IS_ERR(panel))
return PTR_ERR(panel);
if (of_property_present(dev->of_node, "nxp,enable-termination-resistor"))
fsl_ldb->use_termination_resistor = true;
fsl_ldb->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(fsl_ldb->panel_bridge))
return PTR_ERR(fsl_ldb->panel_bridge);

View File

@@ -23,7 +23,6 @@
struct imx8qxp_pixel_link {
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct device *dev;
struct imx_sc_ipc *ipc_handle;
u8 stream_id;
@@ -140,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
}
return drm_bridge_attach(encoder,
pl->next_bridge, bridge,
pl->bridge.next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -256,17 +255,13 @@ static int imx8qxp_pixel_link_disable_all_controls(struct imx8qxp_pixel_link *pl
return imx8qxp_pixel_link_disable_sync(pl);
}
static struct drm_bridge *
imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl)
static int imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl)
{
struct device_node *np = pl->dev->of_node;
struct device_node *port, *remote;
struct drm_bridge *next_bridge[PL_MAX_NEXT_BRIDGES];
struct device_node *port;
u32 port_id;
bool found_port = false;
int reg, ep_cnt = 0;
/* select the first next bridge by default */
int bridge_sel = 0;
int reg;
for (port_id = 1; port_id <= PL_MAX_MST_ADDR + 1; port_id++) {
port = of_graph_get_port_by_id(np, port_id);
@@ -284,11 +279,12 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl)
if (!found_port) {
DRM_DEV_ERROR(pl->dev, "no available output port\n");
return ERR_PTR(-ENODEV);
return -ENODEV;
}
for (reg = 0; reg < PL_MAX_NEXT_BRIDGES; reg++) {
remote = of_graph_get_remote_node(np, port_id, reg);
struct device_node *remote __free(device_node) =
of_graph_get_remote_node(np, port_id, reg);
if (!remote)
continue;
@@ -296,28 +292,26 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl)
DRM_DEV_DEBUG(pl->dev,
"port%u endpoint%u remote parent is not available\n",
port_id, reg);
of_node_put(remote);
continue;
}
next_bridge[ep_cnt] = of_drm_find_bridge(remote);
if (!next_bridge[ep_cnt]) {
of_node_put(remote);
return ERR_PTR(-EPROBE_DEFER);
if (!pl->bridge.next_bridge) {
/* Select the first bridge by default... */
pl->bridge.next_bridge = of_drm_find_and_get_bridge(remote);
if (!pl->bridge.next_bridge)
return -EPROBE_DEFER;
} else if (of_property_present(remote, "fsl,companion-pxl2dpi")) {
/* ... but prefer the companion PXL2DPI if present */
drm_bridge_put(pl->bridge.next_bridge);
pl->bridge.next_bridge = of_drm_find_and_get_bridge(remote);
if (!pl->bridge.next_bridge)
return -EPROBE_DEFER;
}
/* specially select the next bridge with companion PXL2DPI */
if (of_property_present(remote, "fsl,companion-pxl2dpi"))
bridge_sel = ep_cnt;
ep_cnt++;
of_node_put(remote);
}
pl->mst_addr = port_id - 1;
return next_bridge[bridge_sel];
return 0;
}
static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
@@ -373,9 +367,9 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
if (ret)
return ret;
pl->next_bridge = imx8qxp_pixel_link_find_next_bridge(pl);
if (IS_ERR(pl->next_bridge))
return PTR_ERR(pl->next_bridge);
ret = imx8qxp_pixel_link_find_next_bridge(pl);
if (ret)
return ret;
platform_set_drvdata(pdev, pl);

View File

@@ -116,12 +116,25 @@ static int lt9611_mipi_input_digital(struct lt9611 *lt9611,
{ 0x830a, 0x00 },
{ 0x824f, 0x80 },
{ 0x8250, 0x10 },
{ 0x8303, 0x00 },
{ 0x8302, 0x0a },
{ 0x8306, 0x0a },
};
if (lt9611->dsi1_node)
reg_cfg[1].def = 0x03;
if (lt9611->dsi1_node) {
if (lt9611->dsi0_node) {
/* Dual port (Port A + B) */
reg_cfg[1].def = 0x03;
} else {
/*
* Single port B:
* - 0x8303 bit 6: port swap (1=PortB as primary)
* - 0x8250 bit 3:2: byte_clk source (01=PortB)
*/
reg_cfg[3].def = 0x14;
reg_cfg[4].def = 0x40;
}
}
return regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
}
@@ -202,7 +215,9 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod
regmap_write(lt9611->regmap, 0x831d, pol);
regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
if (lt9611->dsi1_node) {
/* dual port: configure hact for combining two DSI inputs */
if (lt9611->dsi0_node && lt9611->dsi1_node) {
unsigned int hact = mode->hdisplay;
hact >>= 2;
@@ -759,7 +774,8 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->hdisplay > 3840)
return MODE_BAD_HVALUE;
if (mode->hdisplay > 2000 && !lt9611->dsi1_node)
/* high resolution requires dual port (Port A + B) */
if (mode->hdisplay > 2000 && !(lt9611->dsi0_node && lt9611->dsi1_node))
return MODE_PANEL;
return MODE_OK;
@@ -1033,13 +1049,13 @@ static int lt9611_parse_dt(struct device *dev,
struct lt9611 *lt9611)
{
lt9611->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1);
if (!lt9611->dsi0_node) {
dev_err(lt9611->dev, "failed to get remote node for primary dsi\n");
lt9611->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);
if (!lt9611->dsi0_node && !lt9611->dsi1_node) {
dev_err(lt9611->dev, "failed to get remote node for dsi\n");
return -ENODEV;
}
lt9611->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);
lt9611->ac_mode = of_property_read_bool(dev->of_node, "lt,ac-mode");
return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611->next_bridge);
@@ -1161,14 +1177,16 @@ static int lt9611_probe(struct i2c_client *client)
drm_bridge_add(&lt9611->bridge);
/* Attach primary DSI */
lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node);
if (IS_ERR(lt9611->dsi0)) {
ret = PTR_ERR(lt9611->dsi0);
goto err_remove_bridge;
/* Attach primary DSI (directly drives or Port A in dual-port mode) */
if (lt9611->dsi0_node) {
lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node);
if (IS_ERR(lt9611->dsi0)) {
ret = PTR_ERR(lt9611->dsi0);
goto err_remove_bridge;
}
}
/* Attach secondary DSI, if specified */
/* Attach secondary DSI (Port B in single or dual-port mode) */
if (lt9611->dsi1_node) {
lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node);
if (IS_ERR(lt9611->dsi1)) {

View File

@@ -352,12 +352,6 @@ enum {
DW_DP_YCBCR420_16BIT,
};
enum {
DW_DP_MP_SINGLE_PIXEL,
DW_DP_MP_DUAL_PIXEL,
DW_DP_MP_QUAD_PIXEL,
};
enum {
DW_DP_SDP_VERTICAL_INTERVAL = BIT(0),
DW_DP_SDP_HORIZONTAL_INTERVAL = BIT(1),
@@ -1984,7 +1978,7 @@ struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
return ERR_CAST(dp);
dp->dev = dev;
dp->pixel_mode = DW_DP_MP_QUAD_PIXEL;
dp->pixel_mode = plat_data->pixel_mode;
dp->plat_data.max_link_rate = plat_data->max_link_rate;
bridge = &dp->bridge;
@@ -2020,13 +2014,13 @@ struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
return ERR_CAST(dp->aux_clk);
}
dp->i2s_clk = devm_clk_get(dev, "i2s");
dp->i2s_clk = devm_clk_get_optional(dev, "i2s");
if (IS_ERR(dp->i2s_clk)) {
dev_err_probe(dev, PTR_ERR(dp->i2s_clk), "failed to get i2s clock\n");
return ERR_CAST(dp->i2s_clk);
}
dp->spdif_clk = devm_clk_get(dev, "spdif");
dp->spdif_clk = devm_clk_get_optional(dev, "spdif");
if (IS_ERR(dp->spdif_clk)) {
dev_err_probe(dev, PTR_ERR(dp->spdif_clk), "failed to get spdif clock\n");
return ERR_CAST(dp->spdif_clk);

View File

@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -748,120 +749,6 @@ static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi)
return adap;
}
static int dw_hdmi_qp_config_avi_infoframe(struct dw_hdmi_qp *hdmi,
const u8 *buffer, size_t len)
{
u32 val, i, j;
if (len != HDMI_INFOFRAME_SIZE(AVI)) {
dev_err(hdmi->dev, "failed to configure avi infoframe\n");
return -EINVAL;
}
/*
* DW HDMI QP IP uses a different byte format from standard AVI info
* frames, though generally the bits are in the correct bytes.
*/
val = buffer[1] << 8 | buffer[2] << 16;
dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS0);
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
if (i * 4 + j >= 14)
break;
if (!j)
val = buffer[i * 4 + j + 3];
val |= buffer[i * 4 + j + 3] << (8 * j);
}
dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS1 + i * 4);
}
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN,
PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN);
return 0;
}
static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
const u8 *buffer, size_t len)
{
u32 val, i;
if (len != HDMI_INFOFRAME_SIZE(DRM)) {
dev_err(hdmi->dev, "failed to configure drm infoframe\n");
return -EINVAL;
}
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);
val = buffer[1] << 8 | buffer[2] << 16;
dw_hdmi_qp_write(hdmi, val, PKT_DRMI_CONTENTS0);
for (i = 0; i <= buffer[2]; i++) {
if (i % 4 == 0)
val = buffer[3 + i];
val |= buffer[3 + i] << ((i % 4) * 8);
if ((i % 4 == 3) || i == buffer[2])
dw_hdmi_qp_write(hdmi, val,
PKT_DRMI_CONTENTS1 + ((i / 4) * 4));
}
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN,
PKTSCHED_PKT_EN);
return 0;
}
/*
* Static values documented in the TRM
* Different values are only used for debug purposes
*/
#define DW_HDMI_QP_AUDIO_INFOFRAME_HB1 0x1
#define DW_HDMI_QP_AUDIO_INFOFRAME_HB2 0xa
static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi,
const u8 *buffer, size_t len)
{
/*
* AUDI_CONTENTS0: { RSV, HB2, HB1, RSV }
* AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 }
* AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 }
*
* PB0: CheckSum
* PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 |
* PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 |
* PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 |
* PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 |
* PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 |
* PB6~PB10: Reserved
*
* AUDI_CONTENTS0 default value defined by HDMI specification,
* and shall only be changed for debug purposes.
*/
u32 header_bytes = (DW_HDMI_QP_AUDIO_INFOFRAME_HB1 << 8) |
(DW_HDMI_QP_AUDIO_INFOFRAME_HB2 << 16);
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1);
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1);
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1);
/* Enable ACR, AUDI, AMD */
dw_hdmi_qp_mod(hdmi,
PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
PKTSCHED_PKT_EN);
/* Enable AUDS */
dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN);
return 0;
}
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
@@ -970,9 +857,9 @@ static int dw_hdmi_qp_bridge_clear_avi_infoframe(struct drm_bridge *bridge)
static int dw_hdmi_qp_bridge_clear_hdmi_infoframe(struct drm_bridge *bridge)
{
/* FIXME: add support for this InfoFrame */
struct dw_hdmi_qp *hdmi = bridge->driver_private;
drm_warn_once(bridge->encoder->dev, "HDMI VSI not supported\n");
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_VSI_TX_EN, PKTSCHED_PKT_EN);
return 0;
}
@@ -986,6 +873,15 @@ static int dw_hdmi_qp_bridge_clear_hdr_drm_infoframe(struct drm_bridge *bridge)
return 0;
}
/* Stop transmitting the SPD InfoFrame packet. Always returns 0. */
static int dw_hdmi_qp_bridge_clear_spd_infoframe(struct drm_bridge *bridge)
{
	struct dw_hdmi_qp *hdmi = bridge->driver_private;

	dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_SPDI_TX_EN, PKTSCHED_PKT_EN);

	return 0;
}
static int dw_hdmi_qp_bridge_clear_audio_infoframe(struct drm_bridge *bridge)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
@@ -999,6 +895,32 @@ static int dw_hdmi_qp_bridge_clear_audio_infoframe(struct drm_bridge *bridge)
return 0;
}
/*
 * Pack up to four packet bytes from buffer[start..start+len) into one
 * 32-bit register, little-endian (byte position within the word comes
 * from the byte's index modulo 4).
 */
static void dw_hdmi_qp_write_pkt(struct dw_hdmi_qp *hdmi, const u8 *buffer,
				 size_t start, size_t len, unsigned int reg)
{
	u32 word = 0;
	size_t pos;

	for (pos = start; pos < start + len; pos++)
		word |= buffer[pos] << ((pos % 4) * BITS_PER_BYTE);

	dw_hdmi_qp_write(hdmi, word, reg);
}
/*
 * Write a complete InfoFrame (3 header bytes followed by the payload)
 * into the controller's packet registers: header bytes 1-2 go into the
 * first register, then the payload four bytes per register.
 */
static void dw_hdmi_qp_write_infoframe(struct dw_hdmi_qp *hdmi, const u8 *buffer,
				       size_t len, unsigned int reg)
{
	size_t i;

	/*
	 * Guard against a short buffer: len is a size_t, so "len - 3"
	 * below would wrap around for len < 3 and read far past the
	 * buffer.
	 */
	if (len < 3)
		return;

	/* InfoFrame packet header */
	dw_hdmi_qp_write_pkt(hdmi, buffer, 1, 2, reg);

	/* InfoFrame packet body */
	for (i = 0; i < len - 3; i += 4)
		dw_hdmi_qp_write_pkt(hdmi, buffer + 3, i, min(len - i - 3, 4),
				     reg + i + 4);
}
static int dw_hdmi_qp_bridge_write_avi_infoframe(struct drm_bridge *bridge,
const u8 *buffer, size_t len)
{
@@ -1006,15 +928,27 @@ static int dw_hdmi_qp_bridge_write_avi_infoframe(struct drm_bridge *bridge,
dw_hdmi_qp_bridge_clear_avi_infoframe(bridge);
return dw_hdmi_qp_config_avi_infoframe(hdmi, buffer, len);
dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_AVI_CONTENTS0);
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN,
PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN);
return 0;
}
static int dw_hdmi_qp_bridge_write_hdmi_infoframe(struct drm_bridge *bridge,
const u8 *buffer, size_t len)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
dw_hdmi_qp_bridge_clear_hdmi_infoframe(bridge);
/* FIXME: add support for the HDMI VSI */
dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_VSI_CONTENTS0);
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_VSI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
dw_hdmi_qp_mod(hdmi, PKTSCHED_VSI_TX_EN, PKTSCHED_VSI_TX_EN,
PKTSCHED_PKT_EN);
return 0;
}
@@ -1026,7 +960,28 @@ static int dw_hdmi_qp_bridge_write_hdr_drm_infoframe(struct drm_bridge *bridge,
dw_hdmi_qp_bridge_clear_hdr_drm_infoframe(bridge);
return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len);
dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_DRMI_CONTENTS0);
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN,
PKTSCHED_PKT_EN);
return 0;
}
static int dw_hdmi_qp_bridge_write_spd_infoframe(struct drm_bridge *bridge,
const u8 *buffer, size_t len)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
dw_hdmi_qp_bridge_clear_spd_infoframe(bridge);
dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_SPDI_CONTENTS0);
dw_hdmi_qp_mod(hdmi, PKTSCHED_SPDI_TX_EN, PKTSCHED_SPDI_TX_EN,
PKTSCHED_PKT_EN);
return 0;
}
static int dw_hdmi_qp_bridge_write_audio_infoframe(struct drm_bridge *bridge,
@@ -1036,7 +991,31 @@ static int dw_hdmi_qp_bridge_write_audio_infoframe(struct drm_bridge *bridge,
dw_hdmi_qp_bridge_clear_audio_infoframe(bridge);
return dw_hdmi_qp_config_audio_infoframe(hdmi, buffer, len);
/*
* AUDI_CONTENTS0: { RSV, HB2, HB1, RSV }
* AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 }
* AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 }
*
* PB0: CheckSum
* PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 |
* PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 |
* PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 |
* PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 |
* PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 |
* PB6~PB10: Reserved
*/
dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_AUDI_CONTENTS0);
/* Enable ACR, AUDI, AMD */
dw_hdmi_qp_mod(hdmi,
PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
PKTSCHED_PKT_EN);
/* Enable AUDS */
dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN);
return 0;
}
#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
@@ -1227,6 +1206,8 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.hdmi_write_hdmi_infoframe = dw_hdmi_qp_bridge_write_hdmi_infoframe,
.hdmi_clear_hdr_drm_infoframe = dw_hdmi_qp_bridge_clear_hdr_drm_infoframe,
.hdmi_write_hdr_drm_infoframe = dw_hdmi_qp_bridge_write_hdr_drm_infoframe,
.hdmi_clear_spd_infoframe = dw_hdmi_qp_bridge_clear_spd_infoframe,
.hdmi_write_spd_infoframe = dw_hdmi_qp_bridge_write_spd_infoframe,
.hdmi_clear_audio_infoframe = dw_hdmi_qp_bridge_clear_audio_infoframe,
.hdmi_write_audio_infoframe = dw_hdmi_qp_bridge_write_audio_infoframe,
.hdmi_audio_startup = dw_hdmi_qp_audio_enable,
@@ -1344,7 +1325,8 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME;
DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME |
DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME;
if (!hdmi->no_hpd)
hdmi->bridge.ops |= DRM_BRIDGE_OP_HPD;
hdmi->bridge.of_node = pdev->dev.of_node;

View File

@@ -198,6 +198,7 @@
#define PKTSCHED_PRQUEUE2_CONFIG2 0xa94
#define PKTSCHED_PKT_CONFIG0 0xa98
#define PKTSCHED_PKT_CONFIG1 0xa9c
#define PKTSCHED_VSI_FIELDRATE BIT(14)
#define PKTSCHED_DRMI_FIELDRATE BIT(13)
#define PKTSCHED_AVI_FIELDRATE BIT(12)
#define PKTSCHED_PKT_CONFIG2 0xaa0
@@ -205,7 +206,9 @@
#define PKTSCHED_PKT_EN 0xaa8
#define PKTSCHED_DRMI_TX_EN BIT(17)
#define PKTSCHED_AUDI_TX_EN BIT(15)
#define PKTSCHED_SPDI_TX_EN BIT(14)
#define PKTSCHED_AVI_TX_EN BIT(13)
#define PKTSCHED_VSI_TX_EN BIT(12)
#define PKTSCHED_EMP_CVTEM_TX_EN BIT(10)
#define PKTSCHED_AMD_TX_EN BIT(8)
#define PKTSCHED_GCP_TX_EN BIT(3)

View File

@@ -4,7 +4,6 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/component.h>
#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
@@ -1194,26 +1193,27 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
/* DRM connector functions */
static enum drm_connector_status
tda998x_connector_detect(struct drm_connector *connector, bool force)
static enum drm_connector_status tda998x_conn_detect(struct tda998x_priv *priv)
{
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
connector_status_disconnected;
}
static void tda998x_connector_destroy(struct drm_connector *connector)
static enum drm_connector_status
tda998x_connector_detect(struct drm_connector *connector, bool force)
{
drm_connector_cleanup(connector);
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
return tda998x_conn_detect(priv);
}
static const struct drm_connector_funcs tda998x_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
.destroy = tda998x_connector_destroy,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -1282,11 +1282,10 @@ static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
return ret;
}
static int tda998x_connector_get_modes(struct drm_connector *connector)
static const struct drm_edid *tda998x_edid_read(struct tda998x_priv *priv,
struct drm_connector *connector)
{
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
const struct drm_edid *drm_edid;
int n;
/*
* If we get killed while waiting for the HPD timeout, return
@@ -1304,6 +1303,16 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
if (priv->rev == TDA19988)
reg_set(priv, REG_TX4, TX4_PD_RAM);
return drm_edid;
}
static int tda998x_connector_get_modes(struct drm_connector *connector)
{
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
const struct drm_edid *drm_edid;
int n;
drm_edid = tda998x_edid_read(priv, connector);
drm_edid_connector_update(connector, drm_edid);
cec_notifier_set_phys_addr(priv->cec_notify,
connector->display_info.source_physical_address);
@@ -1371,10 +1380,8 @@ static int tda998x_bridge_attach(struct drm_bridge *bridge,
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
return -EINVAL;
}
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
return tda998x_connector_init(priv, bridge->dev);
}
@@ -1383,7 +1390,8 @@ static void tda998x_bridge_detach(struct drm_bridge *bridge)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
drm_connector_cleanup(&priv->connector);
if (priv->connector.dev)
drm_connector_cleanup(&priv->connector);
}
static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge,
@@ -1683,6 +1691,59 @@ static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
mutex_unlock(&priv->audio_mutex);
}
static const struct drm_edid *
tda998x_bridge_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
const struct drm_edid *drm_edid;
const struct edid *edid;
drm_edid = tda998x_edid_read(priv, connector);
if (!drm_edid) {
dev_dbg(&priv->hdmi->dev, "failed to get edid\n");
return NULL;
}
/*
* FIXME: This should use connector->display_info.has_audio from
* a path that has read the EDID and called
* drm_edid_connector_update().
*/
edid = drm_edid_raw(drm_edid);
dev_dbg(&priv->hdmi->dev, "got edid: width[%d] x height[%d]\n",
edid->width_cm, edid->height_cm);
priv->sink_has_audio = drm_detect_monitor_audio(edid);
cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
return drm_edid;
}
/*
 * Bridge .detect: HPD level is read from CEC_RXSHPDLEV via
 * tda998x_conn_detect(); @connector is unused.
 */
static enum drm_connector_status
tda998x_bridge_detect(struct drm_bridge *bridge,
		      struct drm_connector *connector)
{
	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);

	return tda998x_conn_detect(priv);
}
/* Write the HPD level bit into the CEC RXSHPD interrupt-enable register. */
static void tda998x_bridge_hpd_enable(struct drm_bridge *bridge)
{
	cec_write(bridge_to_tda998x_priv(bridge), REG_CEC_RXSHPDINTENA,
		  CEC_RXSHPDLEV_HPD);
}
/* Clear the CEC RXSHPD interrupt-enable register (mask HPD interrupts). */
static void tda998x_bridge_hpd_disable(struct drm_bridge *bridge)
{
	cec_write(bridge_to_tda998x_priv(bridge), REG_CEC_RXSHPDINTENA, 0);
}
static const struct drm_bridge_funcs tda998x_bridge_funcs = {
.attach = tda998x_bridge_attach,
.detach = tda998x_bridge_detach,
@@ -1690,6 +1751,10 @@ static const struct drm_bridge_funcs tda998x_bridge_funcs = {
.disable = tda998x_bridge_disable,
.mode_set = tda998x_bridge_mode_set,
.enable = tda998x_bridge_enable,
.edid_read = tda998x_bridge_edid_read,
.detect = tda998x_bridge_detect,
.hpd_enable = tda998x_bridge_hpd_enable,
.hpd_disable = tda998x_bridge_hpd_disable,
};
/* I2C driver functions */
@@ -1749,38 +1814,20 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
static void tda998x_destroy(struct device *dev)
static int
tda998x_probe(struct i2c_client *client)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
drm_bridge_remove(&priv->bridge);
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
if (priv->audio_pdev)
platform_device_unregister(priv->audio_pdev);
if (priv->hdmi->irq)
free_irq(priv->hdmi->irq, priv);
timer_delete_sync(&priv->edid_delay_timer);
cancel_work_sync(&priv->detect_work);
i2c_unregister_device(priv->cec);
cec_notifier_conn_unregister(priv->cec_notify);
}
static int tda998x_create(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct device_node *np = client->dev.of_node;
struct device *dev = &client->dev;
struct i2c_board_info cec_info;
struct tda998x_priv *priv;
u32 video;
int rev_lo, rev_hi, ret;
u32 video;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_warn(&client->dev, "adapter does not support I2C\n");
return -EIO;
}
priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs);
if (IS_ERR(priv))
@@ -1815,13 +1862,15 @@ static int tda998x_create(struct device *dev)
rev_lo = reg_read(priv, REG_VERSION_LSB);
if (rev_lo < 0) {
dev_err(dev, "failed to read version: %d\n", rev_lo);
return rev_lo;
ret = rev_lo;
goto cancel_work;
}
rev_hi = reg_read(priv, REG_VERSION_MSB);
if (rev_hi < 0) {
dev_err(dev, "failed to read version: %d\n", rev_hi);
return rev_hi;
ret = rev_hi;
goto cancel_work;
}
priv->rev = rev_lo | rev_hi << 8;
@@ -1844,7 +1893,8 @@ static int tda998x_create(struct device *dev)
break;
default:
dev_err(dev, "found unsupported device: %04x\n", priv->rev);
return -ENXIO;
ret = -ENXIO;
goto cancel_work;
}
/* after reset, enable DDC: */
@@ -1888,17 +1938,18 @@ static int tda998x_create(struct device *dev)
if (ret) {
dev_err(dev, "failed to request IRQ#%u: %d\n",
client->irq, ret);
goto err_irq;
goto cancel_work;
}
/* enable HPD irq */
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
priv->bridge.ops = DRM_BRIDGE_OP_HPD;
}
priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
goto free_irq;
}
priv->cec_glue.parent = dev;
@@ -1925,7 +1976,7 @@ static int tda998x_create(struct device *dev)
priv->cec = i2c_new_client_device(client->adapter, &cec_info);
if (IS_ERR(priv->cec)) {
ret = PTR_ERR(priv->cec);
goto fail;
goto notifier_conn_unregister;
}
/* enable EDID read irq: */
@@ -1942,7 +1993,7 @@ static int tda998x_create(struct device *dev)
ret = tda998x_get_audio_ports(priv, np);
if (ret)
goto fail;
goto unregister_dev;
if (priv->audio_port_enable[AUDIO_ROUTE_I2S] ||
priv->audio_port_enable[AUDIO_ROUTE_SPDIF])
@@ -1953,96 +2004,50 @@ static int tda998x_create(struct device *dev)
priv->bridge.of_node = dev->of_node;
#endif
priv->bridge.ops |= DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
priv->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&priv->bridge);
return 0;
fail:
tda998x_destroy(dev);
err_irq:
return ret;
}
/* DRM encoder functions */
static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
u32 crtcs = 0;
int ret;
if (dev->of_node)
crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/* If no CRTCs were found, fall back to our old behaviour */
if (crtcs == 0) {
dev_warn(dev, "Falling back to first CRTC\n");
crtcs = 1 << 0;
unregister_dev:
i2c_unregister_device(priv->cec);
notifier_conn_unregister:
cec_notifier_conn_unregister(priv->cec_notify);
free_irq:
if (client->irq) {
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
free_irq(client->irq, priv);
}
priv->encoder.possible_crtcs = crtcs;
ret = drm_simple_encoder_init(drm, &priv->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret)
goto err_encoder;
ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL, 0);
if (ret)
goto err_bridge;
return 0;
err_bridge:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
return ret;
}
static int tda998x_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
return tda998x_encoder_init(dev, drm);
}
static void tda998x_unbind(struct device *dev, struct device *master,
void *data)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
drm_encoder_cleanup(&priv->encoder);
}
static const struct component_ops tda998x_ops = {
.bind = tda998x_bind,
.unbind = tda998x_unbind,
};
static int
tda998x_probe(struct i2c_client *client)
{
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_warn(&client->dev, "adapter does not support I2C\n");
return -EIO;
}
ret = tda998x_create(&client->dev);
if (ret)
return ret;
ret = component_add(&client->dev, &tda998x_ops);
if (ret)
tda998x_destroy(&client->dev);
cancel_work:
timer_delete_sync(&priv->edid_delay_timer);
cancel_work_sync(&priv->detect_work);
return ret;
}
static void tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
tda998x_destroy(&client->dev);
struct tda998x_priv *priv = dev_get_drvdata(&client->dev);
drm_bridge_remove(&priv->bridge);
if (priv->audio_pdev)
platform_device_unregister(priv->audio_pdev);
i2c_unregister_device(priv->cec);
cec_notifier_conn_unregister(priv->cec_notify);
/* disable all IRQs and free the IRQ handler */
if (client->irq) {
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
free_irq(priv->hdmi->irq, priv);
}
timer_delete_sync(&priv->edid_delay_timer);
cancel_work_sync(&priv->detect_work);
}
#ifdef CONFIG_OF

View File

@@ -0,0 +1,173 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2025 Icenowy Zheng <uwu@icenowy.me>
*
* Based on rcar_dw_hdmi.c, which is:
* Copyright (C) 2016 Renesas Electronics Corporation
* Based on imx8mp-hdmi-tx.c, which is:
* Copyright (C) 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
*/
#include <linux/clk.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_modes.h>
#define TH1520_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */
#define TH1520_HDMI_PHY_CKSYMTXCTRL 0x09 /* Clock Symbol and Transmitter Control Register */
#define TH1520_HDMI_PHY_VLEVCTRL 0x0e /* Voltage Level Control Register */
#define TH1520_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */
#define TH1520_HDMI_PHY_PLLDIVCTRL 0x11 /* PLL dividers */
#define TH1520_HDMI_PHY_TXTERM 0x19 /* Transmission Termination Register */
/*
 * One PHY tuning entry, valid for pixel clocks up to and including
 * @mpixelclock. Register offsets correspond to the TH1520_HDMI_PHY_*
 * defines above.
 */
struct th1520_hdmi_phy_params {
	unsigned long mpixelclock;	/* upper pixel-clock bound for this entry */
	u16 opmode_pllcfg;		/* 0x06: operation mode and PLL dividers */
	u16 pllcurrgmpctrl;		/* 0x10: PLL current and Gmp control */
	u16 plldivctrl;			/* 0x11: PLL divider control */
	u16 cksymtxctrl;		/* 0x09: clock symbol and TX control */
	u16 vlevctrl;			/* 0x0e: voltage level control */
	u16 txterm;			/* 0x19: TX termination */
};
/*
 * PHY settings, sorted by ascending mpixelclock. The lookup in
 * th1520_hdmi_phy_configure() selects the first entry whose bound is
 * >= the requested rate, so the ordering must be preserved.
 */
static const struct th1520_hdmi_phy_params th1520_hdmi_phy_params[] = {
	{ 35500000, 0x0003, 0x0283, 0x0628, 0x8088, 0x01a0, 0x0007 },
	{ 44900000, 0x0003, 0x0285, 0x0228, 0x8088, 0x01a0, 0x0007 },
	{ 71000000, 0x0002, 0x1183, 0x0614, 0x8088, 0x01a0, 0x0007 },
	{ 90000000, 0x0002, 0x1142, 0x0214, 0x8088, 0x01a0, 0x0007 },
	{ 121750000, 0x0001, 0x20c0, 0x060a, 0x8088, 0x01a0, 0x0007 },
	{ 165000000, 0x0001, 0x2080, 0x020a, 0x8088, 0x01a0, 0x0007 },
	{ 198000000, 0x0000, 0x3040, 0x0605, 0x83c8, 0x0120, 0x0004 },
	{ 297000000, 0x0000, 0x3041, 0x0205, 0x81dc, 0x0200, 0x0005 },
	{ 371250000, 0x0640, 0x3041, 0x0205, 0x80f6, 0x0140, 0x0000 },
	{ 495000000, 0x0640, 0x3080, 0x0005, 0x80f6, 0x0140, 0x0000 },
	{ 594000000, 0x0640, 0x3080, 0x0005, 0x80fa, 0x01e0, 0x0004 },
};
/* Per-device state for the TH1520 DWC HDMI TX glue driver. */
struct th1520_hdmi {
	struct dw_hdmi_plat_data plat_data;	/* handed to dw_hdmi_probe() */
	struct dw_hdmi *dw_hdmi;		/* core handle from dw_hdmi_probe() */
	struct clk *pixclk;			/* pixel clock, devm-enabled in probe */
	struct reset_control *mainrst, *prst;	/* main/APB resets, acquired deasserted */
};
/*
 * Mode filter for the DWC core: reject anything above 594 MHz, the
 * fastest rate covered by the PHY parameter table (mode->clock is kHz).
 */
static enum drm_mode_status
th1520_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
		       const struct drm_display_info *info,
		       const struct drm_display_mode *mode)
{
	return mode->clock > 594000 ? MODE_CLOCK_HIGH : MODE_OK;
}
static void th1520_hdmi_phy_set_params(struct dw_hdmi *hdmi,
const struct th1520_hdmi_phy_params *params)
{
dw_hdmi_phy_i2c_write(hdmi, params->opmode_pllcfg,
TH1520_HDMI_PHY_OPMODE_PLLCFG);
dw_hdmi_phy_i2c_write(hdmi, params->pllcurrgmpctrl,
TH1520_HDMI_PHY_PLLCURRGMPCTRL);
dw_hdmi_phy_i2c_write(hdmi, params->plldivctrl,
TH1520_HDMI_PHY_PLLDIVCTRL);
dw_hdmi_phy_i2c_write(hdmi, params->vlevctrl,
TH1520_HDMI_PHY_VLEVCTRL);
dw_hdmi_phy_i2c_write(hdmi, params->cksymtxctrl,
TH1520_HDMI_PHY_CKSYMTXCTRL);
dw_hdmi_phy_i2c_write(hdmi, params->txterm,
TH1520_HDMI_PHY_TXTERM);
}
static int th1520_hdmi_phy_configure(struct dw_hdmi *hdmi, void *data,
unsigned long mpixelclock)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(th1520_hdmi_phy_params); i++) {
if (mpixelclock <= th1520_hdmi_phy_params[i].mpixelclock) {
th1520_hdmi_phy_set_params(hdmi,
&th1520_hdmi_phy_params[i]);
return 0;
}
}
return -EINVAL;
}
/*
 * Probe: enable the pixel clock, release the resets, and hand control
 * to the common Synopsys DWC HDMI TX core.
 *
 * Returns 0 on success or a negative errno.
 */
static int th1520_dw_hdmi_probe(struct platform_device *pdev)
{
	struct th1520_hdmi *hdmi;
	struct dw_hdmi_plat_data *plat_data;
	struct device *dev = &pdev->dev;

	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
	if (!hdmi)
		return -ENOMEM;

	plat_data = &hdmi->plat_data;

	/* Clock and resets are devm-managed: enabled/deasserted for the device lifetime. */
	hdmi->pixclk = devm_clk_get_enabled(dev, "pix");
	if (IS_ERR(hdmi->pixclk))
		return dev_err_probe(dev, PTR_ERR(hdmi->pixclk),
				     "Unable to get pixel clock\n");

	hdmi->mainrst = devm_reset_control_get_exclusive_deasserted(dev, "main");
	if (IS_ERR(hdmi->mainrst))
		return dev_err_probe(dev, PTR_ERR(hdmi->mainrst),
				     "Unable to get main reset\n");

	hdmi->prst = devm_reset_control_get_exclusive_deasserted(dev, "apb");
	if (IS_ERR(hdmi->prst))
		return dev_err_probe(dev, PTR_ERR(hdmi->prst),
				     "Unable to get apb reset\n");

	plat_data->output_port = 1;
	plat_data->mode_valid = th1520_hdmi_mode_valid;
	plat_data->configure_phy = th1520_hdmi_phy_configure;
	plat_data->priv_data = hdmi;

	hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
	/*
	 * Check the core's return value, not the devm-allocated glue
	 * pointer: @hdmi can never be an ERR_PTR here, so the old
	 * IS_ERR(hdmi) test silently ignored dw_hdmi_probe() failures.
	 */
	if (IS_ERR(hdmi->dw_hdmi))
		return PTR_ERR(hdmi->dw_hdmi);

	platform_set_drvdata(pdev, hdmi);

	return 0;
}
static void th1520_dw_hdmi_remove(struct platform_device *pdev)
{
struct dw_hdmi *hdmi = platform_get_drvdata(pdev);
dw_hdmi_remove(hdmi);
}
/* Device-tree match table; bound via the "thead,th1520-dw-hdmi" compatible. */
static const struct of_device_id th1520_dw_hdmi_of_table[] = {
	{ .compatible = "thead,th1520-dw-hdmi" },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, th1520_dw_hdmi_of_table);
/* Platform driver glue for the TH1520 HDMI TX encoder. */
static struct platform_driver th1520_dw_hdmi_platform_driver = {
	.probe = th1520_dw_hdmi_probe,
	.remove = th1520_dw_hdmi_remove,
	.driver = {
		.name = "th1520-dw-hdmi",
		.of_match_table = th1520_dw_hdmi_of_table,
	},
};
module_platform_driver(th1520_dw_hdmi_platform_driver);
MODULE_AUTHOR("Icenowy Zheng <uwu@icenowy.me>");
MODULE_DESCRIPTION("T-Head TH1520 HDMI Encoder Driver");
MODULE_LICENSE("GPL");

View File

@@ -826,9 +826,19 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (!bridge->ycbcr_420_allowed)
connector->ycbcr_420_allowed = false;
if (bridge->ops & DRM_BRIDGE_OP_EDID) {
/*
* Ensure the last bridge declares OP_EDID or OP_MODES or both.
*/
if (bridge->ops & DRM_BRIDGE_OP_EDID || bridge->ops & DRM_BRIDGE_OP_MODES) {
drm_bridge_put(bridge_connector->bridge_edid);
bridge_connector->bridge_edid = drm_bridge_get(bridge);
bridge_connector->bridge_edid = NULL;
drm_bridge_put(bridge_connector->bridge_modes);
bridge_connector->bridge_modes = NULL;
if (bridge->ops & DRM_BRIDGE_OP_EDID)
bridge_connector->bridge_edid = drm_bridge_get(bridge);
if (bridge->ops & DRM_BRIDGE_OP_MODES)
bridge_connector->bridge_modes = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_HPD) {
drm_bridge_put(bridge_connector->bridge_hpd);
@@ -838,10 +848,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
drm_bridge_put(bridge_connector->bridge_detect);
bridge_connector->bridge_detect = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_MODES) {
drm_bridge_put(bridge_connector->bridge_modes);
bridge_connector->bridge_modes = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_HDMI) {
if (bridge_connector->bridge_hdmi)
return ERR_PTR(-EBUSY);

View File

@@ -5184,6 +5184,28 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
kfree(mst_state);
}
/*
 * &drm_private_state_funcs.atomic_create_state hook: allocate and
 * initialize a fresh MST topology state for @obj with the same defaults
 * the topology manager used to set up at init time (63 available
 * timeslots, payloads starting at slot 1, empty payload list).
 */
static struct drm_private_state *
drm_dp_mst_atomic_create_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_mgr *mgr =
		to_dp_mst_topology_mgr(obj);
	struct drm_dp_mst_topology_state *mst_state;

	mst_state = kzalloc_obj(*mst_state);
	if (!mst_state)
		return ERR_PTR(-ENOMEM);

	__drm_atomic_helper_private_obj_create_state(obj, &mst_state->base);

	mst_state->total_avail_slots = 63;
	mst_state->start_slot = 1;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->payloads);

	return &mst_state->base;
}
static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *branch)
{
@@ -5620,6 +5642,7 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
.atomic_create_state = drm_dp_mst_atomic_create_state,
.atomic_duplicate_state = drm_dp_mst_duplicate_state,
.atomic_destroy_state = drm_dp_mst_destroy_state,
};
@@ -5708,8 +5731,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
int max_dpcd_transaction_bytes, int max_payloads,
int conn_base_id)
{
struct drm_dp_mst_topology_state *mst_state;
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->delayed_destroy_lock);
@@ -5743,18 +5764,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mgr->max_payloads = max_payloads;
mgr->conn_base_id = conn_base_id;
mst_state = kzalloc_obj(*mst_state);
if (mst_state == NULL)
return -ENOMEM;
mst_state->total_avail_slots = 63;
mst_state->start_slot = 1;
mst_state->mgr = mgr;
INIT_LIST_HEAD(&mst_state->payloads);
drm_atomic_private_obj_init(dev, &mgr->base,
&mst_state->base,
NULL,
&drm_dp_mst_topology_state_funcs);
return 0;

View File

@@ -1497,7 +1497,22 @@ static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_p
free_group_state(to_group_state(state));
}
/*
 * &drm_private_state_funcs.atomic_create_state hook: allocate a zeroed
 * tunnel group state with an empty tunnel-state list and link it to @obj.
 */
static struct drm_private_state *tunnel_group_atomic_create_state(struct drm_private_obj *obj)
{
	struct drm_dp_tunnel_group_state *group_state;

	group_state = kzalloc_obj(*group_state);
	if (!group_state)
		return ERR_PTR(-ENOMEM);

	__drm_atomic_helper_private_obj_create_state(obj, &group_state->base);

	INIT_LIST_HEAD(&group_state->tunnel_states);

	return &group_state->base;
}
static const struct drm_private_state_funcs tunnel_group_funcs = {
.atomic_create_state = tunnel_group_atomic_create_state,
.atomic_duplicate_state = tunnel_group_duplicate_state,
.atomic_destroy_state = tunnel_group_destroy_state,
};
@@ -1581,19 +1596,11 @@ EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);
static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
{
struct drm_dp_tunnel_group_state *group_state;
group_state = kzalloc_obj(*group_state);
if (!group_state)
return false;
INIT_LIST_HEAD(&group_state->tunnel_states);
group->mgr = mgr;
group->available_bw = -1;
INIT_LIST_HEAD(&group->tunnels);
drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base,
drm_atomic_private_obj_init(mgr->dev, &group->base, NULL,
&tunnel_group_funcs);
return true;

View File

@@ -926,23 +926,41 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
*
* Initialize the private object, which can be embedded into any
* driver private object that needs its own atomic state.
*
* RETURNS:
* Zero on success, error code on failure
*/
void
drm_atomic_private_obj_init(struct drm_device *dev,
struct drm_private_obj *obj,
struct drm_private_state *state,
const struct drm_private_state_funcs *funcs)
int drm_atomic_private_obj_init(struct drm_device *dev,
struct drm_private_obj *obj,
struct drm_private_state *state,
const struct drm_private_state_funcs *funcs)
{
memset(obj, 0, sizeof(*obj));
drm_modeset_lock_init(&obj->lock);
obj->dev = dev;
obj->state = state;
obj->funcs = funcs;
list_add_tail(&obj->head, &dev->mode_config.privobj_list);
state->obj = obj;
/*
* Not all users of drm_atomic_private_obj_init have been
* converted to using &drm_private_obj_funcs.atomic_create_state yet.
* For the time being, let's only call reset if the passed state is
* NULL. Otherwise, we will fallback to the previous behaviour.
*/
if (!state) {
state = obj->funcs->atomic_create_state(obj);
if (IS_ERR(state))
return PTR_ERR(state);
obj->state = state;
} else {
obj->state = state;
state->obj = obj;
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

View File

@@ -2301,13 +2301,13 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* current layout.
*
* NOTE: Commit work has multiple phases, first hardware commit, then
* cleanup. We want them to overlap, hence need system_unbound_wq to
* cleanup. We want them to overlap, hence need system_dfl_wq to
* make sure work items don't artificially stall on each another.
*/
drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
queue_work(system_dfl_wq, &state->commit_work);
else
commit_tail(state);
@@ -2340,7 +2340,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
*
* Asynchronous workers need to have sufficient parallelism to be able to run
* different atomic commits on different CRTCs in parallel. The simplest way to
* achieve this is by running them on the &system_unbound_wq work queue. Note
* achieve this is by running them on the &system_dfl_wq work queue. Note
* that drivers are not required to split up atomic commits and run an
* individual commit in parallel - userspace is supposed to do that if it cares.
* But it might be beneficial to do that for modesets, since those necessarily

View File

@@ -713,6 +713,28 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
/**
 * __drm_atomic_helper_private_obj_create_state - initializes private object state
 * @obj: private object
 * @state: new state to initialize, may be NULL
 *
 * Installs @state as the current state of @obj and, when @state is not
 * NULL, links it back to @obj. Intended for use from a private object's
 * &drm_private_state_funcs.atomic_create_state implementation.
 *
 * @obj is assumed to be zeroed.
 */
void __drm_atomic_helper_private_obj_create_state(struct drm_private_obj *obj,
						  struct drm_private_state *state)
{
	obj->state = state;
	if (state)
		state->obj = obj;
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_create_state);
/**
* __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
* @obj: CRTC object
@@ -802,6 +824,7 @@ void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
struct drm_bridge_state *state)
{
memset(state, 0, sizeof(*state));
__drm_atomic_helper_private_obj_create_state(&bridge->base, &state->base);
state->bridge = bridge;
}
EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset);

View File

@@ -460,7 +460,21 @@ drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
bridge->funcs->atomic_destroy_state(bridge, state);
}
/*
 * &drm_private_state_funcs.atomic_create_state hook for bridges: defer
 * to the bridge's own &drm_bridge_funcs.atomic_reset to allocate the
 * initial state and hand back its embedded private state (or forward
 * the error pointer).
 */
static struct drm_private_state *
drm_bridge_atomic_create_priv_state(struct drm_private_obj *obj)
{
	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
	struct drm_bridge_state *state = bridge->funcs->atomic_reset(bridge);

	return IS_ERR(state) ? ERR_CAST(state) : &state->base;
}
static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_create_state = drm_bridge_atomic_create_priv_state,
.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
@@ -537,26 +551,13 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
goto err_reset_bridge;
}
if (drm_bridge_is_atomic(bridge)) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
goto err_detach_bridge;
}
if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_init(bridge->dev, &bridge->base,
&state->base,
NULL,
&drm_bridge_priv_state_funcs);
}
return 0;
err_detach_bridge:
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
err_reset_bridge:
bridge->dev = NULL;
bridge->encoder = NULL;

File diff suppressed because it is too large Load Diff

View File

@@ -93,7 +93,8 @@ static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] =
/* Init Helpers */
static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, enum drm_colorop_type type,
struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
enum drm_colorop_type type,
uint32_t flags)
{
struct drm_mode_config *config = &dev->mode_config;
@@ -109,6 +110,7 @@ static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *co
colorop->type = type;
colorop->plane = plane;
colorop->next = NULL;
colorop->funcs = funcs;
list_add_tail(&colorop->head, &config->colorop_list);
colorop->index = config->num_colorop++;
@@ -178,6 +180,21 @@ void drm_colorop_cleanup(struct drm_colorop *colorop)
}
EXPORT_SYMBOL(drm_colorop_cleanup);
/**
* drm_colorop_destroy - destroy colorop
* @colorop: drm colorop
*
* Destroys @colorop by performing common DRM cleanup and freeing the
* colorop object. This can be used by drivers if they do not
* require any driver-specific teardown.
*/
void drm_colorop_destroy(struct drm_colorop *colorop)
{
drm_colorop_cleanup(colorop);
kfree(colorop);
}
EXPORT_SYMBOL(drm_colorop_destroy);
/**
* drm_colorop_pipeline_destroy - Helper for color pipeline destruction
*
@@ -191,8 +208,7 @@ void drm_colorop_pipeline_destroy(struct drm_device *dev)
struct drm_colorop *colorop, *next;
list_for_each_entry_safe(colorop, next, &config->colorop_list, head) {
drm_colorop_cleanup(colorop);
kfree(colorop);
colorop->funcs->destroy(colorop);
}
}
EXPORT_SYMBOL(drm_colorop_pipeline_destroy);
@@ -203,6 +219,7 @@ EXPORT_SYMBOL(drm_colorop_pipeline_destroy);
* @dev: DRM device
* @colorop: The drm_colorop object to initialize
* @plane: The associated drm_plane
* @funcs: control functions for the new colorop
* @supported_tfs: A bitfield of supported drm_plane_colorop_curve_1d_init enum values,
* created using BIT(curve_type) and combined with the OR '|'
* operator.
@@ -210,7 +227,8 @@ EXPORT_SYMBOL(drm_colorop_pipeline_destroy);
* @return zero on success, -E value on failure
*/
int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, u64 supported_tfs, uint32_t flags)
struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
u64 supported_tfs, uint32_t flags)
{
struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT];
int i, len;
@@ -231,7 +249,7 @@ int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *
return -EINVAL;
}
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags);
ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_1D_CURVE, flags);
if (ret)
return ret;
@@ -288,20 +306,23 @@ static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_color
* @dev: DRM device
* @colorop: The drm_colorop object to initialize
* @plane: The associated drm_plane
* @funcs: control functions for new colorop
* @lut_size: LUT size supported by driver
* @interpolation: 1D LUT interpolation type
* @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines.
* @return zero on success, -E value on failure
*/
int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, uint32_t lut_size,
struct drm_plane *plane,
const struct drm_colorop_funcs *funcs,
uint32_t lut_size,
enum drm_colorop_lut1d_interpolation_type interpolation,
uint32_t flags)
{
struct drm_property *prop;
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags);
ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_1D_LUT, flags);
if (ret)
return ret;
@@ -339,11 +360,12 @@ int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_color
EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init);
int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, uint32_t flags)
struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
uint32_t flags)
{
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags);
ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_CTM_3X4, flags);
if (ret)
return ret;
@@ -363,16 +385,18 @@ EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init);
* @dev: DRM device
* @colorop: The drm_colorop object to initialize
* @plane: The associated drm_plane
* @funcs: control functions for the new colorop
* @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines.
* @return zero on success, -E value on failure
*/
int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, uint32_t flags)
struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
uint32_t flags)
{
struct drm_property *prop;
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags);
ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_MULTIPLIER, flags);
if (ret)
return ret;
@@ -391,6 +415,7 @@ EXPORT_SYMBOL(drm_plane_colorop_mult_init);
int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane,
const struct drm_colorop_funcs *funcs,
uint32_t lut_size,
enum drm_colorop_lut3d_interpolation_type interpolation,
uint32_t flags)
@@ -398,7 +423,7 @@ int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *col
struct drm_property *prop;
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags);
ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_3D_LUT, flags);
if (ret)
return ret;

View File

@@ -1173,6 +1173,11 @@ static const struct drm_prop_enum_list drm_link_status_enum_list[] = {
{ DRM_MODE_LINK_STATUS_BAD, "Bad" },
};
/* Values exposed by the immutable "panel_type" connector property. */
static const struct drm_prop_enum_list drm_panel_type_enum_list[] = {
	{ DRM_MODE_PANEL_TYPE_UNKNOWN, "unknown" },
	{ DRM_MODE_PANEL_TYPE_OLED, "OLED" },
};
/**
* drm_display_info_set_bus_formats - set the supported bus formats
* @info: display info to store bus formats in
@@ -1501,6 +1506,9 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* Summarizing: Only set "DPMS" when the connector is known to be enabled,
* assume that a successful SETCONFIG call also sets "DPMS" to on, and
* never read back the value of "DPMS" because it can be incorrect.
* panel_type:
* Immutable enum property to indicate the type of connected panel.
* Possible values are "unknown" (default) and "OLED".
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
@@ -1851,6 +1859,13 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.link_status_property = prop;
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "panel_type",
drm_panel_type_enum_list,
ARRAY_SIZE(drm_panel_type_enum_list));
if (!prop)
return -ENOMEM;
dev->mode_config.panel_type_property = prop;
prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
if (!prop)
return -ENOMEM;
@@ -3626,3 +3641,21 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
return tg;
}
EXPORT_SYMBOL(drm_mode_create_tile_group);
/**
 * drm_connector_attach_panel_type_property - attaches panel type property
 * @connector: connector to attach the property on.
 *
 * Attaches the device-global "panel_type" enum property to @connector,
 * initialized to DRM_MODE_PANEL_TYPE_UNKNOWN. This is a no-op when the
 * property has not been created for this device.
 */
void drm_connector_attach_panel_type_property(struct drm_connector *connector)
{
	struct drm_property *prop =
		connector->dev->mode_config.panel_type_property;

	if (prop)
		drm_object_attach_property(&connector->base, prop,
					   DRM_MODE_PANEL_TYPE_UNKNOWN);
}
EXPORT_SYMBOL(drm_connector_attach_panel_type_property);

View File

@@ -158,8 +158,8 @@ static const struct dma_fence_ops drm_crtc_fence_ops;
static struct drm_crtc *fence_to_crtc(struct dma_fence *fence)
{
BUG_ON(fence->ops != &drm_crtc_fence_ops);
return container_of(fence->lock, struct drm_crtc, fence_lock);
BUG_ON(rcu_access_pointer(fence->ops) != &drm_crtc_fence_ops);
return container_of(fence->extern_lock, struct drm_crtc, fence_lock);
}
static const char *drm_crtc_fence_get_driver_name(struct dma_fence *fence)

View File

@@ -343,18 +343,6 @@ EXPORT_SYMBOL(drm_fb_helper_unprepare);
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
int ret;
/*
* If this is not the generic fbdev client, initialize a drm_client
* without callbacks so we can use the modesets.
*/
if (!fb_helper->client.funcs) {
ret = drm_client_init(dev, &fb_helper->client, "drm_fb_helper", NULL);
if (ret)
return ret;
}
dev->fb_helper = fb_helper;
return 0;
@@ -437,9 +425,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
cancel_work_sync(&fb_helper->damage_work);
drm_fb_helper_release_info(fb_helper);
if (!fb_helper->client.funcs)
drm_client_release(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fb_helper_fini);

View File

@@ -784,7 +784,7 @@ EXPORT_SYMBOL(drm_gem_put_pages);
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
struct drm_gem_object **objs)
{
int i, ret = 0;
int i;
struct drm_gem_object *obj;
spin_lock(&filp->table_lock);
@@ -792,16 +792,23 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
for (i = 0; i < count; i++) {
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle[i]);
if (!obj) {
ret = -ENOENT;
break;
}
if (!obj)
goto err;
drm_gem_object_get(obj);
objs[i] = obj;
}
spin_unlock(&filp->table_lock);
return 0;
err:
spin_unlock(&filp->table_lock);
return ret;
while (i--)
drm_gem_object_put(objs[i]);
return -ENOENT;
}
/**
@@ -829,24 +836,34 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
u32 *handles;
int ret;
*objs_out = NULL;
if (!count)
return 0;
objs = kvmalloc_objs(struct drm_gem_object *, count,
GFP_KERNEL | __GFP_ZERO);
objs = kvmalloc_objs(*objs, count);
if (!objs)
return -ENOMEM;
*objs_out = objs;
handles = vmemdup_array_user(bo_handles, count, sizeof(u32));
if (IS_ERR(handles))
return PTR_ERR(handles);
if (IS_ERR(handles)) {
ret = PTR_ERR(handles);
goto err_free_objs;
}
ret = objects_lookup(filp, handles, count, objs);
kvfree(handles);
return ret;
if (ret)
goto err_free_handles;
kvfree(handles);
*objs_out = objs;
return 0;
err_free_handles:
kvfree(handles);
err_free_objs:
kvfree(objs);
return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

View File

@@ -524,6 +524,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
struct drm_plane *plane, *plt;
struct drm_colorop *colorop, *copt;
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
@@ -553,6 +554,11 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_property_destroy(dev, property);
}
list_for_each_entry_safe(colorop, copt, &dev->mode_config.colorop_list,
head) {
colorop->funcs->destroy(colorop);
}
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);

View File

@@ -626,7 +626,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
mod_delayed_work(system_wq,
mod_delayed_work(system_percpu_wq,
&dev->mode_config.output_poll_work,
0);
}

View File

@@ -218,7 +218,7 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2;
mutex_unlock(&sr_data->avg_mutex);
mod_delayed_work(system_wq, &sr_data->entry_work,
mod_delayed_work(system_percpu_wq, &sr_data->entry_work,
msecs_to_jiffies(delay));
}
}

View File

@@ -81,7 +81,7 @@
* From userspace, this property will always read as zero.
*/
#define fence_to_wb_connector(x) container_of(x->lock, \
#define fence_to_wb_connector(x) container_of(x->extern_lock, \
struct drm_writeback_connector, \
fence_lock)

View File

@@ -2,6 +2,8 @@
/*
* Copyright © 2025 Intel Corporation
*/
#include <drm/drm_print.h>
#include "intel_color.h"
#include "intel_colorop.h"
#include "intel_color_pipeline.h"
@@ -10,72 +12,145 @@
#include "skl_universal_plane.h"
#define MAX_COLOR_PIPELINES 1
#define MAX_COLOROP 4
#define PLANE_DEGAMMA_SIZE 128
#define PLANE_GAMMA_SIZE 32
static const struct drm_colorop_funcs intel_colorop_funcs = {
.destroy = intel_colorop_destroy,
};
/*
* 3DLUT can be bound to all three HDR planes. However, even with the latest
* color pipeline UAPI, there is no good way to represent a HW block which
* can be shared/attached at different stages of the pipeline. So right now,
* we expose 3DLUT only attached with the primary plane.
*
* That way we don't confuse the userspace with opaque commit failures
* on trying to enable it on multiple planes which would otherwise make
* the pipeline totally unusable.
*/
static const enum intel_color_block xe3plpd_primary_plane_pipeline[] = {
INTEL_PLANE_CB_PRE_CSC_LUT,
INTEL_PLANE_CB_CSC,
INTEL_PLANE_CB_3DLUT,
INTEL_PLANE_CB_POST_CSC_LUT,
};
static const enum intel_color_block hdr_plane_pipeline[] = {
INTEL_PLANE_CB_PRE_CSC_LUT,
INTEL_PLANE_CB_CSC,
INTEL_PLANE_CB_POST_CSC_LUT,
};
static bool plane_has_3dlut(struct intel_display *display, enum pipe pipe,
struct drm_plane *plane)
{
return (DISPLAY_VER(display) >= 35 &&
intel_color_crtc_has_3dlut(display, pipe) &&
plane->type == DRM_PLANE_TYPE_PRIMARY);
}
static
struct intel_colorop *intel_color_pipeline_plane_add_colorop(struct drm_plane *plane,
struct intel_colorop *prev,
enum intel_color_block id)
{
struct drm_device *dev = plane->dev;
struct intel_colorop *colorop;
int ret;
colorop = intel_colorop_create(id);
if (IS_ERR(colorop))
return colorop;
switch (id) {
case INTEL_PLANE_CB_PRE_CSC_LUT:
ret = drm_plane_colorop_curve_1d_lut_init(dev,
&colorop->base, plane,
&intel_colorop_funcs,
PLANE_DEGAMMA_SIZE,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
break;
case INTEL_PLANE_CB_CSC:
ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
&intel_colorop_funcs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
break;
case INTEL_PLANE_CB_3DLUT:
ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane,
&intel_colorop_funcs, 17,
DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
true);
break;
case INTEL_PLANE_CB_POST_CSC_LUT:
ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
&intel_colorop_funcs,
PLANE_GAMMA_SIZE,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
break;
default:
drm_err(plane->dev, "Invalid colorop id [%d]", id);
ret = -EINVAL;
}
if (ret)
goto cleanup;
if (prev)
drm_colorop_set_next_property(&prev->base, &colorop->base);
return colorop;
cleanup:
intel_colorop_destroy(&colorop->base);
return ERR_PTR(ret);
}
static
int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list,
enum pipe pipe)
{
struct drm_device *dev = plane->dev;
struct intel_display *display = to_intel_display(dev);
struct drm_colorop *prev_op;
struct intel_colorop *colorop;
int ret;
struct intel_colorop *colorop[MAX_COLOROP];
struct intel_colorop *prev = NULL;
const enum intel_color_block *pipeline;
int pipeline_len;
int ret = 0;
int i;
colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT);
ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
PLANE_DEGAMMA_SIZE,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
return ret;
list->type = colorop->base.base.id;
/* TODO: handle failures and clean up */
prev_op = &colorop->base;
colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
return ret;
drm_colorop_set_next_property(prev_op, &colorop->base);
prev_op = &colorop->base;
if (DISPLAY_VER(display) >= 35 &&
intel_color_crtc_has_3dlut(display, pipe) &&
plane->type == DRM_PLANE_TYPE_PRIMARY) {
colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT);
ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17,
DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
true);
if (ret)
return ret;
drm_colorop_set_next_property(prev_op, &colorop->base);
prev_op = &colorop->base;
if (plane_has_3dlut(display, pipe, plane)) {
pipeline = xe3plpd_primary_plane_pipeline;
pipeline_len = ARRAY_SIZE(xe3plpd_primary_plane_pipeline);
} else {
pipeline = hdr_plane_pipeline;
pipeline_len = ARRAY_SIZE(hdr_plane_pipeline);
}
colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
PLANE_GAMMA_SIZE,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
return ret;
for (i = 0; i < pipeline_len; i++) {
colorop[i] = intel_color_pipeline_plane_add_colorop(plane, prev,
pipeline[i]);
if (IS_ERR(colorop[i])) {
ret = PTR_ERR(colorop[i]);
goto cleanup;
}
drm_colorop_set_next_property(prev_op, &colorop->base);
prev = colorop[i];
}
list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", list->type);
list->type = colorop[0]->base.base.id;
list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop[0]->base.base.id);
return 0;
cleanup:
while (--i >= 0)
intel_colorop_destroy(&colorop[i]->base);
return ret;
}
int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)

View File

@@ -35,3 +35,9 @@ struct intel_colorop *intel_colorop_create(enum intel_color_block id)
return colorop;
}
void intel_colorop_destroy(struct drm_colorop *colorop)
{
drm_colorop_cleanup(colorop);
kfree(to_intel_colorop(colorop));
}

View File

@@ -13,5 +13,6 @@ struct intel_colorop;
struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
struct intel_colorop *intel_colorop_alloc(void);
struct intel_colorop *intel_colorop_create(enum intel_color_block id);
void intel_colorop_destroy(struct drm_colorop *colorop);
#endif /* __INTEL_COLOROP_H__ */

View File

@@ -5,7 +5,7 @@
#include <linux/shmem_fs.h>
#include <drm/drm_buddy.h>
#include <linux/gpu_buddy.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

View File

@@ -148,7 +148,7 @@ __dma_fence_signal__notify(struct dma_fence *fence,
{
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
dma_fence_assert_held(fence);
list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);

View File

@@ -1045,9 +1045,10 @@ __i915_active_fence_set(struct i915_active_fence *active,
* nesting rules for the fence->lock; the inner lock is always the
* older lock.
*/
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (prev)
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
spin_lock_nested(dma_fence_spinlock(prev),
SINGLE_DEPTH_NESTING);
/*
* A does the cmpxchg first, and so it sees C or NULL, as before, or
@@ -1061,17 +1062,18 @@ __i915_active_fence_set(struct i915_active_fence *active,
*/
while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
if (prev) {
spin_unlock(prev->lock);
spin_unlock(dma_fence_spinlock(prev));
dma_fence_put(prev);
}
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
prev = i915_active_fence_get(active);
GEM_BUG_ON(prev == fence);
spin_lock_irqsave(fence->lock, flags);
dma_fence_lock_irqsave(fence, flags);
if (prev)
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
spin_lock_nested(dma_fence_spinlock(prev),
SINGLE_DEPTH_NESTING);
}
/*
@@ -1088,10 +1090,11 @@ __i915_active_fence_set(struct i915_active_fence *active,
*/
if (prev) {
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
/* serialise with prev->cb_list */
spin_unlock(dma_fence_spinlock(prev));
}
list_add_tail(&active->cb.node, &fence->cb_list);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_unlock_irqrestore(fence, flags);
return prev;
}

View File

@@ -7,7 +7,7 @@
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"
#include <drm/drm_buddy.h>
#include <linux/gpu_buddy.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
@@ -167,9 +167,9 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
const u64 size = res->size;
const u32 max_segment = round_down(UINT_MAX, page_alignment);
struct drm_buddy *mm = bman_res->mm;
struct gpu_buddy *mm = bman_res->mm;
struct list_head *blocks = &bman_res->blocks;
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
struct i915_refct_sgt *rsgt;
struct scatterlist *sg;
struct sg_table *st;
@@ -202,8 +202,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
list_for_each_entry(block, blocks, link) {
u64 block_size, offset;
block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
offset = drm_buddy_block_offset(block);
block_size = min_t(u64, size, gpu_buddy_block_size(mm, block));
offset = gpu_buddy_block_offset(block);
while (block_size) {
u64 len;

View File

@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/gpu_buddy.h>
#include <drm/drm_buddy.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
@@ -16,7 +17,7 @@
struct i915_ttm_buddy_manager {
struct ttm_resource_manager manager;
struct drm_buddy mm;
struct gpu_buddy mm;
struct list_head reserved;
struct mutex lock;
unsigned long visible_size;
@@ -38,7 +39,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct i915_ttm_buddy_resource *bman_res;
struct drm_buddy *mm = &bman->mm;
struct gpu_buddy *mm = &bman->mm;
unsigned long n_pages, lpfn;
u64 min_page_size;
u64 size;
@@ -57,13 +58,13 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
bman_res->mm = mm;
if (place->flags & TTM_PL_FLAG_TOPDOWN)
bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
bman_res->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
bman_res->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION;
if (place->fpfn || lpfn != man->size)
bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
bman_res->flags |= GPU_BUDDY_RANGE_ALLOCATION;
GEM_BUG_ON(!bman_res->base.size);
size = bman_res->base.size;
@@ -89,7 +90,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
goto err_free_res;
}
err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
err = gpu_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
(u64)lpfn << PAGE_SHIFT,
(u64)n_pages << PAGE_SHIFT,
min_page_size,
@@ -101,15 +102,15 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
if (lpfn <= bman->visible_size) {
bman_res->used_visible_size = PFN_UP(bman_res->base.size);
} else {
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
list_for_each_entry(block, &bman_res->blocks, link) {
unsigned long start =
drm_buddy_block_offset(block) >> PAGE_SHIFT;
gpu_buddy_block_offset(block) >> PAGE_SHIFT;
if (start < bman->visible_size) {
unsigned long end = start +
(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
(gpu_buddy_block_size(mm, block) >> PAGE_SHIFT);
bman_res->used_visible_size +=
min(end, bman->visible_size) - start;
@@ -126,7 +127,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
return 0;
err_free_blocks:
drm_buddy_free_list(mm, &bman_res->blocks, 0);
gpu_buddy_free_list(mm, &bman_res->blocks, 0);
mutex_unlock(&bman->lock);
err_free_res:
ttm_resource_fini(man, &bman_res->base);
@@ -141,7 +142,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
mutex_lock(&bman->lock);
drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0);
gpu_buddy_free_list(&bman->mm, &bman_res->blocks, 0);
bman->visible_avail += bman_res->used_visible_size;
mutex_unlock(&bman->lock);
@@ -156,8 +157,8 @@ static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man,
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct drm_buddy *mm = &bman->mm;
struct drm_buddy_block *block;
struct gpu_buddy *mm = &bman->mm;
struct gpu_buddy_block *block;
if (!place->fpfn && !place->lpfn)
return true;
@@ -176,9 +177,9 @@ static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man,
/* Check each drm buddy block individually */
list_for_each_entry(block, &bman_res->blocks, link) {
unsigned long fpfn =
drm_buddy_block_offset(block) >> PAGE_SHIFT;
gpu_buddy_block_offset(block) >> PAGE_SHIFT;
unsigned long lpfn = fpfn +
(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
(gpu_buddy_block_size(mm, block) >> PAGE_SHIFT);
if (place->fpfn < lpfn && place->lpfn > fpfn)
return true;
@@ -194,8 +195,8 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct drm_buddy *mm = &bman->mm;
struct drm_buddy_block *block;
struct gpu_buddy *mm = &bman->mm;
struct gpu_buddy_block *block;
if (!place->fpfn && !place->lpfn)
return true;
@@ -209,9 +210,9 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
/* Check each drm buddy block individually */
list_for_each_entry(block, &bman_res->blocks, link) {
unsigned long fpfn =
drm_buddy_block_offset(block) >> PAGE_SHIFT;
gpu_buddy_block_offset(block) >> PAGE_SHIFT;
unsigned long lpfn = fpfn +
(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
(gpu_buddy_block_size(mm, block) >> PAGE_SHIFT);
if (fpfn < place->fpfn || lpfn > place->lpfn)
return false;
@@ -224,7 +225,7 @@ static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct drm_buddy_block *block;
struct gpu_buddy_block *block;
mutex_lock(&bman->lock);
drm_printf(printer, "default_page_size: %lluKiB\n",
@@ -293,7 +294,7 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev,
if (!bman)
return -ENOMEM;
err = drm_buddy_init(&bman->mm, size, chunk_size);
err = gpu_buddy_init(&bman->mm, size, chunk_size);
if (err)
goto err_free_bman;
@@ -333,7 +334,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct drm_buddy *mm = &bman->mm;
struct gpu_buddy *mm = &bman->mm;
int ret;
ttm_resource_manager_set_used(man, false);
@@ -345,8 +346,8 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
ttm_set_driver_manager(bdev, type, NULL);
mutex_lock(&bman->lock);
drm_buddy_free_list(mm, &bman->reserved, 0);
drm_buddy_fini(mm);
gpu_buddy_free_list(mm, &bman->reserved, 0);
gpu_buddy_fini(mm);
bman->visible_avail += bman->visible_reserved;
WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
mutex_unlock(&bman->lock);
@@ -371,15 +372,15 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
u64 start, u64 size)
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct drm_buddy *mm = &bman->mm;
struct gpu_buddy *mm = &bman->mm;
unsigned long fpfn = start >> PAGE_SHIFT;
unsigned long flags = 0;
int ret;
flags |= DRM_BUDDY_RANGE_ALLOCATION;
flags |= GPU_BUDDY_RANGE_ALLOCATION;
mutex_lock(&bman->lock);
ret = drm_buddy_alloc_blocks(mm, start,
ret = gpu_buddy_alloc_blocks(mm, start,
start + size,
size, mm->chunk_size,
&bman->reserved,

View File

@@ -13,7 +13,7 @@
struct ttm_device;
struct ttm_resource_manager;
struct drm_buddy;
struct gpu_buddy;
/**
* struct i915_ttm_buddy_resource
@@ -33,7 +33,7 @@ struct i915_ttm_buddy_resource {
struct list_head blocks;
unsigned long flags;
unsigned long used_visible_size;
struct drm_buddy *mm;
struct gpu_buddy *mm;
};
/**

View File

@@ -323,9 +323,9 @@ static void active_flush(struct i915_active *ref,
if (!fence)
return;
spin_lock_irq(fence->lock);
spin_lock_irq(dma_fence_spinlock(fence));
__list_del_entry(&active->cb.node);
spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
spin_unlock_irq(dma_fence_spinlock(fence)); /* serialise with fence->cb_list */
atomic_dec(&ref->count);
GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

View File

@@ -6,7 +6,7 @@
#include <linux/prime_numbers.h>
#include <linux/sort.h>
#include <drm/drm_buddy.h>
#include <linux/gpu_buddy.h>
#include "../i915_selftest.h"
@@ -371,7 +371,7 @@ static int igt_mock_splintered_region(void *arg)
struct drm_i915_private *i915 = mem->i915;
struct i915_ttm_buddy_resource *res;
struct drm_i915_gem_object *obj;
struct drm_buddy *mm;
struct gpu_buddy *mm;
unsigned int expected_order;
LIST_HEAD(objects);
u64 size;
@@ -447,8 +447,8 @@ static int igt_mock_max_segment(void *arg)
struct drm_i915_private *i915 = mem->i915;
struct i915_ttm_buddy_resource *res;
struct drm_i915_gem_object *obj;
struct drm_buddy_block *block;
struct drm_buddy *mm;
struct gpu_buddy_block *block;
struct gpu_buddy *mm;
struct list_head *blocks;
struct scatterlist *sg;
I915_RND_STATE(prng);
@@ -487,8 +487,8 @@ static int igt_mock_max_segment(void *arg)
mm = res->mm;
size = 0;
list_for_each_entry(block, blocks, link) {
if (drm_buddy_block_size(mm, block) > size)
size = drm_buddy_block_size(mm, block);
if (gpu_buddy_block_size(mm, block) > size)
size = gpu_buddy_block_size(mm, block);
}
if (size < max_segment) {
pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
@@ -527,14 +527,14 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
struct intel_memory_region *mr = obj->mm.region;
struct i915_ttm_buddy_resource *bman_res =
to_ttm_buddy_resource(obj->mm.res);
struct drm_buddy *mm = bman_res->mm;
struct drm_buddy_block *block;
struct gpu_buddy *mm = bman_res->mm;
struct gpu_buddy_block *block;
u64 total;
total = 0;
list_for_each_entry(block, &bman_res->blocks, link) {
u64 start = drm_buddy_block_offset(block);
u64 end = start + drm_buddy_block_size(mm, block);
u64 start = gpu_buddy_block_offset(block);
u64 end = start + gpu_buddy_block_size(mm, block);
if (start < resource_size(&mr->io))
total += min_t(u64, end, resource_size(&mr->io)) - start;

View File

@@ -136,6 +136,14 @@ pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
static void
process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
{
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
if ((cmd->cmd_type & ROGUE_CMD_MAGIC_DWORD_MASK) != ROGUE_CMD_MAGIC_DWORD_SHIFTED) {
drm_warn_once(drm_dev, "Received FWCCB command with bad magic value; ignoring (type=0x%08x)\n",
cmd->cmd_type);
return;
}
switch (cmd->cmd_type) {
case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
pvr_power_reset(pvr_dev, false);
@@ -150,9 +158,17 @@ process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *c
pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs);
break;
case ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS:
/*
* We currently have no infrastructure for processing these
* stats. It may be added in the future, but for now just
* suppress the "unknown" warning when receiving this command.
*/
break;
default:
drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n",
cmd->cmd_type);
drm_info(drm_dev, "Received unknown FWCCB command (type=%d)\n",
cmd->cmd_type & ~ROGUE_CMD_MAGIC_DWORD_MASK);
break;
}
}

View File

@@ -152,15 +152,13 @@ struct pvr_device {
* @power: Optional power domain devices.
*
* On platforms with more than one power domain for the GPU, they are
* stored here in @domain_devs, along with links between them in
* @domain_links. The size of @domain_devs is given by @domain_count,
* while the size of @domain_links is (2 * @domain_count) - 1.
* stored here in @domains, along with links between them in
* @domain_links. The size of @domain_links is one less than
* struct dev_pm_domain_list->num_pds in @domains.
*/
struct pvr_device_power {
struct device **domain_devs;
struct dev_pm_domain_list *domains;
struct device_link **domain_links;
u32 domain_count;
} power;
/**

View File

@@ -593,14 +593,16 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev)
int pvr_power_domains_init(struct pvr_device *pvr_dev)
{
struct device *dev = from_pvr_device(pvr_dev)->dev;
static const char *const ROGUE_PD_NAMES[] = { "a", "b", "c", "d", "e" };
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
struct device *dev = drm_dev->dev;
struct device_link **domain_links __free(kfree) = NULL;
struct device **domain_devs __free(kfree) = NULL;
struct dev_pm_domain_list *domains = NULL;
int domain_count;
int link_count;
char dev_name[2] = "a";
int err;
int i;
@@ -612,46 +614,33 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev)
if (domain_count <= 1)
return 0;
link_count = domain_count + (domain_count - 1);
if (domain_count > ARRAY_SIZE(ROGUE_PD_NAMES)) {
drm_err(drm_dev, "%s() only supports %zu domains on Rogue",
__func__, ARRAY_SIZE(ROGUE_PD_NAMES));
return -EOPNOTSUPP;
}
domain_devs = kzalloc_objs(*domain_devs, domain_count);
if (!domain_devs)
return -ENOMEM;
link_count = domain_count - 1;
domain_links = kzalloc_objs(*domain_links, link_count);
if (!domain_links)
return -ENOMEM;
for (i = 0; i < domain_count; i++) {
struct device *domain_dev;
const struct dev_pm_domain_attach_data pd_attach_data = {
.pd_names = ROGUE_PD_NAMES,
.num_pd_names = domain_count,
.pd_flags = 0,
};
dev_name[0] = 'a' + i;
domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
if (IS_ERR_OR_NULL(domain_dev)) {
err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
goto err_detach;
}
err = dev_pm_domain_attach_list(dev, &pd_attach_data, &domains);
if (err < 0)
return err;
domain_devs[i] = domain_dev;
}
for (i = 0; i < domain_count; i++) {
for (i = 0; i < link_count; i++) {
struct device_link *link;
link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
if (!link) {
err = -ENODEV;
goto err_unlink;
}
domain_links[i] = link;
}
for (i = domain_count; i < link_count; i++) {
struct device_link *link;
link = device_link_add(domain_devs[i - domain_count + 1],
domain_devs[i - domain_count],
link = device_link_add(domains->pd_devs[i + 1],
domains->pd_devs[i],
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
if (!link) {
err = -ENODEV;
@@ -662,9 +651,8 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev)
}
pvr_dev->power = (struct pvr_device_power){
.domain_devs = no_free_ptr(domain_devs),
.domains = domains,
.domain_links = no_free_ptr(domain_links),
.domain_count = domain_count,
};
return 0;
@@ -673,31 +661,21 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev)
while (--i >= 0)
device_link_del(domain_links[i]);
i = domain_count;
err_detach:
while (--i >= 0)
dev_pm_domain_detach(domain_devs[i], true);
return err;
}
void pvr_power_domains_fini(struct pvr_device *pvr_dev)
{
const int domain_count = pvr_dev->power.domain_count;
struct pvr_device_power *pvr_power = &pvr_dev->power;
int i = domain_count + (domain_count - 1);
int i = (int)pvr_power->domains->num_pds - 1;
while (--i >= 0)
device_link_del(pvr_dev->power.domain_links[i]);
device_link_del(pvr_power->domain_links[i]);
i = domain_count;
dev_pm_domain_detach_list(pvr_power->domains);
while (--i >= 0)
dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);
kfree(pvr_power->domain_links);
kfree(pvr_dev->power.domain_links);
kfree(pvr_dev->power.domain_devs);
pvr_dev->power = (struct pvr_device_power){ 0 };
*pvr_power = (struct pvr_device_power){ 0 };
}

View File

@@ -954,6 +954,20 @@ static void ingenic_drm_destroy_state(struct drm_private_obj *obj,
kfree(priv_state);
}
static struct drm_private_state *
ingenic_drm_create_state(struct drm_private_obj *obj)
{
struct ingenic_drm_private_state *priv_state;
priv_state = kzalloc_obj(*priv_state);
if (!priv_state)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &priv_state->base);
return &priv_state->base;
}
DEFINE_DRM_GEM_DMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
@@ -1034,6 +1048,7 @@ static struct drm_mode_config_helper_funcs ingenic_drm_mode_config_helpers = {
};
static const struct drm_private_state_funcs ingenic_drm_private_state_funcs = {
.atomic_create_state = ingenic_drm_create_state,
.atomic_duplicate_state = ingenic_drm_duplicate_state,
.atomic_destroy_state = ingenic_drm_destroy_state,
};
@@ -1087,7 +1102,6 @@ static void ingenic_drm_atomic_private_obj_fini(struct drm_device *drm, void *pr
static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
struct ingenic_drm_private_state *private_state;
const struct jz_soc_info *soc_info;
struct ingenic_drm *priv;
struct clk *parent_clk;
@@ -1387,19 +1401,13 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
goto err_devclk_disable;
}
private_state = kzalloc_obj(*private_state);
if (!private_state) {
ret = -ENOMEM;
goto err_clk_notifier_unregister;
}
drm_atomic_private_obj_init(drm, &priv->private_obj, &private_state->base,
drm_atomic_private_obj_init(drm, &priv->private_obj, NULL,
&ingenic_drm_private_state_funcs);
ret = drmm_add_action_or_reset(drm, ingenic_drm_atomic_private_obj_fini,
&priv->private_obj);
if (ret)
goto err_private_state_free;
goto err_clk_notifier_unregister;
ret = drm_dev_register(drm, 0);
if (ret) {
@@ -1411,8 +1419,6 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return 0;
err_private_state_free:
kfree(private_state);
err_clk_notifier_unregister:
clk_notifier_unregister(parent_clk, &priv->clock_nb);
err_devclk_disable:

View File

@@ -750,7 +750,22 @@ static void ingenic_ipu_destroy_state(struct drm_private_obj *obj,
kfree(priv_state);
}
static struct drm_private_state *
ingenic_ipu_create_state(struct drm_private_obj *obj)
{
struct ingenic_ipu_private_state *priv_state;
priv_state = kzalloc_obj(*priv_state);
if (!priv_state)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_private_obj_create_state(obj, &priv_state->base);
return &priv_state->base;
}
static const struct drm_private_state_funcs ingenic_ipu_private_state_funcs = {
.atomic_create_state = ingenic_ipu_create_state,
.atomic_duplicate_state = ingenic_ipu_duplicate_state,
.atomic_destroy_state = ingenic_ipu_destroy_state,
};
@@ -793,7 +808,6 @@ static const struct regmap_config ingenic_ipu_regmap_config = {
static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
{
struct platform_device *pdev = to_platform_device(dev);
struct ingenic_ipu_private_state *private_state;
const struct soc_info *soc_info;
struct drm_device *drm = d;
struct drm_plane *plane;
@@ -887,20 +901,10 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
return err;
}
private_state = kzalloc_obj(*private_state);
if (!private_state) {
err = -ENOMEM;
goto err_clk_unprepare;
}
drm_atomic_private_obj_init(drm, &ipu->private_obj, &private_state->base,
drm_atomic_private_obj_init(drm, &ipu->private_obj, NULL,
&ingenic_ipu_private_state_funcs);
return 0;
err_clk_unprepare:
clk_unprepare(ipu->clk);
return err;
}
static void ingenic_ipu_unbind(struct device *dev,

Some files were not shown because too many files have changed in this diff Show More