Merge tag 'drm-misc-next-2020-11-12' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.11:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:

 - Remove pgprot_decrypt() before calling io_remap_pfn_range()
 - Revert "drm/dp_mst: Retrieve extended DPCD caps for topology manager"
 - ttm: Add multihop infrastructure
 - doc: Update dma-buf

Driver Changes:

 - amdgpu: Use TTM multihop
 - kmb: select DRM_MIPI_DSI and depend on ARCH_KEEMBAY; Fix build warning;
   Fix typos
 - nouveau: Use TTM multihop; Fix out-of-bounds access
 - radeon: Use TTM multihop
 - ingenic: Search for scaling coefficients up to 102% of screen size

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201112080115.GA7954@linux-uq9g
This commit is contained in:
Dave Airlie
2020-11-13 14:16:16 +10:00
249 changed files with 7047 additions and 1436 deletions

View File

@@ -0,0 +1,101 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/intel,keembay-dsi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Devicetree bindings for Intel Keem Bay mipi dsi controller
maintainers:
- Anitha Chrisanthus <anitha.chrisanthus@intel.com>
- Edmond J Dea <edmund.j.dea@intel.com>
properties:
compatible:
const: intel,keembay-dsi
reg:
items:
- description: MIPI registers range
reg-names:
items:
- const: mipi
clocks:
items:
- description: MIPI DSI clock
- description: MIPI DSI econfig clock
- description: MIPI DSI config clock
clock-names:
items:
- const: clk_mipi
- const: clk_mipi_ecfg
- const: clk_mipi_cfg
ports:
type: object
properties:
'#address-cells':
const: 1
'#size-cells':
const: 0
port@0:
type: object
description: MIPI DSI input port.
port@1:
type: object
description: DSI output port.
required:
- port@0
- port@1
additionalProperties: false
required:
- compatible
- reg
- reg-names
- clocks
- clock-names
- ports
additionalProperties: false
examples:
- |
mipi-dsi@20900000 {
compatible = "intel,keembay-dsi";
reg = <0x20900000 0x4000>;
reg-names = "mipi";
clocks = <&scmi_clk 0x86>,
<&scmi_clk 0x88>,
<&scmi_clk 0x89>;
clock-names = "clk_mipi", "clk_mipi_ecfg",
"clk_mipi_cfg";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dsi_in: endpoint {
remote-endpoint = <&disp_out>;
};
};
port@1 {
reg = <1>;
dsi_out: endpoint {
remote-endpoint = <&adv7535_input>;
};
};
};
};

View File

@@ -4,18 +4,19 @@
$id: http://devicetree.org/schemas/display/bridge/lontium,lt9611.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lontium LT9611 2 Port MIPI to HDMI Bridge
title: Lontium LT9611(UXC) 2 Port MIPI to HDMI Bridge
maintainers:
- Vinod Koul <vkoul@kernel.org>
description: |
The LT9611 is a bridge device which converts DSI to HDMI
The LT9611 and LT9611UXC are bridge devices which convert DSI to HDMI
properties:
compatible:
enum:
- lontium,lt9611
- lontium,lt9611uxc
reg:
maxItems: 1

View File

@@ -8,6 +8,8 @@ Optional properties:
- interrupts: describe the interrupt line used to inform the host
about hotplug events.
- reset-gpios: OF device-tree gpio specification for RST_N pin.
- iovcc-supply: I/O Supply Voltage (1.8V or 3.3V)
- cvcc12-supply: Digital Core Supply Voltage (1.2V)
HDMI audio properties:
- #sound-dai-cells: <0> or <1>. <0> if only i2s or spdif pin
@@ -54,6 +56,8 @@ Example:
compatible = "sil,sii9022";
reg = <0x39>;
reset-gpios = <&pioA 1 0>;
iovcc-supply = <&v3v3_hdmi>;
cvcc12-supply = <&v1v2_hdmi>;
#sound-dai-cells = <0>;
sil,i2s-data-lanes = < 0 1 2 >;

View File

@@ -0,0 +1,72 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/intel,keembay-display.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Devicetree bindings for Intel Keem Bay display controller
maintainers:
- Anitha Chrisanthus <anitha.chrisanthus@intel.com>
- Edmond J Dea <edmund.j.dea@intel.com>
properties:
compatible:
const: intel,keembay-display
reg:
items:
- description: LCD registers range
reg-names:
items:
- const: lcd
clocks:
items:
- description: LCD controller clock
- description: pll0 clock
clock-names:
items:
- const: clk_lcd
- const: clk_pll0
interrupts:
maxItems: 1
port:
type: object
description: Display output node to DSI.
required:
- compatible
- reg
- reg-names
- clocks
- clock-names
- interrupts
- port
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
display@20930000 {
compatible = "intel,keembay-display";
reg = <0x20930000 0x3000>;
reg-names = "lcd";
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk 0x83>,
<&scmi_clk 0x0>;
clock-names = "clk_lcd", "clk_pll0";
port {
disp_out: endpoint {
remote-endpoint = <&dsi_in>;
};
};
};

View File

@@ -0,0 +1,43 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/intel,keembay-msscam.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Devicetree bindings for Intel Keem Bay MSSCAM
maintainers:
- Anitha Chrisanthus <anitha.chrisanthus@intel.com>
- Edmond J Dea <edmund.j.dea@intel.com>
description: |
MSSCAM controls local clocks in the display subsystem namely LCD clocks and
MIPI DSI clocks. It also configures the interconnect between LCD and
MIPI DSI.
properties:
compatible:
items:
- const: intel,keembay-msscam
- const: syscon
reg:
maxItems: 1
reg-io-width:
const: 4
required:
- compatible
- reg
- reg-io-width
additionalProperties: false
examples:
- |
msscam:msscam@20910000 {
compatible = "intel,keembay-msscam", "syscon";
reg = <0x20910000 0x30>;
reg-io-width = <4>;
};

View File

@@ -201,13 +201,28 @@ Convert drivers to use drm_fbdev_generic_setup()
------------------------------------------------
Most drivers can use drm_fbdev_generic_setup(). Drivers have to implement
atomic modesetting and GEM vmap support. Current generic fbdev emulation
expects the framebuffer in system memory (or system-like memory).
atomic modesetting and GEM vmap support. Historically, generic fbdev emulation
expected the framebuffer in system memory or system-like memory. By employing
struct dma_buf_map, drivers with framebuffers in I/O memory can be supported
as well.
Contact: Maintainer of the driver you plan to convert
Level: Intermediate
Reimplement functions in drm_fbdev_fb_ops without fbdev
-------------------------------------------------------
A number of callback functions in drm_fbdev_fb_ops could benefit from
being rewritten without dependencies on the fbdev module. Some of the
helpers could further benefit from using struct dma_buf_map instead of
raw pointers.
Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
Level: Advanced
drm_framebuffer_funcs and drm_mode_config_funcs.fb_create cleanup
-----------------------------------------------------------------
@@ -450,6 +465,24 @@ Contact: Ville Syrjälä, Daniel Vetter
Level: Intermediate
Use struct dma_buf_map throughout codebase
------------------------------------------
Pointers to shared device memory are stored in struct dma_buf_map. Each
instance knows whether it refers to system or I/O memory. Most of the DRM-wide
interfaces have been converted to use struct dma_buf_map, but implementations
often still use raw pointers.
The task is to use struct dma_buf_map where it makes sense.
* Memory managers should use struct dma_buf_map for dma-buf-imported buffers.
* TTM might benefit from using struct dma_buf_map internally.
* Framebuffer copying and blitting helpers should operate on struct dma_buf_map.
Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vetter
Level: Intermediate
Core refactorings
=================

View File

@@ -8961,6 +8961,13 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
INTEL KEEMBAY DRM DRIVER
M: Anitha Chrisanthus <anitha.chrisanthus@intel.com>
M: Edmund Dea <edmund.j.dea@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/display/intel,kmb_display.yaml
F: drivers/gpu/drm/kmb/
INTEL MANAGEMENT ENGINE (mei)
M: Tomas Winkler <tomas.winkler@intel.com>
L: linux-kernel@vger.kernel.org

View File

@@ -908,7 +908,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
#ifdef CONFIG_DMA_API_DEBUG
{
if (!IS_ERR(sg_table)) {
struct scatterlist *sg;
u64 addr;
int len;

View File

@@ -232,6 +232,7 @@ config DRM_RADEON
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
@@ -252,6 +253,7 @@ config DRM_AMDGPU
select DRM_KMS_HELPER
select DRM_SCHED
select DRM_TTM
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
@@ -268,6 +270,8 @@ source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
source "drivers/gpu/drm/kmb/Kconfig"
config DRM_VGEM
tristate "Virtual GEM provider"
depends on DRM

View File

@@ -71,6 +71,7 @@ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_V3D) += v3d/
obj-$(CONFIG_DRM_VC4) += vc4/

View File

@@ -41,42 +41,6 @@
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
* @obj: GEM BO
*
* Sets up an in-kernel virtual mapping of the BO's memory.
*
* Returns:
* The virtual address of the mapping or an error pointer.
*/
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret;
ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
&bo->dma_buf_vmap);
if (ret)
return ERR_PTR(ret);
return bo->dma_buf_vmap.virtual;
}
/**
* amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
* @obj: GEM BO
* @vaddr: Virtual address (unused)
*
* Tears down the in-kernel virtual mapping of the BO's memory.
*/
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
/**
* amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
* @obj: GEM BO

View File

@@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);

View File

@@ -40,6 +40,7 @@
#include "amdgpu.h"
#include "amdgpu_irq.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_sched.h"
#include "amdgpu_amdkfd.h"
@@ -1105,7 +1106,7 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
static struct drm_driver kms_driver;
static const struct drm_driver amdgpu_kms_driver;
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -1176,7 +1177,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev);
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
return PTR_ERR(adev);
@@ -1520,7 +1521,29 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
static struct drm_driver kms_driver = {
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
.driver_features =
DRIVER_ATOMIC |
DRIVER_GEM |
@@ -1531,6 +1554,7 @@ static struct drm_driver kms_driver = {
.lastclose = amdgpu_driver_lastclose_kms,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
.fops = &amdgpu_driver_kms_fops,
@@ -1583,7 +1607,6 @@ static int __init amdgpu_init(void)
goto error_fence;
DRM_INFO("amdgpu kernel modesetting enabled.\n");
kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */

View File

@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_gem_ttm_helper.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -220,8 +221,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
.open = amdgpu_gem_object_open,
.close = amdgpu_gem_object_close,
.export = amdgpu_gem_prime_export,
.vmap = amdgpu_gem_prime_vmap,
.vunmap = amdgpu_gem_prime_vunmap,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
};
/*

View File

@@ -29,7 +29,6 @@
#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
@@ -484,7 +483,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
* etc. (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_info *info = data;
@@ -1247,27 +1246,6 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
/*
* Debugfs info
*/

View File

@@ -100,7 +100,6 @@ struct amdgpu_bo {
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;

View File

@@ -512,119 +512,6 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
return r;
}
/**
* amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
*
* Called by amdgpu_bo_move().
*/
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
struct ttm_resource *old_mem = &bo->mem;
struct ttm_resource tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
/* create space/pages for new_mem in GTT space */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit from VRAM\n");
return r;
}
r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r))
goto out_cleanup;
/* Bind the memory to the GTT space */
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
/* blit VRAM to GTT */
r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
r = ttm_bo_wait_ctx(bo, ctx);
if (unlikely(r))
goto out_cleanup;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
ttm_resource_free(bo, &bo->mem);
ttm_bo_assign_mem(bo, new_mem);
out_cleanup:
ttm_resource_free(bo, &tmp_mem);
return r;
}
/**
* amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
*
* Called by amdgpu_bo_move().
*/
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
struct ttm_resource *old_mem = &bo->mem;
struct ttm_resource tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
/* make space in GTT for old_mem buffer */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit to VRAM\n");
return r;
}
/* move/bind old memory to GTT space */
r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r))
return r;
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
ttm_bo_assign_mem(bo, &tmp_mem);
/* copy to VRAM */
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
out_cleanup:
ttm_resource_free(bo, &tmp_mem);
return r;
}
/**
* amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
*
@@ -656,13 +543,25 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
*/
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
struct ttm_resource *old_mem = &bo->mem;
int r;
if ((old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) ||
(old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM)) {
hop->fpfn = 0;
hop->lpfn = 0;
hop->mem_type = TTM_PL_TT;
hop->flags = 0;
return -EMULTIHOP;
}
if (new_mem->mem_type == TTM_PL_TT) {
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
if (r)
@@ -716,17 +615,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
goto memcpy;
}
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
} else {
r = amdgpu_move_blit(bo, evict,
new_mem, old_mem);
}
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (r) {
memcpy:
/* Check that all memory is CPU accessible */

View File

@@ -47,11 +47,13 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev_to_drm(adev);
/* enable virtual display */
if (adev->mode_info.num_crtc == 0)
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
ddev->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
adev->pg_flags = 0;
}

View File

@@ -387,10 +387,12 @@ static void
komeda_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
crtc);
/* commit with modeset will be handled in enable/disable */
if (drm_atomic_crtc_needs_modeset(crtc->state))
if (drm_atomic_crtc_needs_modeset(crtc_state))
return;
komeda_crtc_do_flush(crtc, old);

View File

@@ -58,7 +58,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
return status;
}
static struct drm_driver komeda_kms_driver = {
static const struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create),

View File

@@ -234,7 +234,7 @@ static void hdlcd_debugfs_init(struct drm_minor *minor)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver hdlcd_driver = {
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = hdlcd_irq,
.irq_preinstall = hdlcd_irq_preinstall,

View File

@@ -561,7 +561,7 @@ static void malidp_debugfs_init(struct drm_minor *minor)
#endif //CONFIG_DEBUG_FS
static struct drm_driver malidp_driver = {
static const struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS

View File

@@ -431,11 +431,13 @@ static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc,
static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
if (crtc->state->color_mgmt_changed)
if (crtc_state->color_mgmt_changed)
armada_drm_update_gamma(crtc);
dcrtc->regs_idx = 0;
@@ -445,6 +447,8 @@ static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
@@ -455,7 +459,7 @@ static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
* If we aren't doing a full modeset, then we need to queue
* the event here.
*/
if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
dcrtc->update_pending = true;
armada_drm_crtc_queue_state_event(crtc);
spin_lock_irq(&dcrtc->irq_lock);

View File

@@ -27,7 +27,7 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static struct drm_ioctl_desc armada_ioctls[] = {
static const struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
@@ -35,7 +35,7 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static struct drm_driver armada_drm_driver = {
static const struct drm_driver armada_drm_driver = {
.lastclose = drm_fb_helper_lastclose,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -48,6 +48,7 @@ static struct drm_driver armada_drm_driver = {
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.num_ioctls = ARRAY_SIZE(armada_ioctls),
.fops = &armada_drm_fops,
};
@@ -275,8 +276,6 @@ static int __init armada_drm_init(void)
{
int ret;
armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
ret = platform_driver_register(&armada_lcd_platform_driver);
if (ret)
return ret;

View File

@@ -191,7 +191,7 @@ static void aspeed_gfx_unload(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver aspeed_gfx_driver = {
static const struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,

View File

@@ -39,7 +39,7 @@ static void ast_cursor_fini(struct ast_private *ast)
for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
gbo = ast->cursor.gbo[i];
drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]);
drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -60,7 +60,7 @@ int ast_cursor_init(struct ast_private *ast)
struct drm_device *dev = &ast->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
void __iomem *vaddr;
struct dma_buf_map map;
int ret;
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
@@ -77,16 +77,15 @@ int ast_cursor_init(struct ast_private *ast)
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}
vaddr = drm_gem_vram_vmap(gbo);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
ret = drm_gem_vram_vmap(gbo, &map);
if (ret) {
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}
ast->cursor.gbo[i] = gbo;
ast->cursor.vaddr[i] = vaddr;
ast->cursor.map[i] = map;
}
return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
@@ -95,7 +94,7 @@ int ast_cursor_init(struct ast_private *ast)
while (i) {
--i;
gbo = ast->cursor.gbo[i];
drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]);
drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -170,6 +169,7 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
struct drm_device *dev = &ast->base;
struct drm_gem_vram_object *gbo;
struct dma_buf_map map;
int ret;
void *src;
void __iomem *dst;
@@ -183,18 +183,17 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
ret = drm_gem_vram_pin(gbo, 0);
if (ret)
return ret;
src = drm_gem_vram_vmap(gbo);
if (IS_ERR(src)) {
ret = PTR_ERR(src);
ret = drm_gem_vram_vmap(gbo, &map);
if (ret)
goto err_drm_gem_vram_unpin;
}
src = map.vaddr; /* TODO: Use mapping abstraction properly */
dst = ast->cursor.vaddr[ast->cursor.next_index];
dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem;
/* do data transfer to cursor BO */
update_cursor_image(dst, src, fb->width, fb->height);
drm_gem_vram_vunmap(gbo, src);
drm_gem_vram_vunmap(gbo, &map);
drm_gem_vram_unpin(gbo);
return 0;
@@ -257,7 +256,7 @@ void ast_cursor_show(struct ast_private *ast, int x, int y,
u8 __iomem *sig;
u8 jreg;
dst = ast->cursor.vaddr[ast->cursor.next_index];
dst = ast->cursor.map[ast->cursor.next_index].vaddr;
sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);

View File

@@ -49,7 +49,7 @@ module_param_named(modeset, ast_modeset, int, 0400);
DEFINE_DRM_GEM_FOPS(ast_fops);
static struct drm_driver ast_driver = {
static const struct drm_driver ast_driver = {
.driver_features = DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_MODESET,

View File

@@ -28,10 +28,11 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
#include <linux/types.h>
#include <linux/io.h>
#include <linux/dma-buf-map.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
#include <linux/types.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
@@ -63,6 +64,7 @@ enum ast_chip {
AST2300,
AST2400,
AST2500,
AST2600,
};
enum ast_tx_chip {
@@ -131,7 +133,7 @@ struct ast_private {
struct {
struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
void __iomem *vaddr[AST_DEFAULT_HWC_NUM];
struct dma_buf_map map[AST_DEFAULT_HWC_NUM];
unsigned int next_index;
} cursor;
@@ -159,7 +161,7 @@ static inline struct ast_private *to_ast_private(struct drm_device *dev)
return container_of(dev, struct ast_private, base);
}
struct ast_private *ast_device_create(struct drm_driver *drv,
struct ast_private *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags);

View File

@@ -143,7 +143,10 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast_detect_config_mode(dev, &scu_rev);
/* Identify chipset */
if (dev->pdev->revision >= 0x40) {
if (dev->pdev->revision >= 0x50) {
ast->chip = AST2600;
drm_info(dev, "AST 2600 detected\n");
} else if (dev->pdev->revision >= 0x40) {
ast->chip = AST2500;
drm_info(dev, "AST 2500 detected\n");
} else if (dev->pdev->revision >= 0x30) {
@@ -392,7 +395,7 @@ static void ast_device_release(void *data)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
struct ast_private *ast_device_create(struct drm_driver *drv,
struct ast_private *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags)
{

View File

@@ -782,10 +782,12 @@ static void
ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct ast_private *ast = to_ast_private(crtc->dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
/*

View File

@@ -295,10 +295,10 @@ static const struct ast_vbios_enhtable res_1600x900[] = {
static const struct ast_vbios_enhtable res_1920x1080[] = {
{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT), 60, 1, 0x38 },
{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT), 0xFF, 1, 0x38 },
};

View File

@@ -815,7 +815,7 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver atmel_hlcdc_dc_driver = {
static const struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,

View File

@@ -57,7 +57,7 @@ static int bochs_load(struct drm_device *dev)
DEFINE_DRM_GEM_FOPS(bochs_fops);
static struct drm_driver bochs_driver = {
static const struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &bochs_fops,
.name = "bochs-drm",

View File

@@ -151,7 +151,6 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;

View File

@@ -61,6 +61,19 @@ config DRM_LONTIUM_LT9611
HDMI signals
Please say Y if you have such hardware.
config DRM_LONTIUM_LT9611UXC
tristate "Lontium LT9611UXC DSI/HDMI bridge"
select SND_SOC_HDMI_CODEC if SND_SOC
depends on OF
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select REGMAP_I2C
help
Driver for Lontium LT9611UXC DSI to HDMI bridge
chip driver that converts dual DSI and I2S to
HDMI signals
Please say Y if you have such hardware.
config DRM_LVDS_CODEC
tristate "Transparent LVDS encoders and decoders support"
depends on OF

View File

@@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o

View File

@@ -524,94 +524,6 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
}
static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
{
int reg;
int retval = 0;
int timeout_loop = 0;
/* Enable AUX CH operation */
reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
reg |= AUX_EN;
writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
/* Is AUX CH command reply received? */
reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
while (!(reg & RPLY_RECEIV)) {
timeout_loop++;
if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
dev_err(dp->dev, "AUX CH command reply failed!\n");
return -ETIMEDOUT;
}
reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
usleep_range(10, 11);
}
/* Clear interrupt source for AUX CH command reply */
writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
/* Clear interrupt source for AUX CH access error */
reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
if (reg & AUX_ERR) {
writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
return -EREMOTEIO;
}
/* Check AUX CH error access status */
reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
if ((reg & AUX_STATUS_MASK) != 0) {
dev_err(dp->dev, "AUX CH error happens: %d\n\n",
reg & AUX_STATUS_MASK);
return -EREMOTEIO;
}
return retval;
}
int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
unsigned int reg_addr,
unsigned char data)
{
u32 reg;
int i;
int retval;
for (i = 0; i < 3; i++) {
/* Clear AUX CH data buffer */
reg = BUF_CLR;
writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
/* Select DPCD device address */
reg = AUX_ADDR_7_0(reg_addr);
writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
reg = AUX_ADDR_15_8(reg_addr);
writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
reg = AUX_ADDR_19_16(reg_addr);
writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
/* Write data buffer */
reg = (unsigned int)data;
writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
/*
* Set DisplayPort transaction and write 1 byte
* If bit 3 is 1, DisplayPort transaction.
* If Bit 3 is 0, I2C transaction.
*/
reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
/* Start AUX transaction */
retval = analogix_dp_start_aux_transaction(dp);
if (retval == 0)
break;
dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
}
return retval;
}
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
{
u32 reg;

File diff suppressed because it is too large Load Diff

View File

@@ -80,7 +80,6 @@ static int lvds_codec_probe(struct platform_device *pdev)
struct device_node *panel_node;
struct drm_panel *panel;
struct lvds_codec *lvds_codec;
int ret;
lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
if (!lvds_codec)
@@ -90,13 +89,9 @@ static int lvds_codec_probe(struct platform_device *pdev)
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power");
if (IS_ERR(lvds_codec->vcc)) {
ret = PTR_ERR(lvds_codec->vcc);
if (ret != -EPROBE_DEFER)
dev_err(lvds_codec->dev,
"Unable to get \"vcc\" supply: %d\n", ret);
return ret;
}
if (IS_ERR(lvds_codec->vcc))
return dev_err_probe(dev, PTR_ERR(lvds_codec->vcc),
"Unable to get \"vcc\" supply\n");
lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);

View File

@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
@@ -168,6 +169,7 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
struct regulator_bulk_data supplies[2];
/*
* Mutex protects audio and video functions from interfering
* each other, by keeping their i2c command sequences atomic.
@@ -954,13 +956,73 @@ static const struct drm_bridge_timings default_sii902x_timings = {
| DRM_BUS_FLAG_DE_HIGH,
};
static int sii902x_init(struct sii902x *sii902x)
{
struct device *dev = &sii902x->i2c->dev;
unsigned int status = 0;
u8 chipid[4];
int ret;
sii902x_reset(sii902x);
ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
if (ret)
return ret;
ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
&chipid, 4);
if (ret) {
dev_err(dev, "regmap_read failed %d\n", ret);
return ret;
}
if (chipid[0] != 0xb0) {
dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
chipid[0]);
return -EINVAL;
}
/* Clear all pending interrupts */
regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
if (sii902x->i2c->irq > 0) {
regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
SII902X_HOTPLUG_EVENT);
ret = devm_request_threaded_irq(dev, sii902x->i2c->irq, NULL,
sii902x_interrupt,
IRQF_ONESHOT, dev_name(dev),
sii902x);
if (ret)
return ret;
}
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
drm_bridge_add(&sii902x->bridge);
sii902x_audio_codec_init(sii902x, dev);
i2c_set_clientdata(sii902x->i2c, sii902x);
sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
1, 0, I2C_MUX_GATE,
sii902x_i2c_bypass_select,
sii902x_i2c_bypass_deselect);
if (!sii902x->i2cmux)
return -ENOMEM;
sii902x->i2cmux->priv = sii902x;
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
unsigned int status = 0;
struct sii902x *sii902x;
u8 chipid[4];
int ret;
ret = i2c_check_functionality(client->adapter,
@@ -989,59 +1051,27 @@ static int sii902x_probe(struct i2c_client *client,
mutex_init(&sii902x->mutex);
sii902x_reset(sii902x);
ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
if (ret)
sii902x->supplies[0].supply = "iovcc";
sii902x->supplies[1].supply = "cvcc12";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
if (ret < 0)
return ret;
ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
&chipid, 4);
if (ret) {
dev_err(dev, "regmap_read failed %d\n", ret);
ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to enable supplies");
return ret;
}
if (chipid[0] != 0xb0) {
dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
chipid[0]);
return -EINVAL;
ret = sii902x_init(sii902x);
if (ret < 0) {
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
}
/* Clear all pending interrupts */
regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
if (client->irq > 0) {
regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
SII902X_HOTPLUG_EVENT);
ret = devm_request_threaded_irq(dev, client->irq, NULL,
sii902x_interrupt,
IRQF_ONESHOT, dev_name(dev),
sii902x);
if (ret)
return ret;
}
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
drm_bridge_add(&sii902x->bridge);
sii902x_audio_codec_init(sii902x, dev);
i2c_set_clientdata(client, sii902x);
sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
1, 0, I2C_MUX_GATE,
sii902x_i2c_bypass_select,
sii902x_i2c_bypass_deselect);
if (!sii902x->i2cmux)
return -ENOMEM;
sii902x->i2cmux->priv = sii902x;
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
return ret;
}
static int sii902x_remove(struct i2c_client *client)
@@ -1051,6 +1081,8 @@ static int sii902x_remove(struct i2c_client *client)
i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
return 0;
}

View File

@@ -17,6 +17,8 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -72,6 +74,7 @@
#define SN_AUX_ADDR_19_16_REG 0x74
#define SN_AUX_ADDR_15_8_REG 0x75
#define SN_AUX_ADDR_7_0_REG 0x76
#define SN_AUX_ADDR_MASK GENMASK(19, 0)
#define SN_AUX_LENGTH_REG 0x77
#define SN_AUX_CMD_REG 0x78
#define AUX_CMD_SEND BIT(0)
@@ -118,6 +121,7 @@
* @debugfs: Used for managing our debugfs.
* @host_node: Remote DSI node.
* @dsi: Our MIPI DSI source.
* @edid: Detected EDID of eDP panel.
* @refclk: Our reference clock.
* @panel: Our panel.
* @enable_gpio: The GPIO we toggle to enable the bridge.
@@ -143,6 +147,7 @@ struct ti_sn_bridge {
struct drm_bridge bridge;
struct drm_connector connector;
struct dentry *debugfs;
struct edid *edid;
struct device_node *host_node;
struct mipi_dsi_device *dsi;
struct clk *refclk;
@@ -264,6 +269,23 @@ connector_to_ti_sn_bridge(struct drm_connector *connector)
static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
{
struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
struct edid *edid = pdata->edid;
int num, ret;
if (!edid) {
pm_runtime_get_sync(pdata->dev);
edid = pdata->edid = drm_get_edid(connector, &pdata->aux.ddc);
pm_runtime_put(pdata->dev);
}
if (edid && drm_edid_is_valid(edid)) {
ret = drm_connector_update_edid_property(connector, edid);
if (!ret) {
num = drm_add_edid_modes(connector, edid);
if (num)
return num;
}
}
return drm_panel_get_modes(pdata->panel, connector);
}
@@ -856,13 +878,15 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct ti_sn_bridge *pdata = aux_to_ti_sn_bridge(aux);
u32 request = msg->request & ~DP_AUX_I2C_MOT;
u32 request = msg->request & ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE);
u32 request_val = AUX_CMD_REQ(msg->request);
u8 *buf = (u8 *)msg->buffer;
u8 *buf = msg->buffer;
unsigned int len = msg->size;
unsigned int val;
int ret, i;
int ret;
u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG];
if (msg->size > SN_AUX_MAX_PAYLOAD_BYTES)
if (len > SN_AUX_MAX_PAYLOAD_BYTES)
return -EINVAL;
switch (request) {
@@ -871,24 +895,21 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
case DP_AUX_NATIVE_READ:
case DP_AUX_I2C_READ:
regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val);
/* Assume it's good */
msg->reply = 0;
break;
default:
return -EINVAL;
}
regmap_write(pdata->regmap, SN_AUX_ADDR_19_16_REG,
(msg->address >> 16) & 0xF);
regmap_write(pdata->regmap, SN_AUX_ADDR_15_8_REG,
(msg->address >> 8) & 0xFF);
regmap_write(pdata->regmap, SN_AUX_ADDR_7_0_REG, msg->address & 0xFF);
BUILD_BUG_ON(sizeof(addr_len) != sizeof(__be32));
put_unaligned_be32((msg->address & SN_AUX_ADDR_MASK) << 8 | len,
addr_len);
regmap_bulk_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, addr_len,
ARRAY_SIZE(addr_len));
regmap_write(pdata->regmap, SN_AUX_LENGTH_REG, msg->size);
if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) {
for (i = 0; i < msg->size; i++)
regmap_write(pdata->regmap, SN_AUX_WDATA_REG(i),
buf[i]);
}
if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE)
regmap_bulk_write(pdata->regmap, SN_AUX_WDATA_REG(0), buf, len);
/* Clear old status bits before start so we don't get confused */
regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG,
@@ -898,35 +919,52 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);
/* Zero delay loop because i2c transactions are slow already */
ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
!(val & AUX_CMD_SEND), 200,
50 * 1000);
!(val & AUX_CMD_SEND), 0, 50 * 1000);
if (ret)
return ret;
ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val);
if (ret)
return ret;
else if ((val & AUX_IRQ_STATUS_NAT_I2C_FAIL)
|| (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT)
|| (val & AUX_IRQ_STATUS_AUX_SHORT))
return -ENXIO;
if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE)
return msg->size;
for (i = 0; i < msg->size; i++) {
unsigned int val;
ret = regmap_read(pdata->regmap, SN_AUX_RDATA_REG(i),
&val);
if (ret)
return ret;
WARN_ON(val & ~0xFF);
buf[i] = (u8)(val & 0xFF);
if (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) {
/*
* The hardware tried the message seven times per the DP spec
* but it hit a timeout. We ignore defers here because they're
* handled in hardware.
*/
return -ETIMEDOUT;
}
return msg->size;
if (val & AUX_IRQ_STATUS_AUX_SHORT) {
ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len);
if (ret)
return ret;
} else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) {
switch (request) {
case DP_AUX_I2C_WRITE:
case DP_AUX_I2C_READ:
msg->reply |= DP_AUX_I2C_REPLY_NACK;
break;
case DP_AUX_NATIVE_READ:
case DP_AUX_NATIVE_WRITE:
msg->reply |= DP_AUX_NATIVE_REPLY_NACK;
break;
}
return 0;
}
if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE ||
len == 0)
return len;
ret = regmap_bulk_read(pdata->regmap, SN_AUX_RDATA_REG(0), buf, len);
if (ret)
return ret;
return len;
}
static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata)
@@ -1268,6 +1306,7 @@ static int ti_sn_bridge_remove(struct i2c_client *client)
if (!pdata)
return -EINVAL;
kfree(pdata->edid);
ti_sn_debugfs_remove(pdata);
of_node_put(pdata->host_node);

View File

@@ -160,7 +160,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
/* Register the IRQ if the HPD GPIO is IRQ-capable. */
tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio);
if (tpd->hpd_irq) {
if (tpd->hpd_irq >= 0) {
ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL,
tpd12s015_hpd_isr,
IRQF_TRIGGER_RISING |

View File

@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
#include <linux/dma-buf-map.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -234,7 +235,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
struct drm_device *dev = buffer->client->dev;
drm_gem_vunmap(buffer->gem, buffer->vaddr);
drm_gem_vunmap(buffer->gem, &buffer->map);
if (buffer->gem)
drm_gem_object_put(buffer->gem);
@@ -290,24 +291,31 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
/**
* drm_client_buffer_vmap - Map DRM client buffer into address space
* @buffer: DRM client buffer
* @map_copy: Returns the mapped memory's address
*
* This function maps a client buffer into kernel address space. If the
* buffer is already mapped, it returns the mapping's address.
* buffer is already mapped, it returns the existing mapping's address.
*
* Client buffer mappings are not ref'counted. Each call to
* drm_client_buffer_vmap() should be followed by a call to
* drm_client_buffer_vunmap(); or the client buffer should be mapped
* throughout its lifetime.
*
* The returned address is a copy of the internal value. In contrast to
* other vmap interfaces, you don't need it for the client's vunmap
* function. So you can modify it at will during blit and draw operations.
*
* Returns:
* The mapped memory's address
* 0 on success, or a negative errno code otherwise.
*/
void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
int
drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map_copy)
{
void *vaddr;
struct dma_buf_map *map = &buffer->map;
int ret;
if (buffer->vaddr)
return buffer->vaddr;
if (dma_buf_map_is_set(map))
goto out;
/*
* FIXME: The dependency on GEM here isn't required, we could
@@ -317,13 +325,14 @@ void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
vaddr = drm_gem_vmap(buffer->gem);
if (IS_ERR(vaddr))
return vaddr;
ret = drm_gem_vmap(buffer->gem, map);
if (ret)
return ret;
buffer->vaddr = vaddr;
out:
*map_copy = *map;
return vaddr;
return 0;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -337,8 +346,9 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
drm_gem_vunmap(buffer->gem, buffer->vaddr);
buffer->vaddr = NULL;
struct dma_buf_map *map = &buffer->map;
drm_gem_vunmap(buffer->gem, map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);

View File

@@ -3686,10 +3686,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
WARN_ON(mgr->mst_primary);
/* get dpcd info */
ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
mgr->aux->name, ret);
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (ret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("failed to read DPCD\n");
goto out_unlock;
}

View File

@@ -284,7 +284,7 @@ void drm_minor_release(struct drm_minor *minor)
* struct clk *pclk;
* };
*
* static struct drm_driver driver_drm_driver = {
* static const struct drm_driver driver_drm_driver = {
* [...]
* };
*
@@ -574,7 +574,7 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
}
static int drm_dev_init(struct drm_device *dev,
struct drm_driver *driver,
const struct drm_driver *driver,
struct device *parent)
{
int ret;
@@ -589,7 +589,11 @@ static int drm_dev_init(struct drm_device *dev,
kref_init(&dev->ref);
dev->dev = get_device(parent);
#ifdef CONFIG_DRM_LEGACY
dev->driver = (struct drm_driver *)driver;
#else
dev->driver = driver;
#endif
INIT_LIST_HEAD(&dev->managed.resources);
spin_lock_init(&dev->managed.lock);
@@ -663,7 +667,7 @@ static void devm_drm_dev_init_release(void *data)
static int devm_drm_dev_init(struct device *parent,
struct drm_device *dev,
struct drm_driver *driver)
const struct drm_driver *driver)
{
int ret;
@@ -678,7 +682,8 @@ static int devm_drm_dev_init(struct device *parent,
return ret;
}
void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
void *__devm_drm_dev_alloc(struct device *parent,
const struct drm_driver *driver,
size_t size, size_t offset)
{
void *container;
@@ -713,7 +718,7 @@ EXPORT_SYMBOL(__devm_drm_dev_alloc);
* RETURNS:
* Pointer to new DRM device, or ERR_PTR on failure.
*/
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent)
{
struct drm_device *dev;
@@ -858,7 +863,7 @@ static void remove_compat_control_link(struct drm_device *dev)
*/
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
struct drm_driver *driver = dev->driver;
const struct drm_driver *driver = dev->driver;
int ret;
if (!driver->load)

View File

@@ -3114,6 +3114,8 @@ static int drm_cvt_modes(struct drm_connector *connector,
case 0x0c:
width = height * 15 / 9;
break;
default:
unreachable();
}
for (j = 1; j < 5; j++) {

View File

@@ -372,24 +372,22 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
}
static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip)
struct drm_clip_rect *clip,
struct dma_buf_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
unsigned int cpp = fb->format->cpp[0];
size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
void *src = fb_helper->fbdev->screen_buffer + offset;
void *dst = fb_helper->buffer->vaddr + offset;
size_t len = (clip->x2 - clip->x1) * cpp;
unsigned int y;
for (y = clip->y1; y < clip->y2; y++) {
if (!fb_helper->dev->mode_config.fbdev_use_iomem)
memcpy(dst, src, len);
else
memcpy_toio((void __iomem *)dst, src, len);
dma_buf_map_incr(dst, offset); /* go to first pixel within clip rect */
for (y = clip->y1; y < clip->y2; y++) {
dma_buf_map_memcpy_to(dst, src, len);
dma_buf_map_incr(dst, fb->pitches[0]);
src += fb->pitches[0];
dst += fb->pitches[0];
}
}
@@ -400,7 +398,8 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
struct drm_clip_rect *clip = &helper->dirty_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
void *vaddr;
struct dma_buf_map map;
int ret;
spin_lock_irqsave(&helper->dirty_lock, flags);
clip_copy = *clip;
@@ -413,11 +412,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
/* Generic fbdev uses a shadow buffer */
if (helper->buffer) {
vaddr = drm_client_buffer_vmap(helper->buffer);
if (IS_ERR(vaddr))
ret = drm_client_buffer_vmap(helper->buffer, &map);
if (ret)
return;
drm_fb_helper_dirty_blit_real(helper, &clip_copy);
drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map);
}
if (helper->fb->funcs->dirty)
helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
&clip_copy, 1);
@@ -2026,6 +2026,199 @@ static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return -ENODEV;
}
static bool drm_fbdev_use_iomem(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_client_buffer *buffer = fb_helper->buffer;
return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
}
static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count,
loff_t pos)
{
const char __iomem *src = info->screen_base + pos;
size_t alloc_size = min_t(size_t, count, PAGE_SIZE);
ssize_t ret = 0;
int err = 0;
char *tmp;
tmp = kmalloc(alloc_size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
while (count) {
size_t c = min_t(size_t, count, alloc_size);
memcpy_fromio(tmp, src, c);
if (copy_to_user(buf, tmp, c)) {
err = -EFAULT;
break;
}
src += c;
buf += c;
ret += c;
count -= c;
}
kfree(tmp);
return ret ? ret : err;
}
static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count,
loff_t pos)
{
const char *src = info->screen_buffer + pos;
if (copy_to_user(buf, src, count))
return -EFAULT;
return count;
}
static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos)
{
loff_t pos = *ppos;
size_t total_size;
ssize_t ret;
if (info->screen_size)
total_size = info->screen_size;
else
total_size = info->fix.smem_len;
if (pos >= total_size)
return 0;
if (count >= total_size)
count = total_size;
if (total_size - count < pos)
count = total_size - pos;
if (drm_fbdev_use_iomem(info))
ret = fb_read_screen_base(info, buf, count, pos);
else
ret = fb_read_screen_buffer(info, buf, count, pos);
if (ret > 0)
*ppos += ret;
return ret;
}
static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count,
loff_t pos)
{
char __iomem *dst = info->screen_base + pos;
size_t alloc_size = min_t(size_t, count, PAGE_SIZE);
ssize_t ret = 0;
int err = 0;
u8 *tmp;
tmp = kmalloc(alloc_size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
while (count) {
size_t c = min_t(size_t, count, alloc_size);
if (copy_from_user(tmp, buf, c)) {
err = -EFAULT;
break;
}
memcpy_toio(dst, tmp, c);
dst += c;
buf += c;
ret += c;
count -= c;
}
kfree(tmp);
return ret ? ret : err;
}
static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count,
loff_t pos)
{
char *dst = info->screen_buffer + pos;
if (copy_from_user(dst, buf, count))
return -EFAULT;
return count;
}
static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
loff_t pos = *ppos;
size_t total_size;
ssize_t ret;
int err = 0;
if (info->screen_size)
total_size = info->screen_size;
else
total_size = info->fix.smem_len;
if (pos > total_size)
return -EFBIG;
if (count > total_size) {
err = -EFBIG;
count = total_size;
}
if (total_size - count < pos) {
if (!err)
err = -ENOSPC;
count = total_size - pos;
}
/*
* Copy to framebuffer even if we already logged an error. Emulates
* the behavior of the original fbdev implementation.
*/
if (drm_fbdev_use_iomem(info))
ret = fb_write_screen_base(info, buf, count, pos);
else
ret = fb_write_screen_buffer(info, buf, count, pos);
if (ret > 0)
*ppos += ret;
return ret ? ret : err;
}
static void drm_fbdev_fb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
if (drm_fbdev_use_iomem(info))
drm_fb_helper_cfb_fillrect(info, rect);
else
drm_fb_helper_sys_fillrect(info, rect);
}
static void drm_fbdev_fb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
if (drm_fbdev_use_iomem(info))
drm_fb_helper_cfb_copyarea(info, area);
else
drm_fb_helper_sys_copyarea(info, area);
}
static void drm_fbdev_fb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
if (drm_fbdev_use_iomem(info))
drm_fb_helper_cfb_imageblit(info, image);
else
drm_fb_helper_sys_imageblit(info, image);
}
static const struct fb_ops drm_fbdev_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
@@ -2033,11 +2226,11 @@ static const struct fb_ops drm_fbdev_fb_ops = {
.fb_release = drm_fbdev_fb_release,
.fb_destroy = drm_fbdev_fb_destroy,
.fb_mmap = drm_fbdev_fb_mmap,
.fb_read = drm_fb_helper_sys_read,
.fb_write = drm_fb_helper_sys_write,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_read = drm_fbdev_fb_read,
.fb_write = drm_fbdev_fb_write,
.fb_fillrect = drm_fbdev_fb_fillrect,
.fb_copyarea = drm_fbdev_fb_copyarea,
.fb_imageblit = drm_fbdev_fb_imageblit,
};
static struct fb_deferred_io drm_fbdev_defio = {
@@ -2060,7 +2253,8 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
void *vaddr;
struct dma_buf_map map;
int ret;
drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
@@ -2096,14 +2290,22 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fb_deferred_io_init(fbi);
} else {
/* buffer is mapped for HW framebuffer */
vaddr = drm_client_buffer_vmap(fb_helper->buffer);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
if (ret)
return ret;
if (map.is_iomem)
fbi->screen_base = map.vaddr_iomem;
else
fbi->screen_buffer = map.vaddr;
fbi->screen_buffer = vaddr;
/* Shamelessly leak the physical address to user-space */
/*
* Shamelessly leak the physical address to user-space. As
* page_to_phys() is undefined for I/O memory, warn in this
* case.
*/
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 &&
!drm_WARN_ON_ONCE(dev, map.is_iomem))
fbi->fix.smem_start =
page_to_phys(virt_to_page(fbi->screen_buffer));
#endif

View File

@@ -258,9 +258,11 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
#ifdef CONFIG_DRM_LEGACY
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
dev->driver->preclose)
dev->driver->preclose(dev, file);
#endif
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_lock_release(dev, file->filp);

View File

@@ -36,6 +36,7 @@
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
@@ -1205,28 +1206,32 @@ void drm_gem_unpin(struct drm_gem_object *obj)
obj->funcs->unpin(obj);
}
void *drm_gem_vmap(struct drm_gem_object *obj)
int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
void *vaddr;
int ret;
if (obj->funcs->vmap)
vaddr = obj->funcs->vmap(obj);
else
vaddr = ERR_PTR(-EOPNOTSUPP);
if (!obj->funcs->vmap)
return -EOPNOTSUPP;
if (!vaddr)
vaddr = ERR_PTR(-ENOMEM);
ret = obj->funcs->vmap(obj, map);
if (ret)
return ret;
else if (dma_buf_map_is_null(map))
return -ENOMEM;
return vaddr;
return 0;
}
void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
if (!vaddr)
if (dma_buf_map_is_null(map))
return;
if (obj->funcs->vunmap)
obj->funcs->vunmap(obj, vaddr);
obj->funcs->vunmap(obj, map);
/* Always set the mapping to NULL. Callers may rely on this. */
dma_buf_map_clear(map);
}
/**

View File

@@ -33,6 +33,14 @@
* display drivers that are unable to map scattered buffers via an IOMMU.
*/
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.free = drm_gem_cma_free_object,
.print_info = drm_gem_cma_print_info,
.get_sg_table = drm_gem_cma_prime_get_sg_table,
.vmap = drm_gem_cma_prime_vmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
/**
* __drm_gem_cma_create - Create a GEM CMA object without allocating memory
* @drm: DRM device
@@ -58,6 +66,10 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size)
gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
if (!gem_obj)
return ERR_PTR(-ENOMEM);
if (!gem_obj->funcs)
gem_obj->funcs = &drm_gem_cma_default_funcs;
cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
ret = drm_gem_object_init(drm, gem_obj, size);
@@ -519,6 +531,8 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
* address space
* @obj: GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing
* store.
*
* This function maps a buffer exported via DRM PRIME into the kernel's
* virtual address space. Since the CMA buffers are already mapped into the
@@ -527,68 +541,18 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* driver's &drm_gem_object_funcs.vmap callback.
*
* Returns:
* The kernel virtual address of the CMA GEM object's backing store.
* 0 on success, or a negative error code otherwise.
*/
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
return cma_obj->vaddr;
dma_buf_map_set_vaddr(map, cma_obj->vaddr);
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
/**
* drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
* address space
* @obj: GEM object
* @vaddr: kernel virtual address where the CMA GEM object was mapped
*
* This function removes a buffer exported via DRM PRIME from the kernel's
* virtual address space. This is a no-op because CMA buffers cannot be
* unmapped from kernel space. Drivers using the CMA helpers should set this
* as their &drm_gem_object_funcs.vunmap callback.
*/
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.free = drm_gem_cma_free_object,
.print_info = drm_gem_cma_print_info,
.get_sg_table = drm_gem_cma_prime_get_sg_table,
.vmap = drm_gem_cma_prime_vmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
/**
* drm_gem_cma_create_object_default_funcs - Create a CMA GEM object with a
* default function table
* @dev: DRM device
* @size: Size of the object to allocate
*
* This sets the GEM object functions to the default CMA helper functions.
* This function can be used as the &drm_driver.gem_create_object callback.
*
* Returns:
* A pointer to a allocated GEM object or an error pointer on failure.
*/
struct drm_gem_object *
drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size)
{
struct drm_gem_cma_object *cma_obj;
cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
if (!cma_obj)
return NULL;
cma_obj->base.funcs = &drm_gem_cma_default_funcs;
return &cma_obj->base;
}
EXPORT_SYMBOL(drm_gem_cma_create_object_default_funcs);
/**
* drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
* scatter/gather table and get the virtual address of the buffer

View File

@@ -258,19 +258,25 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
struct drm_gem_object *obj = &shmem->base;
struct dma_buf_map map;
int ret = 0;
if (shmem->vmap_use_count++ > 0)
return shmem->vaddr;
if (shmem->vmap_use_count++ > 0) {
dma_buf_map_set_vaddr(map, shmem->vaddr);
return 0;
}
if (obj->import_attach) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
if (!ret)
shmem->vaddr = map.vaddr;
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (WARN_ON(map->is_iomem)) {
ret = -EIO;
goto err_put_pages;
}
shmem->vaddr = map->vaddr;
}
} else {
pgprot_t prot = PAGE_KERNEL;
@@ -284,6 +290,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
VM_MAP, prot);
if (!shmem->vaddr)
ret = -ENOMEM;
else
dma_buf_map_set_vaddr(map, shmem->vaddr);
}
if (ret) {
@@ -291,7 +299,7 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
goto err_put_pages;
}
return shmem->vaddr;
return 0;
err_put_pages:
if (!obj->import_attach)
@@ -299,12 +307,14 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
err_zero_use:
shmem->vmap_use_count = 0;
return ERR_PTR(ret);
return ret;
}
/*
* drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
*
* This function makes sure that a contiguous kernel virtual address mapping
* exists for the buffer backing the shmem GEM object.
@@ -318,26 +328,25 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
* Returns:
* 0 on success or a negative error code on failure.
*/
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
void *vaddr;
int ret;
ret = mutex_lock_interruptible(&shmem->vmap_lock);
if (ret)
return ERR_PTR(ret);
vaddr = drm_gem_shmem_vmap_locked(shmem);
return ret;
ret = drm_gem_shmem_vmap_locked(shmem, map);
mutex_unlock(&shmem->vmap_lock);
return vaddr;
return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
struct dma_buf_map *map)
{
struct drm_gem_object *obj = &shmem->base;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr);
if (WARN_ON_ONCE(!shmem->vmap_use_count))
return;
@@ -346,7 +355,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
return;
if (obj->import_attach)
dma_buf_vunmap(obj->import_attach->dmabuf, &map);
dma_buf_vunmap(obj->import_attach->dmabuf, map);
else
vunmap(shmem->vaddr);
@@ -357,6 +366,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
* drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
@@ -366,12 +376,12 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
* also be called by drivers directly, in which case it will hide the
* differences between dma-buf imported and natively allocated objects.
*/
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
mutex_lock(&shmem->vmap_lock);
drm_gem_shmem_vunmap_locked(shmem);
drm_gem_shmem_vunmap_locked(shmem, map);
mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

View File

@@ -49,6 +49,43 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
}
EXPORT_SYMBOL(drm_gem_ttm_print_info);
/**
* drm_gem_ttm_vmap() - vmap &ttm_buffer_object
* @gem: GEM object.
* @map: [out] returns the dma-buf mapping.
*
* Maps a GEM object with ttm_bo_vmap(). This function can be used as
* &drm_gem_object_funcs.vmap callback.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct dma_buf_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
return ttm_bo_vmap(bo, map);
}
EXPORT_SYMBOL(drm_gem_ttm_vmap);
/**
* drm_gem_ttm_vunmap() - vunmap &ttm_buffer_object
* @gem: GEM object.
* @map: dma-buf mapping.
*
 * Unmaps a GEM object with ttm_bo_vunmap(). This function can be used as
 * &drm_gem_object_funcs.vunmap callback.
*/
void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
struct dma_buf_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
ttm_bo_vunmap(bo, map);
}
EXPORT_SYMBOL(drm_gem_ttm_vunmap);
/**
* drm_gem_ttm_mmap() - mmap &ttm_buffer_object
* @gem: GEM object.

View File

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/dma-buf-map.h>
#include <linux/module.h>
#include <drm/drm_debugfs.h>
@@ -112,8 +113,8 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
* up; only release the GEM object.
*/
WARN_ON(gbo->kmap_use_count);
WARN_ON(gbo->kmap.virtual);
WARN_ON(gbo->vmap_use_count);
WARN_ON(dma_buf_map_is_set(&gbo->map));
drm_gem_object_release(&gbo->bo.base);
}
@@ -378,39 +379,37 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
bool map, bool *is_iomem)
static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
struct dma_buf_map *map)
{
int ret;
struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
if (gbo->kmap_use_count > 0)
if (gbo->vmap_use_count > 0)
goto out;
if (kmap->virtual || !map)
goto out;
ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
if (ret)
return ERR_PTR(ret);
return ret;
out:
if (!kmap->virtual) {
if (is_iomem)
*is_iomem = false;
return NULL; /* not mapped; don't increment ref */
}
++gbo->kmap_use_count;
if (is_iomem)
return ttm_kmap_obj_virtual(kmap, is_iomem);
return kmap->virtual;
++gbo->vmap_use_count;
*map = gbo->map;
return 0;
}
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
struct dma_buf_map *map)
{
if (WARN_ON_ONCE(!gbo->kmap_use_count))
struct drm_device *dev = gbo->bo.base.dev;
if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
return;
if (--gbo->kmap_use_count > 0)
if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map)))
return; /* BUG: map not mapped from this BO */
if (--gbo->vmap_use_count > 0)
return;
/*
@@ -424,7 +423,9 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
* space
* @gbo: The GEM VRAM object to map
* @gbo: The GEM VRAM object to map
* @map: Returns the kernel virtual address of the VRAM GEM object's backing
* store.
*
* The vmap function pins a GEM VRAM object to its current location, either
* system or video memory, and maps its buffer into kernel address space.
@@ -433,48 +434,44 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
* unmap and unpin the GEM VRAM object.
*
* Returns:
* The buffer's virtual address on success, or
* an ERR_PTR()-encoded error code otherwise.
* 0 on success, or a negative error code otherwise.
*/
void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
{
int ret;
void *base;
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ERR_PTR(ret);
return ret;
ret = drm_gem_vram_pin_locked(gbo, 0);
if (ret)
goto err_ttm_bo_unreserve;
base = drm_gem_vram_kmap_locked(gbo, true, NULL);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
ret = drm_gem_vram_kmap_locked(gbo, map);
if (ret)
goto err_drm_gem_vram_unpin_locked;
}
ttm_bo_unreserve(&gbo->bo);
return base;
return 0;
err_drm_gem_vram_unpin_locked:
drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
ttm_bo_unreserve(&gbo->bo);
return ERR_PTR(ret);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_vmap);
/**
* drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
* @gbo: The GEM VRAM object to unmap
* @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
* @gbo: The GEM VRAM object to unmap
* @map: Kernel virtual address where the VRAM GEM object was mapped
*
* A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
* the documentation for drm_gem_vram_vmap() for more information.
*/
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
{
int ret;
@@ -482,7 +479,7 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
return;
drm_gem_vram_kunmap_locked(gbo);
drm_gem_vram_kunmap_locked(gbo, map);
drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
@@ -573,15 +570,13 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
bool evict,
struct ttm_resource *new_mem)
{
struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
struct ttm_buffer_object *bo = &gbo->bo;
struct drm_device *dev = bo->base.dev;
if (WARN_ON_ONCE(gbo->kmap_use_count))
if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count))
return;
if (!kmap->virtual)
return;
ttm_bo_kunmap(kmap);
kmap->virtual = NULL;
ttm_bo_vunmap(bo, &gbo->map);
}
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
@@ -847,37 +842,33 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
}
/**
* drm_gem_vram_object_vmap() - \
Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
* drm_gem_vram_object_vmap() -
* Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
* @map: Returns the kernel virtual address of the VRAM GEM object's backing
* store.
*
* Returns:
* The buffers virtual address on success, or
* NULL otherwise.
* 0 on success, or a negative error code otherwise.
*/
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
void *base;
base = drm_gem_vram_vmap(gbo);
if (IS_ERR(base))
return NULL;
return base;
return drm_gem_vram_vmap(gbo, map);
}
/**
* drm_gem_vram_object_vunmap() - \
Implements &struct drm_gem_object_funcs.vunmap
* @gem: The GEM object to unmap
* @vaddr: The mapping's base address
* drm_gem_vram_object_vunmap() -
* Implements &struct drm_gem_object_funcs.vunmap
* @gem: The GEM object to unmap
* @map: Kernel virtual address where the VRAM GEM object was mapped
*/
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
void *vaddr)
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_vunmap(gbo, vaddr);
drm_gem_vram_vunmap(gbo, map);
}
/*
@@ -964,7 +955,8 @@ static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
static int bo_driver_move(struct ttm_buffer_object *bo,
bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
struct drm_gem_vram_object *gbo;

View File

@@ -33,6 +33,7 @@
struct dentry;
struct dma_buf;
struct dma_buf_map;
struct drm_connector;
struct drm_crtc;
struct drm_framebuffer;
@@ -187,8 +188,8 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
void *drm_gem_vmap(struct drm_gem_object *obj);
void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)

View File

@@ -667,21 +667,15 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
*
* Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
* callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
* The kernel virtual address is returned in map.
*
* Returns the kernel virtual address or NULL on failure.
* Returns 0 on success or a negative errno code otherwise.
*/
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
void *vaddr;
vaddr = drm_gem_vmap(obj);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
dma_buf_map_set_vaddr(map, vaddr);
return 0;
return drm_gem_vmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -697,7 +691,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
drm_gem_vunmap(obj, map->vaddr);
drm_gem_vunmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

View File

@@ -209,9 +209,12 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
} else if (dev->driver->get_vblank_counter) {
}
#ifdef CONFIG_DRM_LEGACY
else if (dev->driver->get_vblank_counter) {
return dev->driver->get_vblank_counter(dev, pipe);
}
#endif
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -429,9 +432,12 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
} else {
}
#ifdef CONFIG_DRM_LEGACY
else {
dev->driver->disable_vblank(dev, pipe);
}
#endif
}
/*
@@ -1096,9 +1102,12 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
} else if (dev->driver->enable_vblank) {
}
#ifdef CONFIG_DRM_LEGACY
else if (dev->driver->enable_vblank) {
return dev->driver->enable_vblank(dev, pipe);
}
#endif
return -EINVAL;
}

View File

@@ -70,9 +70,6 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
/* We don't want graphics memory to be mapped encrypted */
tmp = pgprot_decrypted(tmp);
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))

View File

@@ -480,7 +480,7 @@ static const struct file_operations fops = {
.mmap = etnaviv_gem_mmap,
};
static struct drm_driver etnaviv_drm_driver = {
static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = etnaviv_open,
.postclose = etnaviv_postclose,

View File

@@ -51,8 +51,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,

View File

@@ -571,7 +571,6 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.unpin = etnaviv_gem_prime_unpin,
.get_sg_table = etnaviv_gem_prime_get_sg_table,
.vmap = etnaviv_gem_prime_vmap,
.vunmap = etnaviv_gem_prime_vunmap,
.vm_ops = &vm_ops,
};

View File

@@ -22,14 +22,16 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
}
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
return etnaviv_gem_vmap(obj);
}
void *vaddr;
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
/* TODO msm_gem_vunmap() */
vaddr = etnaviv_gem_vmap(obj);
if (!vaddr)
return -ENOMEM;
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,

View File

@@ -113,7 +113,7 @@ static const struct file_operations exynos_drm_driver_fops = {
.release = drm_release,
};
static struct drm_driver exynos_drm_driver = {
static const struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,

View File

@@ -135,8 +135,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
.free = exynos_drm_gem_free_object,
.get_sg_table = exynos_drm_gem_prime_get_sg_table,
.vmap = exynos_drm_gem_prime_vmap,
.vunmap = exynos_drm_gem_prime_vunmap,
.vm_ops = &exynos_drm_gem_vm_ops,
};
@@ -469,16 +467,6 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
return &exynos_gem->base;
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
return NULL;
}
void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
/* Nothing to do */
}
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{

View File

@@ -107,8 +107,6 @@ struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);

View File

@@ -134,7 +134,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
static struct drm_driver fsl_dcu_drm_driver = {
static const struct drm_driver fsl_dcu_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
@@ -234,7 +234,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
void __iomem *base;
struct drm_driver *driver = &fsl_dcu_drm_driver;
struct clk *pix_clk_in;
char pix_clk_name[32];
const char *pix_clk_in_name;
@@ -304,7 +303,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
fsl_dev->tcon = fsl_tcon_init(dev);
drm = drm_dev_alloc(driver, dev);
drm = drm_dev_alloc(&fsl_dcu_drm_driver, dev);
if (IS_ERR(drm)) {
ret = PTR_ERR(drm);
goto unregister_pix_clk;

View File

@@ -32,12 +32,6 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
psb_gtt_free_range(obj->dev, gtt);
}
int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file)
{
return -EINVAL;
}
static const struct vm_operations_struct psb_gem_vm_ops = {
.fault = psb_gem_fault,
.open = drm_gem_vm_open,

View File

@@ -34,7 +34,7 @@
#include "psb_intel_reg.h"
#include "psb_reg.h"
static struct drm_driver driver;
static const struct drm_driver driver;
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
/*
@@ -491,7 +491,7 @@ static const struct file_operations psb_gem_fops = {
.read = drm_read,
};
static struct drm_driver driver = {
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.lastclose = drm_fb_helper_lastclose,

View File

@@ -735,8 +735,6 @@ extern const struct drm_connector_helper_funcs
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
/* gem.c */
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);

View File

@@ -43,7 +43,7 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
static struct drm_driver hibmc_driver = {
static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
.name = "hibmc",

View File

@@ -918,7 +918,7 @@ static const struct drm_mode_config_funcs ade_mode_config_funcs = {
DEFINE_DRM_GEM_CMA_FOPS(ade_fops);
static struct drm_driver ade_driver = {
static const struct drm_driver ade_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ade_fops,
DRM_GEM_CMA_DRIVER_OPS,

View File

@@ -40,7 +40,7 @@ struct kirin_drm_data {
u32 num_planes;
u32 prim_plane;
struct drm_driver *driver;
const struct drm_driver *driver;
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
const struct drm_plane_helper_funcs *plane_helper_funcs;

View File

@@ -87,7 +87,7 @@
#include "intel_sideband.h"
#include "vlv_suspend.h"
static struct drm_driver driver;
static const struct drm_driver driver;
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
@@ -1759,7 +1759,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
static const struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/

View File

@@ -81,7 +81,7 @@ static void mock_device_release(struct drm_device *dev)
i915_params_free(&i915->params);
}
static struct drm_driver mock_driver = {
static const struct drm_driver mock_driver = {
.name = "mock",
.driver_features = DRIVER_GEM,
.release = mock_device_release,

View File

@@ -26,7 +26,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
static struct drm_driver dcss_kms_driver = {
static const struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
DRM_GEM_CMA_DRIVER_OPS,
.fops = &dcss_cma_fops,

View File

@@ -145,7 +145,7 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
};
static struct drm_driver imx_drm_driver = {
static const struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
DRM_GEM_CMA_DRIVER_OPS,
.ioctls = imx_drm_ioctls,

View File

@@ -305,11 +305,13 @@ ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
u32 ctrl = 0;
if (priv->soc_info->has_osd &&
drm_atomic_crtc_needs_modeset(crtc->state)) {
drm_atomic_crtc_needs_modeset(crtc_state)) {
/*
* If IPU plane is enabled, enable IPU as source for the F1
* plane; otherwise use regular DMA.
@@ -326,7 +328,8 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_pending_vblank_event *event = crtc_state->event;
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -716,7 +719,7 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
static struct drm_driver ingenic_drm_driver_data = {
static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "ingenic-drm",
.desc = "DRM module for Ingenic SoCs",

View File

@@ -516,7 +516,7 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
unsigned int num_w, denom_w, num_h, denom_h, xres, yres;
unsigned int num_w, denom_w, num_h, denom_h, xres, yres, max_w, max_h;
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
struct drm_crtc *crtc = state->crtc ?: plane->state->crtc;
struct drm_crtc_state *crtc_state;
@@ -558,19 +558,26 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
xres = state->src_w >> 16;
yres = state->src_h >> 16;
/* Adjust the coefficients until we find a valid configuration */
for (denom_w = xres, num_w = state->crtc_w;
num_w <= crtc_state->mode.hdisplay; num_w++)
	/*
	 * Increase the scaled image's theoretical width/height until we find a
	 * configuration that has valid scaling coefficients, up to 102% of the
	 * screen's resolution. This makes sure that we can scale from almost
	 * every resolution possible at the cost of a very small distortion.
	 * The CRTC_W / CRTC_H are not modified.
	 */
max_w = crtc_state->mode.hdisplay * 102 / 100;
max_h = crtc_state->mode.vdisplay * 102 / 100;
for (denom_w = xres, num_w = state->crtc_w; num_w <= max_w; num_w++)
if (!reduce_fraction(&num_w, &denom_w))
break;
if (num_w > crtc_state->mode.hdisplay)
if (num_w > max_w)
return -EINVAL;
for (denom_h = yres, num_h = state->crtc_h;
num_h <= crtc_state->mode.vdisplay; num_h++)
for (denom_h = yres, num_h = state->crtc_h; num_h <= max_h; num_h++)
if (!reduce_fraction(&num_h, &denom_h))
break;
if (num_h > crtc_state->mode.vdisplay)
if (num_h > max_h)
return -EINVAL;
ipu->num_w = num_w;

View File

@@ -0,0 +1,13 @@
config DRM_KMB_DISPLAY
tristate "INTEL KEEMBAY DISPLAY"
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have Intel's KeemBay SOC which integrates
an ARM Cortex A53 CPU with an Intel Movidius VPU.
If M is selected the module will be called kmb-drm.

View File

@@ -0,0 +1,2 @@
kmb-drm-y := kmb_crtc.o kmb_drv.o kmb_plane.o kmb_dsi.o
obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb-drm.o

View File

@@ -0,0 +1,214 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2018-2020 Intel Corporation
*/
#include <linux/clk.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "kmb_drv.h"
#include "kmb_dsi.h"
#include "kmb_plane.h"
#include "kmb_regs.h"
struct kmb_crtc_timing {
u32 vfront_porch;
u32 vback_porch;
u32 vsync_len;
u32 hfront_porch;
u32 hback_porch;
u32 hsync_len;
};
static int kmb_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
/* Clear interrupt */
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
/* Set which interval to generate vertical interrupt */
kmb_write_lcd(kmb, LCD_VSTATUS_COMPARE,
LCD_VSTATUS_COMPARE_VSYNC);
/* Enable vertical interrupt */
kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
return 0;
}
static void kmb_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
/* Clear interrupt */
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
/* Disable vertical interrupt */
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
}
static const struct drm_crtc_funcs kmb_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = kmb_crtc_enable_vblank,
.disable_vblank = kmb_crtc_disable_vblank,
};
static void kmb_crtc_set_mode(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *m = &crtc->state->adjusted_mode;
struct kmb_crtc_timing vm;
struct kmb_drm_private *kmb = to_kmb(dev);
unsigned int val = 0;
/* Initialize mipi */
kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
drm_info(dev,
"vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
m->crtc_vsync_start - m->crtc_vdisplay,
m->crtc_vtotal - m->crtc_vsync_end,
m->crtc_vsync_end - m->crtc_vsync_start,
m->crtc_hsync_start - m->crtc_hdisplay,
m->crtc_htotal - m->crtc_hsync_end,
m->crtc_hsync_end - m->crtc_hsync_start);
val = kmb_read_lcd(kmb, LCD_INT_ENABLE);
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, val);
kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, ~0x0);
vm.vfront_porch = 2;
vm.vback_porch = 2;
vm.vsync_len = 8;
vm.hfront_porch = 0;
vm.hback_porch = 0;
vm.hsync_len = 28;
drm_dbg(dev, "%s : %dactive height= %d vbp=%d vfp=%d vsync-w=%d h-active=%d h-bp=%d h-fp=%d hsync-l=%d",
__func__, __LINE__,
m->crtc_vdisplay, vm.vback_porch, vm.vfront_porch,
vm.vsync_len, m->crtc_hdisplay, vm.hback_porch,
vm.hfront_porch, vm.hsync_len);
kmb_write_lcd(kmb, LCD_V_ACTIVEHEIGHT,
m->crtc_vdisplay - 1);
kmb_write_lcd(kmb, LCD_V_BACKPORCH, vm.vback_porch);
kmb_write_lcd(kmb, LCD_V_FRONTPORCH, vm.vfront_porch);
kmb_write_lcd(kmb, LCD_VSYNC_WIDTH, vm.vsync_len - 1);
kmb_write_lcd(kmb, LCD_H_ACTIVEWIDTH,
m->crtc_hdisplay - 1);
kmb_write_lcd(kmb, LCD_H_BACKPORCH, vm.hback_porch);
kmb_write_lcd(kmb, LCD_H_FRONTPORCH, vm.hfront_porch);
kmb_write_lcd(kmb, LCD_HSYNC_WIDTH, vm.hsync_len - 1);
/* This is hardcoded as 0 in the Myriadx code */
kmb_write_lcd(kmb, LCD_VSYNC_START, 0);
kmb_write_lcd(kmb, LCD_VSYNC_END, 0);
/* Back ground color */
kmb_write_lcd(kmb, LCD_BG_COLOUR_LS, 0x4);
if (m->flags == DRM_MODE_FLAG_INTERLACE) {
kmb_write_lcd(kmb,
LCD_VSYNC_WIDTH_EVEN, vm.vsync_len - 1);
kmb_write_lcd(kmb,
LCD_V_BACKPORCH_EVEN, vm.vback_porch);
kmb_write_lcd(kmb,
LCD_V_FRONTPORCH_EVEN, vm.vfront_porch);
kmb_write_lcd(kmb, LCD_V_ACTIVEHEIGHT_EVEN,
m->crtc_vdisplay - 1);
/* This is hardcoded as 10 in the Myriadx code */
kmb_write_lcd(kmb, LCD_VSYNC_START_EVEN, 10);
kmb_write_lcd(kmb, LCD_VSYNC_END_EVEN, 10);
}
kmb_write_lcd(kmb, LCD_TIMING_GEN_TRIG, 1);
kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_ENABLE);
kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, val);
}
static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
clk_prepare_enable(kmb->kmb_clk.clk_lcd);
kmb_crtc_set_mode(crtc);
drm_crtc_vblank_on(crtc);
}
static void kmb_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
/* due to hw limitations, planes need to be off when crtc is off */
drm_atomic_helper_disable_planes_on_crtc(old_state, false);
drm_crtc_vblank_off(crtc);
clk_disable_unprepare(kmb->kmb_clk.clk_lcd);
}
static void kmb_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
}
static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
else
drm_crtc_send_vblank_event(crtc, crtc->state->event);
}
crtc->state->event = NULL;
spin_unlock_irq(&crtc->dev->event_lock);
}
/* Atomic modeset hooks for the single KMB CRTC */
static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
	.atomic_begin = kmb_crtc_atomic_begin,
	.atomic_enable = kmb_crtc_atomic_enable,
	.atomic_disable = kmb_crtc_atomic_disable,
	.atomic_flush = kmb_crtc_atomic_flush,
};
/* Create the primary plane and bind it to the device's single CRTC.
 *
 * Returns 0 on success or a negative errno; on CRTC-init failure the
 * freshly created plane is destroyed again.
 */
int kmb_setup_crtc(struct drm_device *drm)
{
	struct kmb_drm_private *kmb_priv = to_kmb(drm);
	struct kmb_plane *primary_plane;
	int err;

	primary_plane = kmb_plane_init(drm);
	if (IS_ERR(primary_plane))
		return PTR_ERR(primary_plane);

	err = drm_crtc_init_with_planes(drm, &kmb_priv->crtc,
					&primary_plane->base_plane,
					NULL, &kmb_crtc_funcs, NULL);
	if (err) {
		kmb_plane_destroy(&primary_plane->base_plane);
		return err;
	}

	drm_crtc_helper_add(&kmb_priv->crtc, &kmb_crtc_helper_funcs);
	return 0;
}

View File

@@ -0,0 +1,602 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2018-2020 Intel Corporation
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "kmb_drv.h"
#include "kmb_dsi.h"
#include "kmb_regs.h"
/* Ungate the LCD pixel clock; logs and propagates any failure. */
static int kmb_display_clk_enable(struct kmb_drm_private *kmb)
{
	int err;

	err = clk_prepare_enable(kmb->kmb_clk.clk_lcd);
	if (err) {
		drm_err(&kmb->drm, "Failed to enable LCD clock: %d\n", err);
		return err;
	}

	DRM_INFO("SUCCESS : enabled LCD clocks\n");
	return 0;
}
/* Acquire and configure the display clocks: LCD pixel clock, PLL0
 * (from which the system clock in MHz is derived), the DSI clocks,
 * and the MSS camera clock/reset controls the pipeline depends on.
 *
 * Returns 0 on success or a negative value on failure.
 */
static int kmb_initialize_clocks(struct kmb_drm_private *kmb, struct device *dev)
{
	int ret = 0;
	struct regmap *msscam;

	kmb->kmb_clk.clk_lcd = devm_clk_get(dev, "clk_lcd");
	if (IS_ERR(kmb->kmb_clk.clk_lcd)) {
		drm_err(&kmb->drm, "clk_get() failed clk_lcd\n");
		return PTR_ERR(kmb->kmb_clk.clk_lcd);
	}

	kmb->kmb_clk.clk_pll0 = devm_clk_get(dev, "clk_pll0");
	if (IS_ERR(kmb->kmb_clk.clk_pll0)) {
		drm_err(&kmb->drm, "clk_get() failed clk_pll0 ");
		return PTR_ERR(kmb->kmb_clk.clk_pll0);
	}
	kmb->sys_clk_mhz = clk_get_rate(kmb->kmb_clk.clk_pll0) / 1000000;
	drm_info(&kmb->drm, "system clk = %d Mhz", kmb->sys_clk_mhz);

	/* Fix: the return value of kmb_dsi_clk_init() was assigned but
	 * never checked, so a failed DSI clock setup went unnoticed.
	 */
	ret = kmb_dsi_clk_init(kmb->kmb_dsi);
	if (ret) {
		drm_err(&kmb->drm, "failed to initialize DSI clocks\n");
		return ret;
	}

	/* Set LCD clock to 200 Mhz */
	clk_set_rate(kmb->kmb_clk.clk_lcd, KMB_LCD_DEFAULT_CLK);
	if (clk_get_rate(kmb->kmb_clk.clk_lcd) != KMB_LCD_DEFAULT_CLK) {
		drm_err(&kmb->drm, "failed to set to clk_lcd to %d\n",
			KMB_LCD_DEFAULT_CLK);
		return -1;
	}
	drm_dbg(&kmb->drm, "clk_lcd = %ld\n", clk_get_rate(kmb->kmb_clk.clk_lcd));

	ret = kmb_display_clk_enable(kmb);
	if (ret)
		return ret;

	msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam");
	if (IS_ERR(msscam)) {
		drm_err(&kmb->drm, "failed to get msscam syscon");
		return -1;
	}

	/* Enable MSS_CAM_CLK_CTRL for MIPI TX and LCD */
	regmap_update_bits(msscam, MSS_CAM_CLK_CTRL, 0x1fff, 0x1fff);
	regmap_update_bits(msscam, MSS_CAM_RSTN_CTRL, 0xffffffff, 0xffffffff);
	return 0;
}
/* Gate the LCD pixel clock enabled by kmb_display_clk_enable(). */
static void kmb_display_clk_disable(struct kmb_drm_private *kmb)
{
	clk_disable_unprepare(kmb->kmb_clk.clk_lcd);
}
/* Look up the platform resource called @name on @pdev and devm-ioremap
 * it. Returns the mapped address or an ERR_PTR on failure.
 */
static void __iomem *kmb_map_mmio(struct drm_device *drm,
				  struct platform_device *pdev,
				  char *name)
{
	void __iomem *regs;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res) {
		drm_err(drm, "failed to get resource for %s", name);
		return ERR_PTR(-ENOMEM);
	}

	regs = devm_ioremap_resource(drm->dev, res);
	if (IS_ERR(regs))
		drm_err(drm, "failed to ioremap %s registers", name);

	return regs;
}
/* Map the LCD and MIPI register windows, bring up the display clocks,
 * and claim the LCD interrupt line and optional reserved framebuffer
 * memory. Returns 0 on success or a negative value on failure.
 */
static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
{
	struct kmb_drm_private *kmb = to_kmb(drm);
	struct platform_device *pdev = to_platform_device(drm->dev);
	int irq_lcd;
	int ret = 0;

	/* Map LCD MMIO registers */
	kmb->lcd_mmio = kmb_map_mmio(drm, pdev, "lcd");
	if (IS_ERR(kmb->lcd_mmio)) {
		drm_err(&kmb->drm, "failed to map LCD registers\n");
		return -ENOMEM;
	}

	/* Map MIPI MMIO registers */
	ret = kmb_dsi_map_mmio(kmb->kmb_dsi);
	if (ret)
		return ret;

	/* Enable display clocks. Fix: the return value used to be
	 * ignored, letting probe continue with a dead clock tree.
	 */
	ret = kmb_initialize_clocks(kmb, &pdev->dev);
	if (ret) {
		drm_err(&kmb->drm, "failed to initialize clocks\n");
		return ret;
	}

	/* Register irqs here - section 17.3 in databook
	 * lists LCD at 79 and 82 for MIPI under MSS CPU -
	 * firmware has redirected 79 to A53 IRQ 33
	 */

	/* Allocate LCD interrupt resources */
	irq_lcd = platform_get_irq(pdev, 0);
	if (irq_lcd < 0) {
		drm_err(&kmb->drm, "irq_lcd not found");
		/* Fix: this path previously returned ret == 0 (success) */
		ret = irq_lcd;
		goto setup_fail;
	}

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(drm->dev);
	if (ret && ret != -ENODEV)
		return ret;

	spin_lock_init(&kmb->irq_lock);

	kmb->irq_lcd = irq_lcd;

	return 0;

 setup_fail:
	of_reserved_mem_device_release(drm->dev);

	return ret;
}
/* Stock atomic mode-config callbacks; framebuffers are CMA-backed GEM */
static const struct drm_mode_config_funcs kmb_mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
/* Initialize DRM mode-config limits, create the CRTC and DSI encoder,
 * and set up vblank handling. Returns 0 on success or a negative errno.
 */
static int kmb_setup_mode_config(struct drm_device *drm)
{
	int ret;
	struct kmb_drm_private *kmb = to_kmb(drm);

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	/* Only the fixed 1920x1080 mode is supported (min == max) */
	drm->mode_config.min_width = KMB_MIN_WIDTH;
	drm->mode_config.min_height = KMB_MIN_HEIGHT;
	drm->mode_config.max_width = KMB_MAX_WIDTH;
	drm->mode_config.max_height = KMB_MAX_HEIGHT;
	drm->mode_config.funcs = &kmb_mode_config_funcs;

	ret = kmb_setup_crtc(drm);
	if (ret < 0) {
		drm_err(drm, "failed to create crtc\n");
		return ret;
	}

	/* Fix: the encoder-init result was silently discarded; a failed
	 * encoder/bridge setup must abort the probe.
	 */
	ret = kmb_dsi_encoder_init(drm, kmb->kmb_dsi);
	if (ret) {
		drm_err(drm, "failed to initialize DSI encoder\n");
		return ret;
	}

	/* Set the CRTC's port so that the encoder component can find it */
	kmb->crtc.port = of_graph_get_port_by_id(drm->dev->of_node, 0);

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		drm_err(drm, "failed to initialize vblank\n");
		pm_runtime_disable(drm->dev);
		return ret;
	}

	drm_mode_config_reset(drm);
	return 0;
}
/* LCD interrupt worker: handles end-of-frame deferred plane disables,
 * DMA-underflow recovery, vertical-compare (vblank) events, and DMA
 * error reporting. Called from kmb_isr() in hard-IRQ context.
 */
static irqreturn_t handle_lcd_irq(struct drm_device *dev)
{
	unsigned long status, val, val1;
	int plane_id, dma0_state, dma1_state;
	struct kmb_drm_private *kmb = to_kmb(dev);

	status = kmb_read_lcd(kmb, LCD_INT_STATUS);

	spin_lock(&kmb->irq_lock);
	if (status & LCD_INT_EOF) {
		kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF);

		/* When disabling/enabling LCD layers, the change takes effect
		 * immediately and does not wait for EOF (end of frame).
		 * When kmb_plane_atomic_disable is called, mark the plane as
		 * disabled but actually disable the plane when EOF irq is
		 * being handled.
		 */
		for (plane_id = LAYER_0;
		     plane_id < KMB_MAX_PLANES; plane_id++) {
			if (kmb->plane_status[plane_id].disable) {
				kmb_clr_bitmask_lcd(kmb,
						    LCD_LAYERn_DMA_CFG
						    (plane_id),
						    LCD_DMA_LAYER_ENABLE);
				kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
						    kmb->plane_status[plane_id].ctrl);
				kmb->plane_status[plane_id].disable = false;
			}
		}
		if (kmb->kmb_under_flow) {
			/* DMA Recovery after underflow */
			dma0_state = (kmb->layer_no == 0) ?
			    LCD_VIDEO0_DMA0_STATE : LCD_VIDEO1_DMA0_STATE;
			dma1_state = (kmb->layer_no == 0) ?
			    LCD_VIDEO0_DMA1_STATE : LCD_VIDEO1_DMA1_STATE;

			/* NOTE(review): unbounded busy-wait under the IRQ
			 * spinlock; presumably the DMA engines always go
			 * idle after a FIFO flush — confirm with hardware
			 * documentation.
			 */
			do {
				kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1);
				val = kmb_read_lcd(kmb, dma0_state)
				    & LCD_DMA_STATE_ACTIVE;
				val1 = kmb_read_lcd(kmb, dma1_state)
				    & LCD_DMA_STATE_ACTIVE;
			} while ((val || val1));

			/* disable dma */
			kmb_clr_bitmask_lcd(kmb,
					    LCD_LAYERn_DMA_CFG(kmb->layer_no),
					    LCD_DMA_LAYER_ENABLE);
			kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1);
			kmb->kmb_flush_done = 1;
			kmb->kmb_under_flow = 0;
		}
	}

	if (status & LCD_INT_LINE_CMP) {
		/* clear line compare interrupt */
		kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LINE_CMP);
	}

	if (status & LCD_INT_VERT_COMP) {
		/* Read VSTATUS */
		val = kmb_read_lcd(kmb, LCD_VSTATUS);
		val = (val & LCD_VSTATUS_VERTICAL_STATUS_MASK);
		switch (val) {
		case LCD_VSTATUS_COMPARE_VSYNC:
			/* Clear vertical compare interrupt */
			kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
			if (kmb->kmb_flush_done) {
				/* Re-enable the layer disabled during
				 * underflow recovery, now that the FIFO
				 * flush completed.
				 */
				kmb_set_bitmask_lcd(kmb,
						    LCD_LAYERn_DMA_CFG
						    (kmb->layer_no),
						    LCD_DMA_LAYER_ENABLE);
				kmb->kmb_flush_done = 0;
			}
			drm_crtc_handle_vblank(&kmb->crtc);
			break;
		case LCD_VSTATUS_COMPARE_BACKPORCH:
		case LCD_VSTATUS_COMPARE_ACTIVE:
		case LCD_VSTATUS_COMPARE_FRONT_PORCH:
			kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
			break;
		}
	}
	if (status & LCD_INT_DMA_ERR) {
		/* Only report error bits that are actually enabled */
		val =
		    (status & LCD_INT_DMA_ERR &
		     kmb_read_lcd(kmb, LCD_INT_ENABLE));
		/* LAYER0 - VL0 */
		if (val & (LAYER0_DMA_FIFO_UNDERFLOW |
			   LAYER0_DMA_CB_FIFO_UNDERFLOW |
			   LAYER0_DMA_CR_FIFO_UNDERFLOW)) {
			kmb->kmb_under_flow++;
			drm_info(&kmb->drm,
				 "!LAYER0:VL0 DMA UNDERFLOW val = 0x%lx,under_flow=%d",
				 val, kmb->kmb_under_flow);
			/* disable underflow interrupt */
			kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
					    LAYER0_DMA_FIFO_UNDERFLOW |
					    LAYER0_DMA_CB_FIFO_UNDERFLOW |
					    LAYER0_DMA_CR_FIFO_UNDERFLOW);
			kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR,
					    LAYER0_DMA_CB_FIFO_UNDERFLOW |
					    LAYER0_DMA_FIFO_UNDERFLOW |
					    LAYER0_DMA_CR_FIFO_UNDERFLOW);
			/* disable auto restart mode */
			kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(0),
					    LCD_DMA_LAYER_CONT_PING_PONG_UPDATE);
			kmb->layer_no = 0;
		}

		if (val & LAYER0_DMA_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER0:VL0 DMA OVERFLOW val = 0x%lx", val);
		if (val & LAYER0_DMA_CB_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER0:VL0 DMA CB OVERFLOW val = 0x%lx", val);
		if (val & LAYER0_DMA_CR_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER0:VL0 DMA CR OVERFLOW val = 0x%lx", val);

		/* LAYER1 - VL1 */
		if (val & (LAYER1_DMA_FIFO_UNDERFLOW |
			   LAYER1_DMA_CB_FIFO_UNDERFLOW |
			   LAYER1_DMA_CR_FIFO_UNDERFLOW)) {
			kmb->kmb_under_flow++;
			drm_info(&kmb->drm,
				 "!LAYER1:VL1 DMA UNDERFLOW val = 0x%lx, under_flow=%d",
				 val, kmb->kmb_under_flow);
			/* disable underflow interrupt */
			kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
					    LAYER1_DMA_FIFO_UNDERFLOW |
					    LAYER1_DMA_CB_FIFO_UNDERFLOW |
					    LAYER1_DMA_CR_FIFO_UNDERFLOW);
			kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR,
					    LAYER1_DMA_CB_FIFO_UNDERFLOW |
					    LAYER1_DMA_FIFO_UNDERFLOW |
					    LAYER1_DMA_CR_FIFO_UNDERFLOW);
			/* disable auto restart mode */
			kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(1),
					    LCD_DMA_LAYER_CONT_PING_PONG_UPDATE);
			kmb->layer_no = 1;
		}

		/* LAYER1 - VL1 */
		if (val & LAYER1_DMA_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER1:VL1 DMA OVERFLOW val = 0x%lx", val);
		if (val & LAYER1_DMA_CB_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER1:VL1 DMA CB OVERFLOW val = 0x%lx", val);
		if (val & LAYER1_DMA_CR_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER1:VL1 DMA CR OVERFLOW val = 0x%lx", val);

		/* LAYER2 - GL0 */
		if (val & LAYER2_DMA_FIFO_UNDERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER2:GL0 DMA UNDERFLOW val = 0x%lx", val);
		if (val & LAYER2_DMA_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER2:GL0 DMA OVERFLOW val = 0x%lx", val);

		/* LAYER3 - GL1 */
		if (val & LAYER3_DMA_FIFO_UNDERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
		/* Fix copy-paste bug: the OVERFLOW message was gated on the
		 * UNDERFLOW bit, so a GL1 overflow was never reported.
		 */
		if (val & LAYER3_DMA_FIFO_OVERFLOW)
			drm_dbg(&kmb->drm,
				"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
	}

	spin_unlock(&kmb->irq_lock);

	if (status & LCD_INT_LAYER) {
		/* Clear layer interrupts */
		kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LAYER);
	}

	/* Clear all interrupts */
	kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, 1);
	return IRQ_HANDLED;
}
/* Top-level IRQ handler registered via drm_driver; the LCD block is the
 * only interrupt source this driver services.
 */
static irqreturn_t kmb_isr(int irq, void *arg)
{
	struct drm_device *drm_dev = arg;

	handle_lcd_irq(drm_dev);
	return IRQ_HANDLED;
}
/* Quiesce the LCD block: acknowledge every pending interrupt and mask
 * all sources. Used for both irq_preinstall and irq_uninstall.
 */
static void kmb_irq_reset(struct drm_device *drm)
{
	struct kmb_drm_private *kmb = to_kmb(drm);

	kmb_write_lcd(kmb, LCD_INT_CLEAR, 0xFFFF);
	kmb_write_lcd(kmb, LCD_INT_ENABLE, 0);
}
DEFINE_DRM_GEM_CMA_FOPS(fops);

/* DRM driver description: atomic modesetting with CMA-backed GEM
 * buffers; kmb_isr services the LCD interrupt.
 */
static struct drm_driver kmb_driver = {
	.driver_features = DRIVER_GEM |
	    DRIVER_MODESET | DRIVER_ATOMIC,
	.irq_handler = kmb_isr,
	.irq_preinstall = kmb_irq_reset,
	.irq_uninstall = kmb_irq_reset,
	/* GEM Operations */
	.fops = &fops,
	DRM_GEM_CMA_DRIVER_OPS_VMAP,
	.name = "kmb-drm",
	.desc = "KEEMBAY DISPLAY DRIVER ",
	.date = "20201008",
	.major = 1,
	.minor = 0,
};
/* Driver removal: unregister the DRM device, tear down IRQs and
 * reserved memory, gate the clocks, and detach the DSI host.
 */
static int kmb_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct drm_device *drm = dev_get_drvdata(dev);
	struct kmb_drm_private *kmb = to_kmb(drm);

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	of_node_put(kmb->crtc.port);
	kmb->crtc.port = NULL;
	/* Keep the device powered while the IRQ handler is removed */
	pm_runtime_get_sync(drm->dev);
	drm_irq_uninstall(drm);
	pm_runtime_put_sync(drm->dev);
	pm_runtime_disable(drm->dev);
	of_reserved_mem_device_release(drm->dev);
	/* Release clks */
	kmb_display_clk_disable(kmb);
	dev_set_drvdata(dev, NULL);
	/* Unregister DSI host */
	kmb_dsi_host_unregister(kmb->kmb_dsi);
	drm_atomic_helper_shutdown(drm);
	return 0;
}
/* Probe: look up the DSI device from the OF graph, register the DSI
 * host (returns -EPROBE_DEFER until the ADV7535 bridge has loaded),
 * then allocate the DRM device, initialize hardware and modesetting,
 * and register with the kernel.
 */
static int kmb_probe(struct platform_device *pdev)
{
	struct device *dev = get_device(&pdev->dev);
	struct kmb_drm_private *kmb;
	int ret = 0;
	struct device_node *dsi_in;
	struct device_node *dsi_node;
	struct platform_device *dsi_pdev;

	/* The bridge (ADV 7535) will return -EPROBE_DEFER until it
	 * has a mipi_dsi_host to register its device to. So, we
	 * first register the DSI host during probe time, and then return
	 * -EPROBE_DEFER until the bridge is loaded. Probe will be called again
	 * and then the rest of the driver initialization can proceed
	 * afterwards and the bridge can be successfully attached.
	 */
	dsi_in = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
	if (!dsi_in) {
		DRM_ERROR("Failed to get dsi_in node info from DT");
		return -EINVAL;
	}
	dsi_node = of_graph_get_remote_port_parent(dsi_in);
	if (!dsi_node) {
		of_node_put(dsi_in);
		DRM_ERROR("Failed to get dsi node from DT\n");
		return -EINVAL;
	}
	dsi_pdev = of_find_device_by_node(dsi_node);
	if (!dsi_pdev) {
		of_node_put(dsi_in);
		of_node_put(dsi_node);
		DRM_ERROR("Failed to get dsi platform device\n");
		return -EINVAL;
	}
	of_node_put(dsi_in);
	of_node_put(dsi_node);
	ret = kmb_dsi_host_bridge_init(get_device(&dsi_pdev->dev));
	if (ret == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (ret) {
		DRM_ERROR("probe failed to initialize DSI host bridge\n");
		return ret;
	}
	/* Create DRM device */
	kmb = devm_drm_dev_alloc(dev, &kmb_driver,
				 struct kmb_drm_private, drm);
	if (IS_ERR(kmb))
		return PTR_ERR(kmb);
	dev_set_drvdata(dev, &kmb->drm);
	/* Initialize MIPI DSI */
	kmb->kmb_dsi = kmb_dsi_init(dsi_pdev);
	if (IS_ERR(kmb->kmb_dsi)) {
		drm_err(&kmb->drm, "failed to initialize DSI\n");
		ret = PTR_ERR(kmb->kmb_dsi);
		/* NOTE(review): on this path kmb->kmb_dsi is an ERR_PTR and
		 * err_free1 passes it to kmb_dsi_host_unregister() — verify
		 * that function tolerates an ERR_PTR argument.
		 */
		goto err_free1;
	}
	kmb->kmb_dsi->dev = &dsi_pdev->dev;
	kmb->kmb_dsi->pdev = dsi_pdev;
	ret = kmb_hw_init(&kmb->drm, 0);
	if (ret)
		goto err_free1;
	ret = kmb_setup_mode_config(&kmb->drm);
	if (ret)
		goto err_free;
	ret = drm_irq_install(&kmb->drm, kmb->irq_lcd);
	if (ret < 0) {
		drm_err(&kmb->drm, "failed to install IRQ handler\n");
		goto err_irq;
	}
	drm_kms_helper_poll_init(&kmb->drm);
	/* Register graphics device with the kernel */
	ret = drm_dev_register(&kmb->drm, 0);
	if (ret)
		goto err_register;
	return 0;
err_register:
	drm_kms_helper_poll_fini(&kmb->drm);
err_irq:
	pm_runtime_disable(kmb->drm.dev);
err_free:
	/* NOTE(review): mode config was created with the managed
	 * drmm_mode_config_init(); confirm the explicit cleanup below
	 * cannot lead to a double release.
	 */
	drm_crtc_cleanup(&kmb->crtc);
	drm_mode_config_cleanup(&kmb->drm);
err_free1:
	dev_set_drvdata(dev, NULL);
	kmb_dsi_host_unregister(kmb->kmb_dsi);
	return ret;
}
/* Device-tree match table: bound via the "intel,keembay-display" node */
static const struct of_device_id kmb_of_match[] = {
	{.compatible = "intel,keembay-display"},
	{},
};
MODULE_DEVICE_TABLE(of, kmb_of_match);
/* System suspend: stop connector polling and save the atomic state so
 * kmb_pm_resume() can restore it. Returns 0 or a negative errno.
 */
static int __maybe_unused kmb_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL;

	/* Fix: mirror kmb_pm_resume() — the NULL guard existed in the
	 * ternary above but drm/kmb were then dereferenced unconditionally.
	 */
	if (!kmb)
		return 0;

	drm_kms_helper_poll_disable(drm);

	kmb->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(kmb->state)) {
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(kmb->state);
	}

	return 0;
}
/* System resume: restore the atomic state saved at suspend time and
 * restart connector polling. No-op if no DRM device is bound.
 */
static int __maybe_unused kmb_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct kmb_drm_private *kmb;

	if (!drm)
		return 0;

	kmb = to_kmb(drm);
	drm_atomic_helper_resume(drm, kmb->state);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
/* System sleep hooks (no runtime-PM callbacks) */
static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume);

static struct platform_driver kmb_platform_driver = {
	.probe = kmb_probe,
	.remove = kmb_remove,
	.driver = {
		.name = "kmb-drm",
		.pm = &kmb_pm_ops,
		.of_match_table = kmb_of_match,
	},
};

module_platform_driver(kmb_platform_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Keembay Display driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,88 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright © 2018-2020 Intel Corporation
*/
#ifndef __KMB_DRV_H__
#define __KMB_DRV_H__
#include <drm/drm_device.h>
#include "kmb_plane.h"
#include "kmb_regs.h"
/* Only the fixed 1920x1080 mode is supported, hence min == max */
#define KMB_MAX_WIDTH 1920 /*Max width in pixels */
#define KMB_MAX_HEIGHT 1080 /*Max height in pixels */
#define KMB_MIN_WIDTH 1920 /*Min width in pixels */
#define KMB_MIN_HEIGHT 1080 /*Min height in pixels */

/* LCD pixel clock target rate, in Hz (200 MHz) */
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500

#define ICAM_MMIO 0x3b100000
#define ICAM_LCD_OFFSET 0x1080
#define ICAM_MMIO_SIZE 0x2000
/* Opaque DSI controller state, defined in kmb_dsi.h */
struct kmb_dsi;

/* Clocks consumed by the LCD controller */
struct kmb_clock {
	struct clk *clk_lcd;	/* LCD pixel clock */
	struct clk *clk_pll0;	/* PLL0; system clock rate is derived from it */
};

/* Per-device driver state, wrapping the embedded drm_device */
struct kmb_drm_private {
	struct drm_device drm;
	struct kmb_dsi *kmb_dsi;
	void __iomem *lcd_mmio;	/* LCD register window */
	struct kmb_clock kmb_clk;
	struct drm_crtc crtc;	/* the single CRTC */
	struct kmb_plane *plane;
	struct drm_atomic_state *state;	/* saved across suspend/resume */
	spinlock_t irq_lock;	/* taken around LCD interrupt handling */
	int irq_lcd;	/* LCD interrupt number */
	int sys_clk_mhz;	/* PLL0 rate in MHz */
	struct layer_status plane_status[KMB_MAX_PLANES];
	int kmb_under_flow;	/* DMA underflow seen; recovery pending */
	int kmb_flush_done;	/* FIFO flushed; re-enable layer at next vsync */
	int layer_no;	/* video layer (0 or 1) being recovered */
};
/* Recover the driver-private structure from the embedded drm_device */
static inline struct kmb_drm_private *to_kmb(const struct drm_device *dev)
{
	return container_of(dev, struct kmb_drm_private, drm);
}

/* Recover the driver-private structure from the embedded CRTC */
static inline struct kmb_drm_private *crtc_to_kmb_priv(const struct drm_crtc *x)
{
	return container_of(x, struct kmb_drm_private, crtc);
}

/* 32-bit write to an LCD controller register */
static inline void kmb_write_lcd(struct kmb_drm_private *dev_p,
				 unsigned int reg, u32 value)
{
	writel(value, (dev_p->lcd_mmio + reg));
}

/* 32-bit read from an LCD controller register */
static inline u32 kmb_read_lcd(struct kmb_drm_private *dev_p, unsigned int reg)
{
	return readl(dev_p->lcd_mmio + reg);
}

/* Read-modify-write: set the bits in @mask (no locking; not atomic) */
static inline void kmb_set_bitmask_lcd(struct kmb_drm_private *dev_p,
				       unsigned int reg, u32 mask)
{
	u32 reg_val = kmb_read_lcd(dev_p, reg);
	kmb_write_lcd(dev_p, reg, (reg_val | mask));
}

/* Read-modify-write: clear the bits in @mask (no locking; not atomic) */
static inline void kmb_clr_bitmask_lcd(struct kmb_drm_private *dev_p,
				       unsigned int reg, u32 mask)
{
	u32 reg_val = kmb_read_lcd(dev_p, reg);
	kmb_write_lcd(dev_p, reg, (reg_val & (~mask)));
}

int kmb_setup_crtc(struct drm_device *dev);
void kmb_set_scanout(struct kmb_drm_private *lcd);
#endif /* __KMB_DRV_H__ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,387 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright © 2019-2020 Intel Corporation
*/
#ifndef __KMB_DSI_H__
#define __KMB_DSI_H__
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
/* MIPI TX CFG */
#define MIPI_TX_LANE_DATA_RATE_MBPS 891
#define MIPI_TX_REF_CLK_KHZ 24000
#define MIPI_TX_CFG_CLK_KHZ 24000
#define MIPI_TX_BPP 24

/* DPHY Tx test codes (indices written through the PHY test interface) */
#define TEST_CODE_FSM_CONTROL 0x03
#define TEST_CODE_MULTIPLE_PHY_CTRL 0x0C
#define TEST_CODE_PLL_PROPORTIONAL_CHARGE_PUMP_CTRL 0x0E
#define TEST_CODE_PLL_INTEGRAL_CHARGE_PUMP_CTRL 0x0F
#define TEST_CODE_PLL_VCO_CTRL 0x12
#define TEST_CODE_PLL_GMP_CTRL 0x13
#define TEST_CODE_PLL_PHASE_ERR_CTRL 0x14
#define TEST_CODE_PLL_LOCK_FILTER 0x15
#define TEST_CODE_PLL_UNLOCK_FILTER 0x16
#define TEST_CODE_PLL_INPUT_DIVIDER 0x17
#define TEST_CODE_PLL_FEEDBACK_DIVIDER 0x18
#define PLL_FEEDBACK_DIVIDER_HIGH BIT(7)
#define TEST_CODE_PLL_OUTPUT_CLK_SEL 0x19
#define PLL_N_OVR_EN BIT(4)
#define PLL_M_OVR_EN BIT(5)
#define TEST_CODE_VOD_LEVEL 0x24
#define TEST_CODE_PLL_CHARGE_PUMP_BIAS 0x1C
#define TEST_CODE_PLL_LOCK_DETECTOR 0x1D
#define TEST_CODE_HS_FREQ_RANGE_CFG 0x44
#define TEST_CODE_PLL_ANALOG_PROG 0x1F
#define TEST_CODE_SLEW_RATE_OVERRIDE_CTRL 0xA0
#define TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL 0xA3
#define TEST_CODE_SLEW_RATE_DDL_CYCLES 0xA4

/* DPHY params: allowed PLL divider ranges */
#define PLL_N_MIN 0
#define PLL_N_MAX 15
#define PLL_M_MIN 62
#define PLL_M_MAX 623
#define PLL_FVCO_MAX 1250

#define TIMEOUT 600

#define MIPI_TX_FRAME_GEN 4
#define MIPI_TX_FRAME_GEN_SECTIONS 4
#define MIPI_CTRL_VIRTUAL_CHANNELS 4
#define MIPI_D_LANES_PER_DPHY 2
#define MIPI_CTRL_2LANE_MAX_MC_FIFO_LOC 255
#define MIPI_CTRL_4LANE_MAX_MC_FIFO_LOC 511
/* 2 Data Lanes per D-PHY */
#define MIPI_DPHY_D_LANES 2
#define MIPI_DPHY_DEFAULT_BIT_RATES 63

#define KMB_MIPI_DEFAULT_CLK 24000000
#define KMB_MIPI_DEFAULT_CFG_CLK 24000000
/* Recover the kmb_dsi from its embedded drm_encoder */
#define to_kmb_dsi(x) container_of(x, struct kmb_dsi, base)

/* State of the MIPI DSI transmitter */
struct kmb_dsi {
	struct drm_encoder base;	/* embedded encoder; see to_kmb_dsi() */
	struct device *dev;
	struct platform_device *pdev;
	struct mipi_dsi_host *host;
	struct mipi_dsi_device *device;
	/* downstream bridge (ADV7535, per the probe comment in kmb_drv.c) */
	struct drm_bridge *adv_bridge;
	void __iomem *mipi_mmio;	/* MIPI register window */
	struct clk *clk_mipi;
	struct clk *clk_mipi_ecfg;
	struct clk *clk_mipi_cfg;
	int sys_clk_mhz;
};
/* DPHY Tx test codes */
/* MIPI controller instances (the SoC exposes ten) */
enum mipi_ctrl_num {
	MIPI_CTRL0 = 0,
	MIPI_CTRL1,
	MIPI_CTRL2,
	MIPI_CTRL3,
	MIPI_CTRL4,
	MIPI_CTRL5,
	MIPI_CTRL6,
	MIPI_CTRL7,
	MIPI_CTRL8,
	MIPI_CTRL9,
	MIPI_CTRL_NA
};

/* D-PHY instances */
enum mipi_dphy_num {
	MIPI_DPHY0 = 0,
	MIPI_DPHY1,
	MIPI_DPHY2,
	MIPI_DPHY3,
	MIPI_DPHY4,
	MIPI_DPHY5,
	MIPI_DPHY6,
	MIPI_DPHY7,
	MIPI_DPHY8,
	MIPI_DPHY9,
	MIPI_DPHY_NA
};

enum mipi_dir {
	MIPI_RX,
	MIPI_TX
};

enum mipi_ctrl_type {
	MIPI_DSI,
	MIPI_CSI
};

enum mipi_data_if {
	MIPI_IF_DMA,
	MIPI_IF_PARALLEL
};

enum mipi_data_mode {
	MIPI_DATA_MODE0,
	MIPI_DATA_MODE1,
	MIPI_DATA_MODE2,
	MIPI_DATA_MODE3
};

/* DSI video mode transmission types */
enum mipi_dsi_video_mode {
	DSI_VIDEO_MODE_NO_BURST_PULSE,
	DSI_VIDEO_MODE_NO_BURST_EVENT,
	DSI_VIDEO_MODE_BURST
};

/* Behavior during blanking intervals */
enum mipi_dsi_blanking_mode {
	TRANSITION_TO_LOW_POWER,
	SEND_BLANK_PACKET
};

/* End-of-transmission packet generation */
enum mipi_dsi_eotp {
	DSI_EOTP_DISABLED,
	DSI_EOTP_ENABLES
};
/* DSI packet data-type identifiers: DSI_SP_* are short packets,
 * DSI_LP_* are long packets.
 */
enum mipi_dsi_data_type {
	DSI_SP_DT_RESERVED_00 = 0x00,
	DSI_SP_DT_VSYNC_START = 0x01,
	DSI_SP_DT_COLOR_MODE_OFF = 0x02,
	DSI_SP_DT_GENERIC_SHORT_WR = 0x03,
	DSI_SP_DT_GENERIC_RD = 0x04,
	DSI_SP_DT_DCS_SHORT_WR = 0x05,
	DSI_SP_DT_DCS_RD = 0x06,
	DSI_SP_DT_EOTP = 0x08,
	DSI_LP_DT_NULL = 0x09,
	DSI_LP_DT_RESERVED_0A = 0x0a,
	DSI_LP_DT_RESERVED_0B = 0x0b,
	DSI_LP_DT_LPPS_YCBCR422_20B = 0x0c,
	DSI_LP_DT_PPS_RGB101010_30B = 0x0d,
	DSI_LP_DT_PPS_RGB565_16B = 0x0e,
	DSI_LP_DT_RESERVED_0F = 0x0f,
	DSI_SP_DT_RESERVED_10 = 0x10,
	DSI_SP_DT_VSYNC_END = 0x11,
	DSI_SP_DT_COLOR_MODE_ON = 0x12,
	DSI_SP_DT_GENERIC_SHORT_WR_1PAR = 0x13,
	DSI_SP_DT_GENERIC_RD_1PAR = 0x14,
	DSI_SP_DT_DCS_SHORT_WR_1PAR = 0x15,
	DSI_SP_DT_RESERVED_16 = 0x16,
	DSI_SP_DT_RESERVED_17 = 0x17,
	DSI_SP_DT_RESERVED_18 = 0x18,
	DSI_LP_DT_BLANK = 0x19,
	DSI_LP_DT_RESERVED_1A = 0x1a,
	DSI_LP_DT_RESERVED_1B = 0x1b,
	DSI_LP_DT_PPS_YCBCR422_24B = 0x1c,
	DSI_LP_DT_PPS_RGB121212_36B = 0x1d,
	DSI_LP_DT_PPS_RGB666_18B = 0x1e,
	DSI_LP_DT_RESERVED_1F = 0x1f,
	DSI_SP_DT_RESERVED_20 = 0x20,
	DSI_SP_DT_HSYNC_START = 0x21,
	DSI_SP_DT_SHUT_DOWN_PERIPH_CMD = 0x22,
	DSI_SP_DT_GENERIC_SHORT_WR_2PAR = 0x23,
	DSI_SP_DT_GENERIC_RD_2PAR = 0x24,
	DSI_SP_DT_RESERVED_25 = 0x25,
	DSI_SP_DT_RESERVED_26 = 0x26,
	DSI_SP_DT_RESERVED_27 = 0x27,
	DSI_SP_DT_RESERVED_28 = 0x28,
	DSI_LP_DT_GENERIC_LONG_WR = 0x29,
	DSI_LP_DT_RESERVED_2A = 0x2a,
	DSI_LP_DT_RESERVED_2B = 0x2b,
	DSI_LP_DT_PPS_YCBCR422_16B = 0x2c,
	DSI_LP_DT_RESERVED_2D = 0x2d,
	DSI_LP_DT_LPPS_RGB666_18B = 0x2e,
	DSI_LP_DT_RESERVED_2F = 0x2f,
	DSI_SP_DT_RESERVED_30 = 0x30,
	DSI_SP_DT_HSYNC_END = 0x31,
	DSI_SP_DT_TURN_ON_PERIPH_CMD = 0x32,
	DSI_SP_DT_RESERVED_33 = 0x33,
	DSI_SP_DT_RESERVED_34 = 0x34,
	DSI_SP_DT_RESERVED_35 = 0x35,
	DSI_SP_DT_RESERVED_36 = 0x36,
	DSI_SP_DT_SET_MAX_RETURN_PKT_SIZE = 0x37,
	DSI_SP_DT_RESERVED_38 = 0x38,
	DSI_LP_DT_DSC_LONG_WR = 0x39,
	DSI_LP_DT_RESERVED_3A = 0x3a,
	DSI_LP_DT_RESERVED_3B = 0x3b,
	DSI_LP_DT_RESERVED_3C = 0x3c,
	DSI_LP_DT_PPS_YCBCR420_12B = 0x3d,
	DSI_LP_DT_PPS_RGB888_24B = 0x3e,
	DSI_LP_DT_RESERVED_3F = 0x3f
};
/* High-speed test-pattern selection for the TX frame generator */
enum mipi_tx_hs_tp_sel {
	MIPI_TX_HS_TP_WHOLE_FRAME_COLOR0 = 0,
	MIPI_TX_HS_TP_WHOLE_FRAME_COLOR1,
	MIPI_TX_HS_TP_V_STRIPES,
	MIPI_TX_HS_TP_H_STRIPES,
};

enum dphy_mode {
	MIPI_DPHY_SLAVE = 0,
	MIPI_DPHY_MASTER
};

/* D-PHY transmitter finite-state-machine states */
enum dphy_tx_fsm {
	DPHY_TX_POWERDWN = 0,
	DPHY_TX_BGPON,
	DPHY_TX_TERMCAL,
	DPHY_TX_TERMCALUP,
	DPHY_TX_OFFSETCAL,
	DPHY_TX_LOCK,
	DPHY_TX_SRCAL,
	DPHY_TX_IDLE,
	DPHY_TX_ULP,
	DPHY_TX_LANESTART,
	DPHY_TX_CLKALIGN,
	DPHY_TX_DDLTUNNING,
	DPHY_TX_ULP_FORCE_PLL,
	DPHY_TX_LOCK_LOSS
};
/* Packing/size constraints of one DSI data type */
struct mipi_data_type_params {
	u8 size_constraint_pixels;
	u8 size_constraint_bytes;
	u8 pixels_per_pclk;
	u8 bits_per_pclk;
};

/* DSI transmission/blanking options for the TX controller */
struct mipi_tx_dsi_cfg {
	u8 hfp_blank_en;	/* Horizontal front porch blanking enable */
	u8 eotp_en;		/* End of transmission packet enable */
	/* Last vertical front porch blanking mode */
	u8 lpm_last_vfp_line;
	/* First vertical sync active blanking mode */
	u8 lpm_first_vsa_line;
	u8 sync_pulse_eventn;	/* Sync type */
	u8 hfp_blanking;	/* Horizontal front porch blanking mode */
	u8 hbp_blanking;	/* Horizontal back porch blanking mode */
	u8 hsa_blanking;	/* Horizontal sync active blanking mode */
	u8 v_blanking;		/* Vertical timing blanking mode */
};

/* One section of a generated frame (geometry and DMA layout) */
struct mipi_tx_frame_section_cfg {
	u32 dma_v_stride;
	u16 dma_v_scale_cfg;
	u16 width_pixels;
	u16 height_lines;
	u8 dma_packed;
	u8 bpp;
	u8 bpp_unpacked;
	u8 dma_h_stride;
	u8 data_type;
	u8 data_mode;
	u8 dma_flip_rotate_sel;
};

/* Full video-timing description used to program frame generation */
struct mipi_tx_frame_timing_cfg {
	u32 bpp;
	u32 lane_rate_mbps;
	u32 hsync_width;
	u32 h_backporch;
	u32 h_frontporch;
	u32 h_active;
	u16 vsync_width;
	u16 v_backporch;
	u16 v_frontporch;
	u16 v_active;
	u8 active_lanes;
};

/* Packet-header settings for one frame section */
struct mipi_tx_frame_sect_phcfg {
	u32 wc;
	enum mipi_data_mode data_mode;
	enum mipi_dsi_data_type data_type;
	u8 vchannel;
	u8 dma_packed;
};

/* Frame configuration: sections plus sync/porch timings */
struct mipi_tx_frame_cfg {
	struct mipi_tx_frame_section_cfg *sections[MIPI_TX_FRAME_GEN_SECTIONS];
	u32 hsync_width;	/* in pixels */
	u32 h_backporch;	/* in pixels */
	u32 h_frontporch;	/* in pixels */
	u16 vsync_width;	/* in lines */
	u16 v_backporch;	/* in lines */
	u16 v_frontporch;	/* in lines */
};

/* Per-controller TX settings: frame generators and DSI options */
struct mipi_tx_ctrl_cfg {
	struct mipi_tx_frame_cfg *frames[MIPI_TX_FRAME_GEN];
	const struct mipi_tx_dsi_cfg *tx_dsi_cfg;
	u8 line_sync_pkt_en;
	u8 line_counter_active;
	u8 frame_counter_active;
	u8 tx_hsclkkidle_cnt;
	u8 tx_hsexit_cnt;
	u8 tx_crc_en;
	u8 tx_hact_wait_stop;
	u8 tx_always_use_hact;
	u8 tx_wait_trig;
	u8 tx_wait_all_sect;
};

/* configuration structure for MIPI control */
struct mipi_ctrl_cfg {
	u8 active_lanes;	/* # active lanes per controller 2/4 */
	u32 lane_rate_mbps;	/* MBPS */
	u32 ref_clk_khz;
	u32 cfg_clk_khz;
	struct mipi_tx_ctrl_cfg tx_ctrl_cfg;
};
/* 32-bit write to a MIPI controller register */
static inline void kmb_write_mipi(struct kmb_dsi *kmb_dsi,
				  unsigned int reg, u32 value)
{
	writel(value, (kmb_dsi->mipi_mmio + reg));
}

/* 32-bit read from a MIPI controller register */
static inline u32 kmb_read_mipi(struct kmb_dsi *kmb_dsi, unsigned int reg)
{
	return readl(kmb_dsi->mipi_mmio + reg);
}

/* Read-modify-write a @num_bits wide field at bit @offset of @reg
 * (no locking; not atomic).
 */
static inline void kmb_write_bits_mipi(struct kmb_dsi *kmb_dsi,
				       unsigned int reg, u32 offset,
				       u32 num_bits, u32 value)
{
	u32 reg_val = kmb_read_mipi(kmb_dsi, reg);
	/* Fix: use 1ULL so a full-width field (num_bits == 32) does not
	 * trigger undefined behaviour from a 32-bit shift by 32.
	 */
	u32 mask = (u32)((1ULL << num_bits) - 1);

	value &= mask;
	mask <<= offset;
	reg_val &= (~mask);
	reg_val |= (value << offset);
	kmb_write_mipi(kmb_dsi, reg, reg_val);
}

/* Set a single bit in @reg (no locking; not atomic) */
static inline void kmb_set_bit_mipi(struct kmb_dsi *kmb_dsi,
				    unsigned int reg, u32 offset)
{
	u32 reg_val = kmb_read_mipi(kmb_dsi, reg);

	/* Fix: 1u — left-shifting signed 1 by 31 is undefined behaviour */
	kmb_write_mipi(kmb_dsi, reg, reg_val | (1u << offset));
}

/* Clear a single bit in @reg (no locking; not atomic) */
static inline void kmb_clr_bit_mipi(struct kmb_dsi *kmb_dsi,
				    unsigned int reg, u32 offset)
{
	u32 reg_val = kmb_read_mipi(kmb_dsi, reg);

	kmb_write_mipi(kmb_dsi, reg, reg_val & (~(1u << offset)));
}
/* DSI entry points implemented in kmb_dsi.c */
int kmb_dsi_host_bridge_init(struct device *dev);
struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
		     int sys_clk_mhz);
int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
#endif /* __KMB_DSI_H__ */

View File

@@ -0,0 +1,522 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2018-2020 Intel Corporation
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include "kmb_drv.h"
#include "kmb_plane.h"
#include "kmb_regs.h"
/* Per-layer LCD interrupt bits, indexed by plane id (VL0, VL1, GL0, GL1) */
const u32 layer_irqs[] = {
	LCD_INT_VL0,
	LCD_INT_VL1,
	LCD_INT_GL0,
	LCD_INT_GL1
};

/* Conversion (yuv->rgb) matrix from myriadx */
/* Rows 1-3 are the 3x3 coefficients; the last row holds the offsets.
 * Negative values are stored as-is in u32 (two's complement).
 */
static const u32 csc_coef_lcd[] = {
	1024, 0, 1436,
	1024, -352, -731,
	1024, 1814, 0,
	-179, 125, -226
};
/* Graphics layer (layers 2 & 3) formats, only packed formats are supported */
static const u32 kmb_formats_g[] = {
	DRM_FORMAT_RGB332,
	DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444,
	DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444,
	DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555,
	DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555,
	DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
	DRM_FORMAT_RGB888, DRM_FORMAT_BGR888,
	DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
};

/* Video layer ( 0 & 1) formats, packed and planar formats are supported */
static const u32 kmb_formats_v[] = {
	/* packed formats */
	DRM_FORMAT_RGB332,
	DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444,
	DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444,
	DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555,
	DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555,
	DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
	DRM_FORMAT_RGB888, DRM_FORMAT_BGR888,
	DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
	/*planar formats */
	DRM_FORMAT_YUV420, DRM_FORMAT_YVU420,
	DRM_FORMAT_YUV422, DRM_FORMAT_YVU422,
	DRM_FORMAT_YUV444, DRM_FORMAT_YVU444,
	DRM_FORMAT_NV12, DRM_FORMAT_NV21,
};
/* Return 0 when @format appears in @plane's supported format list,
 * -EINVAL otherwise.
 *
 * NOTE(review): the return type is unsigned yet carries -EINVAL; the
 * callers store the result into an int, so the value round-trips
 * correctly — consider changing the return type to int.
 */
static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
{
	unsigned int i;

	/* Fix: use an unsigned index to match plane->format_count and
	 * avoid a signed/unsigned comparison.
	 */
	for (i = 0; i < plane->format_count; i++) {
		if (plane->format_types[i] == format)
			return 0;
	}
	return -EINVAL;
}
/* Validate a proposed plane state: the pixel format must be supported
 * and the plane must be exactly the fixed screen size (min == max is
 * 1920x1080). Overlay planes may be positioned; the primary must
 * cover the CRTC, and no scaling is supported.
 */
static int kmb_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_framebuffer *fb = state->fb;
	struct drm_crtc_state *crtc_state;
	bool can_position;
	int ret;

	if (!fb || !state->crtc)
		return 0;

	ret = check_pixel_format(plane, fb->format->format);
	if (ret)
		return ret;

	if (state->crtc_w > KMB_MAX_WIDTH || state->crtc_h > KMB_MAX_HEIGHT ||
	    state->crtc_w < KMB_MIN_WIDTH || state->crtc_h < KMB_MIN_HEIGHT)
		return -EINVAL;

	can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
							state->crtc);
	return drm_atomic_helper_check_plane_state(state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   can_position, true);
}
/* Mark the plane for deferred disable: record which LCD_CONTROL enable
 * bit to clear and set the disable flag; the actual disable happens in
 * the EOF interrupt handler (see handle_lcd_irq in kmb_drv.c).
 */
static void kmb_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *state)
{
	struct kmb_plane *kplane = to_kmb_plane(plane);
	struct kmb_drm_private *kmb = to_kmb(plane->dev);
	int layer = kplane->id;

	switch (layer) {
	case LAYER_0:
		kmb->plane_status[layer].ctrl = LCD_CTRL_VL1_ENABLE;
		break;
	case LAYER_1:
		kmb->plane_status[layer].ctrl = LCD_CTRL_VL2_ENABLE;
		break;
	case LAYER_2:
		kmb->plane_status[layer].ctrl = LCD_CTRL_GL1_ENABLE;
		break;
	case LAYER_3:
		kmb->plane_status[layer].ctrl = LCD_CTRL_GL2_ENABLE;
		break;
	}

	kmb->plane_status[layer].disable = true;
}
/* Map a DRM fourcc to the LCD layer-format register value. Unknown
 * formats fall through with val == 0; callers are expected to have
 * validated the format via check_pixel_format() first.
 */
static unsigned int get_pixel_format(u32 format)
{
	unsigned int val = 0;

	switch (format) {
		/* planar formats */
	case DRM_FORMAT_YUV444:
		val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE;
		break;
	case DRM_FORMAT_YVU444:
		val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE
		    | LCD_LAYER_CRCB_ORDER;
		break;
	case DRM_FORMAT_YUV422:
		val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE;
		break;
	case DRM_FORMAT_YVU422:
		val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE
		    | LCD_LAYER_CRCB_ORDER;
		break;
	case DRM_FORMAT_YUV420:
		val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE;
		break;
	case DRM_FORMAT_YVU420:
		val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE
		    | LCD_LAYER_CRCB_ORDER;
		break;
	case DRM_FORMAT_NV12:
		val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE;
		break;
	case DRM_FORMAT_NV21:
		val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE
		    | LCD_LAYER_CRCB_ORDER;
		break;
		/* packed formats */
		/* looks hw requires B & G to be swapped when RGB */
	case DRM_FORMAT_RGB332:
		val = LCD_LAYER_FORMAT_RGB332 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_XBGR4444:
		val = LCD_LAYER_FORMAT_RGBX4444;
		break;
	case DRM_FORMAT_ARGB4444:
		val = LCD_LAYER_FORMAT_RGBA4444 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_ABGR4444:
		val = LCD_LAYER_FORMAT_RGBA4444;
		break;
	case DRM_FORMAT_XRGB1555:
		val = LCD_LAYER_FORMAT_XRGB1555 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_XBGR1555:
		val = LCD_LAYER_FORMAT_XRGB1555;
		break;
	case DRM_FORMAT_ARGB1555:
		val = LCD_LAYER_FORMAT_RGBA1555 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_ABGR1555:
		val = LCD_LAYER_FORMAT_RGBA1555;
		break;
	case DRM_FORMAT_RGB565:
		val = LCD_LAYER_FORMAT_RGB565 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_BGR565:
		val = LCD_LAYER_FORMAT_RGB565;
		break;
	case DRM_FORMAT_RGB888:
		val = LCD_LAYER_FORMAT_RGB888 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_BGR888:
		val = LCD_LAYER_FORMAT_RGB888;
		break;
	case DRM_FORMAT_XRGB8888:
		val = LCD_LAYER_FORMAT_RGBX8888 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_XBGR8888:
		val = LCD_LAYER_FORMAT_RGBX8888;
		break;
	case DRM_FORMAT_ARGB8888:
		val = LCD_LAYER_FORMAT_RGBA8888 | LCD_LAYER_BGR_ORDER;
		break;
	case DRM_FORMAT_ABGR8888:
		val = LCD_LAYER_FORMAT_RGBA8888;
		break;
	}
	DRM_INFO_ONCE("%s : %d format=0x%x val=0x%x\n",
		      __func__, __LINE__, format, val);
	return val;
}
/*
 * get_bits_per_pixel() - translate a framebuffer format into the
 * LCD_LAYER_*BPP field of the layer configuration register.
 * @format: DRM format descriptor of the framebuffer being scanned out.
 *
 * Multi-planar formats are always programmed as 8 bits per plane; for
 * single-plane formats the width is derived from the bytes-per-pixel of
 * plane 0.  Returns 0 (which aliases LCD_LAYER_8BPP) for any bit width
 * the hardware does not enumerate.
 */
static unsigned int get_bits_per_pixel(const struct drm_format_info *format)
{
	unsigned int cfg;
	u32 bits = 0;

	/* Planar formats: each plane is handled as an 8-bit stream. */
	if (format->num_planes > 1)
		return LCD_LAYER_8BPP;

	bits += 8 * format->cpp[0];
	if (bits == 8)
		cfg = LCD_LAYER_8BPP;
	else if (bits == 16)
		cfg = LCD_LAYER_16BPP;
	else if (bits == 24)
		cfg = LCD_LAYER_24BPP;
	else if (bits == 32)
		cfg = LCD_LAYER_32BPP;
	else
		cfg = 0;

	DRM_DEBUG("bpp=%d val=0x%x\n", bits, cfg);
	return cfg;
}
/*
 * config_csc() - load the fixed YUV->RGB conversion matrix into a layer.
 * @kmb: driver private data used for register access.
 * @plane_id: hardware layer whose colour-space converter is programmed.
 *
 * The nine coefficient registers (CSC_COEFF11..CSC_COEFF33) and the three
 * offset registers (CSC_OFF1..CSC_OFF3) occupy twelve consecutive 32-bit
 * slots (0x10b..0x116 within the layer block), matching the layout of the
 * csc_coef_lcd[] table, so they can be written in a single pass in the
 * same order as before.
 */
static void config_csc(struct kmb_drm_private *kmb, int plane_id)
{
	unsigned int i;

	for (i = 0; i < 12; i++)
		kmb_write_lcd(kmb,
			      LCD_LAYERn_CSC_COEFF11(plane_id) + (4 * i),
			      csc_coef_lcd[i]);
}
/*
 * kmb_plane_atomic_update() - program one LCD hardware layer for scanout.
 * @plane: the DRM plane being updated.
 * @state: the old plane state (only validated, not otherwise read; the
 *         new state is taken from plane->state).
 *
 * Programs DMA geometry/addresses for up to three sub-planes (Y/Cb/Cr),
 * the layer format/FIFO/CSC configuration, the global layer-enable and
 * output-format registers, and finally enables the layer DMA and its
 * EOF/DMA-error interrupts.
 */
static void kmb_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct drm_framebuffer *fb;
	struct kmb_drm_private *kmb;
	unsigned int width;
	unsigned int height;
	unsigned int dma_len;
	struct kmb_plane *kmb_plane;
	unsigned int dma_cfg;
	unsigned int ctrl = 0, val = 0, out_format = 0;
	unsigned int src_w, src_h, crtc_x, crtc_y;
	unsigned char plane_id;
	int num_planes;
	/*
	 * NOTE(review): static, so the addresses survive across calls for
	 * every plane — presumably intentional scratch storage, but it is
	 * shared by all planes; confirm this is not a per-plane assumption.
	 */
	static dma_addr_t addr[MAX_SUB_PLANES];
	if (!plane || !plane->state || !state)
		return;
	fb = plane->state->fb;
	if (!fb)
		return;
	num_planes = fb->format->num_planes;
	kmb_plane = to_kmb_plane(plane);
	plane_id = kmb_plane->id;
	kmb = to_kmb(plane->dev);
	/* Skip the update entirely while an underflow/flush is being handled. */
	spin_lock_irq(&kmb->irq_lock);
	if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
		spin_unlock_irq(&kmb->irq_lock);
		drm_dbg(&kmb->drm, "plane_update:underflow!!!! returning");
		return;
	}
	spin_unlock_irq(&kmb->irq_lock);
	/* src_w/src_h are Q16.16 fixed point per DRM; drop the fraction. */
	src_w = (plane->state->src_w >> 16);
	src_h = plane->state->src_h >> 16;
	crtc_x = plane->state->crtc_x;
	crtc_y = plane->state->crtc_y;
	drm_dbg(&kmb->drm,
		"src_w=%d src_h=%d, fb->format->format=0x%x fb->flags=0x%x\n",
		src_w, src_h, fb->format->format, fb->flags);
	/* DMA length and stride of the first (luma/packed) plane. */
	width = fb->width;
	height = fb->height;
	dma_len = (width * height * fb->format->cpp[0]);
	drm_dbg(&kmb->drm, "dma_len=%d ", dma_len);
	kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN(plane_id), dma_len);
	kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN_SHADOW(plane_id), dma_len);
	kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_VSTRIDE(plane_id),
		      fb->pitches[0]);
	kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id),
		      (width * fb->format->cpp[0]));
	addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
	kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id),
		      addr[Y_PLANE] + fb->offsets[0]);
	val = get_pixel_format(fb->format->format);
	val |= get_bits_per_pixel(fb->format);
	/* Program Cb/Cr for planar formats */
	if (num_planes > 1) {
		kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_VSTRIDE(plane_id),
			      width * fb->format->cpp[0]);
		kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id),
			      (width * fb->format->cpp[0]));
		addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state,
							U_PLANE);
		/* check if Cb/Cr is swapped*/
		if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER))
			kmb_write_lcd(kmb,
				      LCD_LAYERn_DMA_START_CR_ADR(plane_id),
				      addr[U_PLANE]);
		else
			kmb_write_lcd(kmb,
				      LCD_LAYERn_DMA_START_CB_ADR(plane_id),
				      addr[U_PLANE]);
		/* Three-plane formats additionally carry a separate V plane. */
		if (num_planes == 3) {
			kmb_write_lcd(kmb,
				      LCD_LAYERn_DMA_CR_LINE_VSTRIDE(plane_id),
				      ((width) * fb->format->cpp[0]));
			kmb_write_lcd(kmb,
				      LCD_LAYERn_DMA_CR_LINE_WIDTH(plane_id),
				      ((width) * fb->format->cpp[0]));
			addr[V_PLANE] = drm_fb_cma_get_gem_addr(fb,
								plane->state,
								V_PLANE);
			/* check if Cb/Cr is swapped*/
			if (val & LCD_LAYER_CRCB_ORDER)
				kmb_write_lcd(kmb,
					      LCD_LAYERn_DMA_START_CB_ADR(plane_id),
					      addr[V_PLANE]);
			else
				kmb_write_lcd(kmb,
					      LCD_LAYERn_DMA_START_CR_ADR(plane_id),
					      addr[V_PLANE]);
		}
	}
	/* Hardware counts width/height/positions from zero. */
	kmb_write_lcd(kmb, LCD_LAYERn_WIDTH(plane_id), src_w - 1);
	kmb_write_lcd(kmb, LCD_LAYERn_HEIGHT(plane_id), src_h - 1);
	kmb_write_lcd(kmb, LCD_LAYERn_COL_START(plane_id), crtc_x);
	kmb_write_lcd(kmb, LCD_LAYERn_ROW_START(plane_id), crtc_y);
	val |= LCD_LAYER_FIFO_100;
	if (val & LCD_LAYER_PLANAR_STORAGE) {
		val |= LCD_LAYER_CSC_EN;
		/* Enable CSC if input is planar and output is RGB */
		config_csc(kmb, plane_id);
	}
	kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
	/* Map the layer id onto its enable bit in LCD_CONTROL. */
	switch (plane_id) {
	case LAYER_0:
		ctrl = LCD_CTRL_VL1_ENABLE;
		break;
	case LAYER_1:
		ctrl = LCD_CTRL_VL2_ENABLE;
		break;
	case LAYER_2:
		ctrl = LCD_CTRL_GL1_ENABLE;
		break;
	case LAYER_3:
		ctrl = LCD_CTRL_GL2_ENABLE;
		break;
	}
	ctrl |= LCD_CTRL_PROGRESSIVE | LCD_CTRL_TIM_GEN_ENABLE
	    | LCD_CTRL_CONTINUOUS | LCD_CTRL_OUTPUT_ENABLED;
	/* LCD is connected to MIPI on kmb
	 * Therefore this bit is required for DSI Tx
	 */
	ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
	kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
	/* FIXME no doc on how to set output format,these values are
	 * taken from the Myriadx tests
	 */
	out_format |= LCD_OUTF_FORMAT_RGB888;
	/* Leave RGB order,conversion mode and clip mode to default */
	/* do not interleave RGB channels for mipi Tx compatibility */
	out_format |= LCD_OUTF_MIPI_RGB_MODE;
	kmb_write_lcd(kmb, LCD_OUT_FORMAT_CFG, out_format);
	dma_cfg = LCD_DMA_LAYER_ENABLE | LCD_DMA_LAYER_VSTRIDE_EN |
	    LCD_DMA_LAYER_CONT_UPDATE | LCD_DMA_LAYER_AXI_BURST_16;
	/* Enable DMA */
	kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
	drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
		kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
	/* Arm end-of-frame and DMA-error interrupts for this frame. */
	kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF |
			    LCD_INT_DMA_ERR);
	kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, LCD_INT_EOF |
			    LCD_INT_DMA_ERR);
}
/* Atomic plane helper vtable shared by all KMB planes. */
static const struct drm_plane_helper_funcs kmb_plane_helper_funcs = {
	.atomic_check = kmb_plane_atomic_check,
	.atomic_update = kmb_plane_atomic_update,
	.atomic_disable = kmb_plane_atomic_disable
};
void kmb_plane_destroy(struct drm_plane *plane)
{
struct kmb_plane *kmb_plane = to_kmb_plane(plane);
drm_plane_cleanup(plane);
kfree(kmb_plane);
}
/* Core plane vtable: default atomic helpers plus the KMB destroy hook. */
static const struct drm_plane_funcs kmb_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = kmb_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
struct kmb_plane *kmb_plane_init(struct drm_device *drm)
{
struct kmb_drm_private *kmb = to_kmb(drm);
struct kmb_plane *plane = NULL;
struct kmb_plane *primary = NULL;
int i = 0;
int ret = 0;
enum drm_plane_type plane_type;
const u32 *plane_formats;
int num_plane_formats;
for (i = 0; i < KMB_MAX_PLANES; i++) {
plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
if (!plane) {
drm_err(drm, "Failed to allocate plane\n");
return ERR_PTR(-ENOMEM);
}
plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
if (i < 2) {
plane_formats = kmb_formats_v;
num_plane_formats = ARRAY_SIZE(kmb_formats_v);
} else {
plane_formats = kmb_formats_g;
num_plane_formats = ARRAY_SIZE(kmb_formats_g);
}
ret = drm_universal_plane_init(drm, &plane->base_plane,
POSSIBLE_CRTCS, &kmb_plane_funcs,
plane_formats, num_plane_formats,
NULL, plane_type, "plane %d", i);
if (ret < 0) {
drm_err(drm, "drm_universal_plane_init failed (ret=%d)",
ret);
goto cleanup;
}
drm_dbg(drm, "%s : %d i=%d type=%d",
__func__, __LINE__,
i, plane_type);
drm_plane_helper_add(&plane->base_plane,
&kmb_plane_helper_funcs);
if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
primary = plane;
kmb->plane = plane;
}
drm_dbg(drm, "%s : %d primary=%p\n", __func__, __LINE__,
&primary->base_plane);
plane->id = i;
}
return primary;
cleanup:
drmm_kfree(drm, plane);
return ERR_PTR(ret);
}

View File

@@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright © 2018-2020 Intel Corporation
*/
#ifndef __KMB_PLANE_H__
#define __KMB_PLANE_H__
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
/* Aggregate DMA-FIFO error bits for video layer 0 (luma + Cb + Cr FIFOs). */
#define LCD_INT_VL0_ERR ((LAYER0_DMA_FIFO_UNDERFLOW) | \
			(LAYER0_DMA_FIFO_OVERFLOW) | \
			(LAYER0_DMA_CB_FIFO_OVERFLOW) | \
			(LAYER0_DMA_CB_FIFO_UNDERFLOW) | \
			(LAYER0_DMA_CR_FIFO_OVERFLOW) | \
			(LAYER0_DMA_CR_FIFO_UNDERFLOW))
/* Aggregate DMA-FIFO error bits for video layer 1. */
#define LCD_INT_VL1_ERR ((LAYER1_DMA_FIFO_UNDERFLOW) | \
			(LAYER1_DMA_FIFO_OVERFLOW) | \
			(LAYER1_DMA_CB_FIFO_OVERFLOW) | \
			(LAYER1_DMA_CB_FIFO_UNDERFLOW) | \
			(LAYER1_DMA_CR_FIFO_OVERFLOW) | \
			(LAYER1_DMA_CR_FIFO_UNDERFLOW))
/* Graphics layers have a single FIFO, so only two error bits each. */
#define LCD_INT_GL0_ERR (LAYER2_DMA_FIFO_OVERFLOW | LAYER2_DMA_FIFO_UNDERFLOW)
#define LCD_INT_GL1_ERR (LAYER3_DMA_FIFO_OVERFLOW | LAYER3_DMA_FIFO_UNDERFLOW)
/* Per-layer full interrupt sets: DMA done/idle plus the error bits above. */
#define LCD_INT_VL0 (LAYER0_DMA_DONE | LAYER0_DMA_IDLE | LCD_INT_VL0_ERR)
#define LCD_INT_VL1 (LAYER1_DMA_DONE | LAYER1_DMA_IDLE | LCD_INT_VL1_ERR)
#define LCD_INT_GL0 (LAYER2_DMA_DONE | LAYER2_DMA_IDLE | LCD_INT_GL0_ERR)
#define LCD_INT_GL1 (LAYER3_DMA_DONE | LAYER3_DMA_IDLE | LCD_INT_GL1_ERR)
/* Union of every layer's DMA error bits. */
#define LCD_INT_DMA_ERR (LCD_INT_VL0_ERR | LCD_INT_VL1_ERR \
		| LCD_INT_GL0_ERR | LCD_INT_GL1_ERR)
/* All planes can only attach to the single CRTC. */
#define POSSIBLE_CRTCS 1
#define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane)
/* Hardware layer indices: two video layers followed by two graphics layers. */
enum layer_id {
	LAYER_0,
	LAYER_1,
	LAYER_2,
	LAYER_3,
	/* KMB_MAX_PLANES */
};
/*
 * NOTE(review): only one DRM plane is registered even though four
 * hardware layers exist — presumably only the primary video layer is
 * supported so far; confirm before raising this.
 */
#define KMB_MAX_PLANES 1
/* Sub-plane indices within one framebuffer (planar YUV layouts). */
enum sub_plane_id {
	Y_PLANE,
	U_PLANE,
	V_PLANE,
	MAX_SUB_PLANES,
};
/*
 * struct kmb_plane - DRM plane plus the hardware layer it drives.
 * @base_plane: embedded DRM plane (recovered with to_kmb_plane()).
 * @id: hardware layer index (see enum layer_id).
 */
struct kmb_plane {
	struct drm_plane base_plane;
	unsigned char id;
};
/*
 * struct layer_status - cached per-layer state.
 * @disable: layer has been requested to turn off.
 * @ctrl: LCD_CONTROL enable bits associated with the layer.
 */
struct layer_status {
	bool disable;
	u32 ctrl;
};
struct kmb_plane *kmb_plane_init(struct drm_device *drm);
void kmb_plane_destroy(struct drm_plane *plane);
#endif /* __KMB_PLANE_H__ */

View File

@@ -0,0 +1,725 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright © 2018-2020 Intel Corporation
*/
#ifndef __KMB_REGS_H__
#define __KMB_REGS_H__
/***************************************************************************
* LCD controller control register defines
***************************************************************************/
#define LCD_CONTROL (0x4 * 0x000)
#define LCD_CTRL_PROGRESSIVE (0 << 0)
#define LCD_CTRL_INTERLACED BIT(0)
#define LCD_CTRL_ENABLE BIT(1)
#define LCD_CTRL_VL1_ENABLE BIT(2)
#define LCD_CTRL_VL2_ENABLE BIT(3)
#define LCD_CTRL_GL1_ENABLE BIT(4)
#define LCD_CTRL_GL2_ENABLE BIT(5)
#define LCD_CTRL_ALPHA_BLEND_VL1 (0 << 6)
#define LCD_CTRL_ALPHA_BLEND_VL2 BIT(6)
#define LCD_CTRL_ALPHA_BLEND_GL1 (2 << 6)
#define LCD_CTRL_ALPHA_BLEND_GL2 (3 << 6)
#define LCD_CTRL_ALPHA_TOP_VL1 (0 << 8)
#define LCD_CTRL_ALPHA_TOP_VL2 BIT(8)
#define LCD_CTRL_ALPHA_TOP_GL1 (2 << 8)
#define LCD_CTRL_ALPHA_TOP_GL2 (3 << 8)
#define LCD_CTRL_ALPHA_MIDDLE_VL1 (0 << 10)
#define LCD_CTRL_ALPHA_MIDDLE_VL2 BIT(10)
#define LCD_CTRL_ALPHA_MIDDLE_GL1 (2 << 10)
#define LCD_CTRL_ALPHA_MIDDLE_GL2 (3 << 10)
#define LCD_CTRL_ALPHA_BOTTOM_VL1 (0 << 12)
#define LCD_CTRL_ALPHA_BOTTOM_VL2 BIT(12)
#define LCD_CTRL_ALPHA_BOTTOM_GL1 (2 << 12)
#define LCD_CTRL_ALPHA_BOTTOM_GL2 (3 << 12)
#define LCD_CTRL_TIM_GEN_ENABLE BIT(14)
#define LCD_CTRL_CONTINUOUS (0 << 15)
#define LCD_CTRL_ONE_SHOT BIT(15)
#define LCD_CTRL_PWM0_EN BIT(16)
#define LCD_CTRL_PWM1_EN BIT(17)
#define LCD_CTRL_PWM2_EN BIT(18)
#define LCD_CTRL_OUTPUT_DISABLED (0 << 19)
#define LCD_CTRL_OUTPUT_ENABLED BIT(19)
#define LCD_CTRL_BPORCH_ENABLE BIT(21)
#define LCD_CTRL_FPORCH_ENABLE BIT(22)
#define LCD_CTRL_PIPELINE_DMA BIT(28)
#define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31)
/* interrupts */
#define LCD_INT_STATUS (0x4 * 0x001)
#define LCD_INT_EOF BIT(0)
#define LCD_INT_LINE_CMP BIT(1)
#define LCD_INT_VERT_COMP BIT(2)
#define LAYER0_DMA_DONE BIT(3)
#define LAYER0_DMA_IDLE BIT(4)
#define LAYER0_DMA_FIFO_OVERFLOW BIT(5)
#define LAYER0_DMA_FIFO_UNDERFLOW BIT(6)
#define LAYER0_DMA_CB_FIFO_OVERFLOW BIT(7)
#define LAYER0_DMA_CB_FIFO_UNDERFLOW BIT(8)
#define LAYER0_DMA_CR_FIFO_OVERFLOW BIT(9)
#define LAYER0_DMA_CR_FIFO_UNDERFLOW BIT(10)
#define LAYER1_DMA_DONE BIT(11)
#define LAYER1_DMA_IDLE BIT(12)
#define LAYER1_DMA_FIFO_OVERFLOW BIT(13)
#define LAYER1_DMA_FIFO_UNDERFLOW BIT(14)
#define LAYER1_DMA_CB_FIFO_OVERFLOW BIT(15)
#define LAYER1_DMA_CB_FIFO_UNDERFLOW BIT(16)
#define LAYER1_DMA_CR_FIFO_OVERFLOW BIT(17)
#define LAYER1_DMA_CR_FIFO_UNDERFLOW BIT(18)
#define LAYER2_DMA_DONE BIT(19)
#define LAYER2_DMA_IDLE BIT(20)
#define LAYER2_DMA_FIFO_OVERFLOW BIT(21)
#define LAYER2_DMA_FIFO_UNDERFLOW BIT(22)
#define LAYER3_DMA_DONE BIT(23)
#define LAYER3_DMA_IDLE BIT(24)
#define LAYER3_DMA_FIFO_OVERFLOW BIT(25)
#define LAYER3_DMA_FIFO_UNDERFLOW BIT(26)
#define LCD_INT_LAYER (0x07fffff8)
#define LCD_INT_ENABLE (0x4 * 0x002)
#define LCD_INT_CLEAR (0x4 * 0x003)
#define LCD_LINE_COUNT (0x4 * 0x004)
#define LCD_LINE_COMPARE (0x4 * 0x005)
#define LCD_VSTATUS (0x4 * 0x006)
/* LCD_VSTATUS_COMPARE: vertical interval in which to generate the vertical
 * interval interrupt
 */
/* BITS 13 and 14 */
#define LCD_VSTATUS_COMPARE (0x4 * 0x007)
#define LCD_VSTATUS_VERTICAL_STATUS_MASK (3 << 13)
#define LCD_VSTATUS_COMPARE_VSYNC (0 << 13)
#define LCD_VSTATUS_COMPARE_BACKPORCH BIT(13)
#define LCD_VSTATUS_COMPARE_ACTIVE (2 << 13)
#define LCD_VSTATUS_COMPARE_FRONT_PORCH (3 << 13)
#define LCD_SCREEN_WIDTH (0x4 * 0x008)
#define LCD_SCREEN_HEIGHT (0x4 * 0x009)
#define LCD_FIELD_INT_CFG (0x4 * 0x00a)
#define LCD_FIFO_FLUSH (0x4 * 0x00b)
#define LCD_BG_COLOUR_LS (0x4 * 0x00c)
#define LCD_BG_COLOUR_MS (0x4 * 0x00d)
#define LCD_RAM_CFG (0x4 * 0x00e)
/****************************************************************************
* LCD controller Layer config register
***************************************************************************/
#define LCD_LAYER0_CFG (0x4 * 0x100)
#define LCD_LAYERn_CFG(N) (LCD_LAYER0_CFG + (0x400 * (N)))
#define LCD_LAYER_SCALE_H BIT(1)
#define LCD_LAYER_SCALE_V BIT(2)
#define LCD_LAYER_SCALE_H_V (LCD_LAYER_SCALE_H | \
LCD_LAYER_SCALE_V)
#define LCD_LAYER_CSC_EN BIT(3)
#define LCD_LAYER_ALPHA_STATIC BIT(4)
#define LCD_LAYER_ALPHA_EMBED BIT(5)
#define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \
LCD_LAYER_ALPHA_EMBED)
/* RGB multiplied with alpha */
#define LCD_LAYER_ALPHA_PREMULT BIT(6)
#define LCD_LAYER_INVERT_COL BIT(7)
#define LCD_LAYER_TRANSPARENT_EN BIT(8)
#define LCD_LAYER_FORMAT_YCBCR444PLAN (0 << 9)
#define LCD_LAYER_FORMAT_YCBCR422PLAN BIT(9)
#define LCD_LAYER_FORMAT_YCBCR420PLAN (2 << 9)
#define LCD_LAYER_FORMAT_RGB888PLAN (3 << 9)
#define LCD_LAYER_FORMAT_YCBCR444LIN (4 << 9)
#define LCD_LAYER_FORMAT_YCBCR422LIN (5 << 9)
#define LCD_LAYER_FORMAT_RGB888 (6 << 9)
#define LCD_LAYER_FORMAT_RGBA8888 (7 << 9)
#define LCD_LAYER_FORMAT_RGBX8888 (8 << 9)
#define LCD_LAYER_FORMAT_RGB565 (9 << 9)
#define LCD_LAYER_FORMAT_RGBA1555 (0xa << 9)
#define LCD_LAYER_FORMAT_XRGB1555 (0xb << 9)
#define LCD_LAYER_FORMAT_RGB444 (0xc << 9)
#define LCD_LAYER_FORMAT_RGBA4444 (0xd << 9)
#define LCD_LAYER_FORMAT_RGBX4444 (0xe << 9)
#define LCD_LAYER_FORMAT_RGB332 (0xf << 9)
#define LCD_LAYER_FORMAT_RGBA3328 (0x10 << 9)
#define LCD_LAYER_FORMAT_RGBX3328 (0x11 << 9)
#define LCD_LAYER_FORMAT_CLUT (0x12 << 9)
#define LCD_LAYER_FORMAT_NV12 (0x1c << 9)
#define LCD_LAYER_PLANAR_STORAGE BIT(14)
#define LCD_LAYER_8BPP (0 << 15)
#define LCD_LAYER_16BPP BIT(15)
#define LCD_LAYER_24BPP (2 << 15)
#define LCD_LAYER_32BPP (3 << 15)
#define LCD_LAYER_Y_ORDER BIT(17)
#define LCD_LAYER_CRCB_ORDER BIT(18)
#define LCD_LAYER_BGR_ORDER BIT(19)
#define LCD_LAYER_LUT_2ENT (0 << 20)
#define LCD_LAYER_LUT_4ENT BIT(20)
#define LCD_LAYER_LUT_16ENT (2 << 20)
#define LCD_LAYER_NO_FLIP (0 << 22)
#define LCD_LAYER_FLIP_V BIT(22)
#define LCD_LAYER_FLIP_H (2 << 22)
#define LCD_LAYER_ROT_R90 (3 << 22)
#define LCD_LAYER_ROT_L90 (4 << 22)
#define LCD_LAYER_ROT_180 (5 << 22)
#define LCD_LAYER_FIFO_00 (0 << 25)
#define LCD_LAYER_FIFO_25 BIT(25)
#define LCD_LAYER_FIFO_50 (2 << 25)
#define LCD_LAYER_FIFO_100 (3 << 25)
#define LCD_LAYER_INTERLEAVE_DIS (0 << 27)
#define LCD_LAYER_INTERLEAVE_V BIT(27)
#define LCD_LAYER_INTERLEAVE_H (2 << 27)
#define LCD_LAYER_INTERLEAVE_CH (3 << 27)
#define LCD_LAYER_INTERLEAVE_V_SUB (4 << 27)
#define LCD_LAYER_INTERLEAVE_H_SUB (5 << 27)
#define LCD_LAYER_INTERLEAVE_CH_SUB (6 << 27)
#define LCD_LAYER_INTER_POS_EVEN (0 << 30)
#define LCD_LAYER_INTER_POS_ODD BIT(30)
#define LCD_LAYER0_COL_START (0x4 * 0x101)
#define LCD_LAYERn_COL_START(N) (LCD_LAYER0_COL_START + (0x400 * (N)))
#define LCD_LAYER0_ROW_START (0x4 * 0x102)
#define LCD_LAYERn_ROW_START(N) (LCD_LAYER0_ROW_START + (0x400 * (N)))
#define LCD_LAYER0_WIDTH (0x4 * 0x103)
#define LCD_LAYERn_WIDTH(N) (LCD_LAYER0_WIDTH + (0x400 * (N)))
#define LCD_LAYER0_HEIGHT (0x4 * 0x104)
#define LCD_LAYERn_HEIGHT(N) (LCD_LAYER0_HEIGHT + (0x400 * (N)))
#define LCD_LAYER0_SCALE_CFG (0x4 * 0x105)
#define LCD_LAYERn_SCALE_CFG(N) (LCD_LAYER0_SCALE_CFG + (0x400 * (N)))
#define LCD_LAYER0_ALPHA (0x4 * 0x106)
#define LCD_LAYERn_ALPHA(N) (LCD_LAYER0_ALPHA + (0x400 * (N)))
#define LCD_LAYER0_INV_COLOUR_LS (0x4 * 0x107)
#define LCD_LAYERn_INV_COLOUR_LS(N) (LCD_LAYER0_INV_COLOUR_LS + \
(0x400 * (N)))
#define LCD_LAYER0_INV_COLOUR_MS (0x4 * 0x108)
#define LCD_LAYERn_INV_COLOUR_MS(N) (LCD_LAYER0_INV_COLOUR_MS + \
(0x400 * (N)))
#define LCD_LAYER0_TRANS_COLOUR_LS (0x4 * 0x109)
#define LCD_LAYERn_TRANS_COLOUR_LS(N) (LCD_LAYER0_TRANS_COLOUR_LS + \
(0x400 * (N)))
#define LCD_LAYER0_TRANS_COLOUR_MS (0x4 * 0x10a)
#define LCD_LAYERn_TRANS_COLOUR_MS(N) (LCD_LAYER0_TRANS_COLOUR_MS + \
(0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF11 (0x4 * 0x10b)
#define LCD_LAYERn_CSC_COEFF11(N) (LCD_LAYER0_CSC_COEFF11 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF12 (0x4 * 0x10c)
#define LCD_LAYERn_CSC_COEFF12(N) (LCD_LAYER0_CSC_COEFF12 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF13 (0x4 * 0x10d)
#define LCD_LAYERn_CSC_COEFF13(N) (LCD_LAYER0_CSC_COEFF13 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF21 (0x4 * 0x10e)
#define LCD_LAYERn_CSC_COEFF21(N) (LCD_LAYER0_CSC_COEFF21 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF22 (0x4 * 0x10f)
#define LCD_LAYERn_CSC_COEFF22(N) (LCD_LAYER0_CSC_COEFF22 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF23 (0x4 * 0x110)
#define LCD_LAYERn_CSC_COEFF23(N) (LCD_LAYER0_CSC_COEFF23 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF31 (0x4 * 0x111)
#define LCD_LAYERn_CSC_COEFF31(N) (LCD_LAYER0_CSC_COEFF31 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF32 (0x4 * 0x112)
#define LCD_LAYERn_CSC_COEFF32(N) (LCD_LAYER0_CSC_COEFF32 + (0x400 * (N)))
#define LCD_LAYER0_CSC_COEFF33 (0x4 * 0x113)
#define LCD_LAYERn_CSC_COEFF33(N) (LCD_LAYER0_CSC_COEFF33 + (0x400 * (N)))
#define LCD_LAYER0_CSC_OFF1 (0x4 * 0x114)
#define LCD_LAYERn_CSC_OFF1(N) (LCD_LAYER0_CSC_OFF1 + (0x400 * (N)))
#define LCD_LAYER0_CSC_OFF2 (0x4 * 0x115)
#define LCD_LAYERn_CSC_OFF2(N) (LCD_LAYER0_CSC_OFF2 + (0x400 * (N)))
#define LCD_LAYER0_CSC_OFF3 (0x4 * 0x116)
#define LCD_LAYERn_CSC_OFF3(N) (LCD_LAYER0_CSC_OFF3 + (0x400 * (N)))
/* LCD controller Layer DMA config register */
#define LCD_LAYER0_DMA_CFG (0x4 * 0x117)
#define LCD_LAYERn_DMA_CFG(N) (LCD_LAYER0_DMA_CFG + \
(0x400 * (N)))
#define LCD_DMA_LAYER_ENABLE BIT(0)
#define LCD_DMA_LAYER_STATUS BIT(1)
#define LCD_DMA_LAYER_AUTO_UPDATE BIT(2)
#define LCD_DMA_LAYER_CONT_UPDATE BIT(3)
#define LCD_DMA_LAYER_CONT_PING_PONG_UPDATE (LCD_DMA_LAYER_AUTO_UPDATE \
| LCD_DMA_LAYER_CONT_UPDATE)
#define LCD_DMA_LAYER_FIFO_ADR_MODE BIT(4)
#define LCD_DMA_LAYER_AXI_BURST_1 BIT(5)
#define LCD_DMA_LAYER_AXI_BURST_2 (2 << 5)
#define LCD_DMA_LAYER_AXI_BURST_3 (3 << 5)
#define LCD_DMA_LAYER_AXI_BURST_4 (4 << 5)
#define LCD_DMA_LAYER_AXI_BURST_5 (5 << 5)
#define LCD_DMA_LAYER_AXI_BURST_6 (6 << 5)
#define LCD_DMA_LAYER_AXI_BURST_7 (7 << 5)
#define LCD_DMA_LAYER_AXI_BURST_8 (8 << 5)
#define LCD_DMA_LAYER_AXI_BURST_9 (9 << 5)
#define LCD_DMA_LAYER_AXI_BURST_10 (0xa << 5)
#define LCD_DMA_LAYER_AXI_BURST_11 (0xb << 5)
#define LCD_DMA_LAYER_AXI_BURST_12 (0xc << 5)
#define LCD_DMA_LAYER_AXI_BURST_13 (0xd << 5)
#define LCD_DMA_LAYER_AXI_BURST_14 (0xe << 5)
#define LCD_DMA_LAYER_AXI_BURST_15 (0xf << 5)
#define LCD_DMA_LAYER_AXI_BURST_16 (0x10 << 5)
#define LCD_DMA_LAYER_VSTRIDE_EN BIT(10)
#define LCD_LAYER0_DMA_START_ADR (0x4 * 0x118)
#define LCD_LAYERn_DMA_START_ADDR(N) (LCD_LAYER0_DMA_START_ADR \
+ (0x400 * (N)))
#define LCD_LAYER0_DMA_START_SHADOW (0x4 * 0x119)
#define LCD_LAYERn_DMA_START_SHADOW(N) (LCD_LAYER0_DMA_START_SHADOW \
+ (0x400 * (N)))
#define LCD_LAYER0_DMA_LEN (0x4 * 0x11a)
#define LCD_LAYERn_DMA_LEN(N) (LCD_LAYER0_DMA_LEN + \
(0x400 * (N)))
#define LCD_LAYER0_DMA_LEN_SHADOW (0x4 * 0x11b)
#define LCD_LAYERn_DMA_LEN_SHADOW(N) (LCD_LAYER0_DMA_LEN_SHADOW + \
(0x400 * (N)))
#define LCD_LAYER0_DMA_STATUS (0x4 * 0x11c)
#define LCD_LAYERn_DMA_STATUS(N) (LCD_LAYER0_DMA_STATUS + \
(0x400 * (N)))
#define LCD_LAYER0_DMA_LINE_WIDTH (0x4 * 0x11d)
#define LCD_LAYERn_DMA_LINE_WIDTH(N) (LCD_LAYER0_DMA_LINE_WIDTH + \
(0x400 * (N)))
#define LCD_LAYER0_DMA_LINE_VSTRIDE (0x4 * 0x11e)
#define LCD_LAYERn_DMA_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_LINE_VSTRIDE +\
(0x400 * (N)))
#define LCD_LAYER0_DMA_FIFO_STATUS (0x4 * 0x11f)
#define LCD_LAYERn_DMA_FIFO_STATUS(N) (LCD_LAYER0_DMA_FIFO_STATUS + \
(0x400 * (N)))
#define LCD_LAYER0_CFG2 (0x4 * 0x120)
#define LCD_LAYERn_CFG2(N) (LCD_LAYER0_CFG2 + (0x400 * (N)))
#define LCD_LAYER0_DMA_START_CB_ADR (0x4 * 0x700)
#define LCD_LAYERn_DMA_START_CB_ADR(N) (LCD_LAYER0_DMA_START_CB_ADR + \
(0x20 * (N)))
#define LCD_LAYER0_DMA_START_CB_SHADOW (0x4 * 0x701)
#define LCD_LAYERn_DMA_START_CB_SHADOW(N) (LCD_LAYER0_DMA_START_CB_SHADOW\
+ (0x20 * (N)))
#define LCD_LAYER0_DMA_CB_LINE_WIDTH (0x4 * 0x702)
#define LCD_LAYERn_DMA_CB_LINE_WIDTH(N) (LCD_LAYER0_DMA_CB_LINE_WIDTH +\
(0x20 * (N)))
#define LCD_LAYER0_DMA_CB_LINE_VSTRIDE (0x4 * 0x703)
#define LCD_LAYERn_DMA_CB_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_CB_LINE_VSTRIDE\
+ (0x20 * (N)))
#define LCD_LAYER0_DMA_START_CR_ADR (0x4 * 0x704)
#define LCD_LAYERn_DMA_START_CR_ADR(N) (LCD_LAYER0_DMA_START_CR_ADR + \
(0x20 * (N)))
#define LCD_LAYER0_DMA_START_CR_SHADOW (0x4 * 0x705)
#define LCD_LAYERn_DMA_START_CR_SHADOW(N) \
(LCD_LAYER0_DMA_START_CR_SHADOW\
+ (0x20 * (N)))
#define LCD_LAYER0_DMA_CR_LINE_WIDTH (0x4 * 0x706)
#define LCD_LAYERn_DMA_CR_LINE_WIDTH(N) (LCD_LAYER0_DMA_CR_LINE_WIDTH +\
(0x20 * (N)))
#define LCD_LAYER0_DMA_CR_LINE_VSTRIDE (0x4 * 0x707)
#define LCD_LAYERn_DMA_CR_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_CR_LINE_VSTRIDE\
+ (0x20 * (N)))
#define LCD_LAYER1_DMA_START_CB_ADR (0x4 * 0x708)
#define LCD_LAYER1_DMA_START_CB_SHADOW (0x4 * 0x709)
#define LCD_LAYER1_DMA_CB_LINE_WIDTH (0x4 * 0x70a)
#define LCD_LAYER1_DMA_CB_LINE_VSTRIDE (0x4 * 0x70b)
#define LCD_LAYER1_DMA_START_CR_ADR (0x4 * 0x70c)
#define LCD_LAYER1_DMA_START_CR_SHADOW (0x4 * 0x70d)
#define LCD_LAYER1_DMA_CR_LINE_WIDTH (0x4 * 0x70e)
#define LCD_LAYER1_DMA_CR_LINE_VSTRIDE (0x4 * 0x70f)
/****************************************************************************
* LCD controller output format register defines
***************************************************************************/
#define LCD_OUT_FORMAT_CFG (0x4 * 0x800)
#define LCD_OUTF_FORMAT_RGB121212 (0x00)
#define LCD_OUTF_FORMAT_RGB101010 (0x01)
#define LCD_OUTF_FORMAT_RGB888 (0x02)
#define LCD_OUTF_FORMAT_RGB666 (0x03)
#define LCD_OUTF_FORMAT_RGB565 (0x04)
#define LCD_OUTF_FORMAT_RGB444 (0x05)
#define LCD_OUTF_FORMAT_MRGB121212 (0x10)
#define LCD_OUTF_FORMAT_MRGB101010 (0x11)
#define LCD_OUTF_FORMAT_MRGB888 (0x12)
#define LCD_OUTF_FORMAT_MRGB666 (0x13)
#define LCD_OUTF_FORMAT_MRGB565 (0x14)
#define LCD_OUTF_FORMAT_YCBCR420_8B_LEGACY (0x08)
#define LCD_OUTF_FORMAT_YCBCR420_8B_DCI (0x09)
#define LCD_OUTF_FORMAT_YCBCR420_8B (0x0A)
#define LCD_OUTF_FORMAT_YCBCR420_10B (0x0B)
#define LCD_OUTF_FORMAT_YCBCR420_12B (0x0C)
#define LCD_OUTF_FORMAT_YCBCR422_8B (0x0D)
#define LCD_OUTF_FORMAT_YCBCR422_10B (0x0E)
#define LCD_OUTF_FORMAT_YCBCR444 (0x0F)
#define LCD_OUTF_FORMAT_MYCBCR420_8B_LEGACY (0x18)
#define LCD_OUTF_FORMAT_MYCBCR420_8B_DCI (0x19)
#define LCD_OUTF_FORMAT_MYCBCR420_8B (0x1A)
#define LCD_OUTF_FORMAT_MYCBCR420_10B (0x1B)
#define LCD_OUTF_FORMAT_MYCBCR420_12B (0x1C)
#define LCD_OUTF_FORMAT_MYCBCR422_8B (0x1D)
#define LCD_OUTF_FORMAT_MYCBCR422_10B (0x1E)
#define LCD_OUTF_FORMAT_MYCBCR444 (0x1F)
#define LCD_OUTF_BGR_ORDER BIT(5)
#define LCD_OUTF_Y_ORDER BIT(6)
#define LCD_OUTF_CRCB_ORDER BIT(7)
#define LCD_OUTF_SYNC_MODE BIT(11)
#define LCD_OUTF_RGB_CONV_MODE BIT(14)
#define LCD_OUTF_MIPI_RGB_MODE BIT(18)
#define LCD_HSYNC_WIDTH (0x4 * 0x801)
#define LCD_H_BACKPORCH (0x4 * 0x802)
#define LCD_H_ACTIVEWIDTH (0x4 * 0x803)
#define LCD_H_FRONTPORCH (0x4 * 0x804)
#define LCD_VSYNC_WIDTH (0x4 * 0x805)
#define LCD_V_BACKPORCH (0x4 * 0x806)
#define LCD_V_ACTIVEHEIGHT (0x4 * 0x807)
#define LCD_V_FRONTPORCH (0x4 * 0x808)
#define LCD_VSYNC_START (0x4 * 0x809)
#define LCD_VSYNC_END (0x4 * 0x80a)
#define LCD_V_BACKPORCH_EVEN (0x4 * 0x80b)
#define LCD_VSYNC_WIDTH_EVEN (0x4 * 0x80c)
#define LCD_V_ACTIVEHEIGHT_EVEN (0x4 * 0x80d)
#define LCD_V_FRONTPORCH_EVEN (0x4 * 0x80e)
#define LCD_VSYNC_START_EVEN (0x4 * 0x80f)
#define LCD_VSYNC_END_EVEN (0x4 * 0x810)
#define LCD_TIMING_GEN_TRIG (0x4 * 0x811)
#define LCD_PWM0_CTRL (0x4 * 0x812)
#define LCD_PWM0_RPT_LEADIN (0x4 * 0x813)
#define LCD_PWM0_HIGH_LOW (0x4 * 0x814)
#define LCD_PWM1_CTRL (0x4 * 0x815)
#define LCD_PWM1_RPT_LEADIN (0x4 * 0x816)
#define LCD_PWM1_HIGH_LOW (0x4 * 0x817)
#define LCD_PWM2_CTRL (0x4 * 0x818)
#define LCD_PWM2_RPT_LEADIN (0x4 * 0x819)
#define LCD_PWM2_HIGH_LOW (0x4 * 0x81a)
#define LCD_VIDEO0_DMA0_BYTES (0x4 * 0xb00)
#define LCD_VIDEO0_DMA0_STATE (0x4 * 0xb01)
#define LCD_DMA_STATE_ACTIVE BIT(3)
#define LCD_VIDEO0_DMA1_BYTES (0x4 * 0xb02)
#define LCD_VIDEO0_DMA1_STATE (0x4 * 0xb03)
#define LCD_VIDEO0_DMA2_BYTES (0x4 * 0xb04)
#define LCD_VIDEO0_DMA2_STATE (0x4 * 0xb05)
#define LCD_VIDEO1_DMA0_BYTES (0x4 * 0xb06)
#define LCD_VIDEO1_DMA0_STATE (0x4 * 0xb07)
#define LCD_VIDEO1_DMA1_BYTES (0x4 * 0xb08)
#define LCD_VIDEO1_DMA1_STATE (0x4 * 0xb09)
#define LCD_VIDEO1_DMA2_BYTES (0x4 * 0xb0a)
#define LCD_VIDEO1_DMA2_STATE (0x4 * 0xb0b)
#define LCD_GRAPHIC0_DMA_BYTES (0x4 * 0xb0c)
#define LCD_GRAPHIC0_DMA_STATE (0x4 * 0xb0d)
#define LCD_GRAPHIC1_DMA_BYTES (0x4 * 0xb0e)
#define LCD_GRAPHIC1_DMA_STATE (0x4 * 0xb0f)
/***************************************************************************
* MIPI controller control register defines
*************************************************************************/
#define MIPI0_HS_BASE_ADDR (MIPI_BASE_ADDR + 0x400)
#define HS_OFFSET(M) (((M) + 1) * 0x400)
#define MIPI_TX_HS_CTRL (0x0)
#define MIPI_TXm_HS_CTRL(M) (MIPI_TX_HS_CTRL + HS_OFFSET(M))
#define HS_CTRL_EN BIT(0)
/* 1:CSI 0:DSI */
#define HS_CTRL_CSIDSIN BIT(2)
/* 1:LCD, 0:DMA */
#define TX_SOURCE BIT(3)
#define ACTIVE_LANES(n) ((n) << 4)
#define LCD_VC(ch) ((ch) << 8)
#define DSI_EOTP_EN BIT(11)
#define DSI_CMD_HFP_EN BIT(12)
#define CRC_EN BIT(14)
#define HSEXIT_CNT(n) ((n) << 16)
#define HSCLKIDLE_CNT BIT(24)
#define MIPI_TX_HS_SYNC_CFG (0x8)
#define MIPI_TXm_HS_SYNC_CFG(M) (MIPI_TX_HS_SYNC_CFG \
+ HS_OFFSET(M))
#define LINE_SYNC_PKT_ENABLE BIT(0)
#define FRAME_COUNTER_ACTIVE BIT(1)
#define LINE_COUNTER_ACTIVE BIT(2)
#define DSI_V_BLANKING BIT(4)
#define DSI_HSA_BLANKING BIT(5)
#define DSI_HBP_BLANKING BIT(6)
#define DSI_HFP_BLANKING BIT(7)
#define DSI_SYNC_PULSE_EVENTN BIT(8)
#define DSI_LPM_FIRST_VSA_LINE BIT(9)
#define DSI_LPM_LAST_VFP_LINE BIT(10)
#define WAIT_ALL_SECT BIT(11)
#define WAIT_TRIG_POS BIT(15)
#define ALWAYS_USE_HACT(f) ((f) << 19)
#define FRAME_GEN_EN(f) ((f) << 23)
#define HACT_WAIT_STOP(f) ((f) << 28)
#define MIPI_TX0_HS_FG0_SECT0_PH (0x40)
#define MIPI_TXm_HS_FGn_SECTo_PH(M, N, O) (MIPI_TX0_HS_FG0_SECT0_PH + \
HS_OFFSET(M) + (0x2C * (N)) \
+ (8 * (O)))
#define MIPI_TX_SECT_WC_MASK (0xffff)
#define MIPI_TX_SECT_VC_MASK (3)
#define MIPI_TX_SECT_VC_SHIFT (22)
#define MIPI_TX_SECT_DT_MASK (0x3f)
#define MIPI_TX_SECT_DT_SHIFT (16)
#define MIPI_TX_SECT_DM_MASK (3)
#define MIPI_TX_SECT_DM_SHIFT (24)
#define MIPI_TX_SECT_DMA_PACKED BIT(26)
#define MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES0 (0x60)
#define MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES1 (0x64)
#define MIPI_TXm_HS_FGn_SECT_UNPACKED_BYTES0(M, N) \
(MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES0 \
+ HS_OFFSET(M) + (0x2C * (N)))
#define MIPI_TX_HS_FG0_SECT0_LINE_CFG (0x44)
#define MIPI_TXm_HS_FGn_SECTo_LINE_CFG(M, N, O) \
(MIPI_TX_HS_FG0_SECT0_LINE_CFG + HS_OFFSET(M) \
+ (0x2C * (N)) + (8 * (O)))
#define MIPI_TX_HS_FG0_NUM_LINES (0x68)
#define MIPI_TXm_HS_FGn_NUM_LINES(M, N) \
(MIPI_TX_HS_FG0_NUM_LINES + HS_OFFSET(M) \
+ (0x2C * (N)))
#define MIPI_TX_HS_VSYNC_WIDTHS0 (0x104)
#define MIPI_TXm_HS_VSYNC_WIDTHn(M, N) \
(MIPI_TX_HS_VSYNC_WIDTHS0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_V_BACKPORCHES0 (0x16c)
#define MIPI_TXm_HS_V_BACKPORCHESn(M, N) \
(MIPI_TX_HS_V_BACKPORCHES0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_V_FRONTPORCHES0 (0x174)
#define MIPI_TXm_HS_V_FRONTPORCHESn(M, N) \
(MIPI_TX_HS_V_FRONTPORCHES0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_V_ACTIVE0 (0x17c)
#define MIPI_TXm_HS_V_ACTIVEn(M, N) \
(MIPI_TX_HS_V_ACTIVE0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_HSYNC_WIDTH0 (0x10c)
#define MIPI_TXm_HS_HSYNC_WIDTHn(M, N) \
(MIPI_TX_HS_HSYNC_WIDTH0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_H_BACKPORCH0 (0x11c)
#define MIPI_TXm_HS_H_BACKPORCHn(M, N) \
(MIPI_TX_HS_H_BACKPORCH0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_H_FRONTPORCH0 (0x12c)
#define MIPI_TXm_HS_H_FRONTPORCHn(M, N) \
(MIPI_TX_HS_H_FRONTPORCH0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_H_ACTIVE0 (0x184)
#define MIPI_TXm_HS_H_ACTIVEn(M, N) \
(MIPI_TX_HS_H_ACTIVE0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_LLP_HSYNC_WIDTH0 (0x13c)
#define MIPI_TXm_HS_LLP_HSYNC_WIDTHn(M, N) \
(MIPI_TX_HS_LLP_HSYNC_WIDTH0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_LLP_H_BACKPORCH0 (0x14c)
#define MIPI_TXm_HS_LLP_H_BACKPORCHn(M, N) \
(MIPI_TX_HS_LLP_H_BACKPORCH0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_LLP_H_FRONTPORCH0 (0x15c)
#define MIPI_TXm_HS_LLP_H_FRONTPORCHn(M, N) \
(MIPI_TX_HS_LLP_H_FRONTPORCH0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define MIPI_TX_HS_MC_FIFO_CTRL_EN (0x194)
#define MIPI_TXm_HS_MC_FIFO_CTRL_EN(M) \
(MIPI_TX_HS_MC_FIFO_CTRL_EN + HS_OFFSET(M))
#define MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0 (0x198)
#define MIPI_TX_HS_MC_FIFO_CHAN_ALLOC1 (0x19c)
#define MIPI_TXm_HS_MC_FIFO_CHAN_ALLOCn(M, N) \
(MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define SET_MC_FIFO_CHAN_ALLOC(dev, ctrl, vc, sz) \
kmb_write_bits_mipi(dev, \
MIPI_TXm_HS_MC_FIFO_CHAN_ALLOCn(ctrl, \
(vc) / 2), ((vc) % 2) * 16, 16, sz)
#define MIPI_TX_HS_MC_FIFO_RTHRESHOLD0 (0x1a0)
#define MIPI_TX_HS_MC_FIFO_RTHRESHOLD1 (0x1a4)
#define MIPI_TXm_HS_MC_FIFO_RTHRESHOLDn(M, N) \
(MIPI_TX_HS_MC_FIFO_RTHRESHOLD0 + HS_OFFSET(M) \
+ (0x4 * (N)))
#define SET_MC_FIFO_RTHRESHOLD(dev, ctrl, vc, th) \
kmb_write_bits_mipi(dev, MIPI_TXm_HS_MC_FIFO_RTHRESHOLDn(ctrl, \
(vc) / 2), ((vc) % 2) * 16, 16, th)
#define MIPI_TX_HS_DMA_CFG (0x1a8)
#define MIPI_TX_HS_DMA_START_ADR_CHAN0 (0x1ac)
#define MIPI_TX_HS_DMA_LEN_CHAN0 (0x1b4)
/* MIPI IRQ */
#define MIPI_CTRL_IRQ_STATUS0 (0x00)
/* Bit positions / masks within MIPI_CTRL_IRQ_STATUS0 */
#define MIPI_DPHY_ERR_IRQ 1
#define MIPI_DPHY_ERR_MASK 0x7FE /* bits 1-10 */
#define MIPI_HS_IRQ 13
/* bits 13-22 */
#define MIPI_HS_IRQ_MASK 0x7FE000
#define MIPI_LP_EVENT_IRQ 25
#define MIPI_GET_IRQ_STAT0(dev) kmb_read_mipi(dev, \
MIPI_CTRL_IRQ_STATUS0)
#define MIPI_CTRL_IRQ_STATUS1 (0x04)
#define MIPI_HS_RX_EVENT_IRQ 0
#define MIPI_GET_IRQ_STAT1(dev) kmb_read_mipi(dev, \
MIPI_CTRL_IRQ_STATUS1)
#define MIPI_CTRL_IRQ_ENABLE0 (0x08)
/* Set enable bit number (M + N) in MIPI_CTRL_IRQ_ENABLE0 */
#define SET_MIPI_CTRL_IRQ_ENABLE0(dev, M, N) kmb_set_bit_mipi(dev, \
MIPI_CTRL_IRQ_ENABLE0, \
(M) + (N))
#define MIPI_GET_IRQ_ENABLED0(dev) kmb_read_mipi(dev, \
MIPI_CTRL_IRQ_ENABLE0)
#define MIPI_CTRL_IRQ_ENABLE1 (0x0c)
#define MIPI_GET_IRQ_ENABLED1(dev) kmb_read_mipi(dev, \
MIPI_CTRL_IRQ_ENABLE1)
#define MIPI_CTRL_IRQ_CLEAR0 (0x010)
/* Set clear bit number (M + N) in MIPI_CTRL_IRQ_CLEAR0/1 */
#define SET_MIPI_CTRL_IRQ_CLEAR0(dev, M, N) \
kmb_set_bit_mipi(dev, MIPI_CTRL_IRQ_CLEAR0, (M) + (N))
#define MIPI_CTRL_IRQ_CLEAR1 (0x014)
#define SET_MIPI_CTRL_IRQ_CLEAR1(dev, M, N) \
kmb_set_bit_mipi(dev, MIPI_CTRL_IRQ_CLEAR1, (M) + (N))
#define MIPI_CTRL_DIG_LOOPBACK (0x018)
/* Per-controller HS IRQ status; controller M selected via HS_OFFSET(M) */
#define MIPI_TX_HS_IRQ_STATUS (0x01c)
#define MIPI_TX_HS_IRQ_STATUSm(M) (MIPI_TX_HS_IRQ_STATUS + \
HS_OFFSET(M))
#define GET_MIPI_TX_HS_IRQ_STATUS(dev, M) kmb_read_mipi(dev, \
MIPI_TX_HS_IRQ_STATUSm(M))
/* Individual HS IRQ status bits */
#define MIPI_TX_HS_IRQ_LINE_COMPARE BIT(1)
#define MIPI_TX_HS_IRQ_FRAME_DONE_0 BIT(2)
#define MIPI_TX_HS_IRQ_FRAME_DONE_1 BIT(3)
#define MIPI_TX_HS_IRQ_FRAME_DONE_2 BIT(4)
#define MIPI_TX_HS_IRQ_FRAME_DONE_3 BIT(5)
#define MIPI_TX_HS_IRQ_DMA_DONE_0 BIT(6)
#define MIPI_TX_HS_IRQ_DMA_IDLE_0 BIT(7)
#define MIPI_TX_HS_IRQ_DMA_DONE_1 BIT(8)
#define MIPI_TX_HS_IRQ_DMA_IDLE_1 BIT(9)
#define MIPI_TX_HS_IRQ_DMA_DONE_2 BIT(10)
#define MIPI_TX_HS_IRQ_DMA_IDLE_2 BIT(11)
#define MIPI_TX_HS_IRQ_DMA_DONE_3 BIT(12)
#define MIPI_TX_HS_IRQ_DMA_IDLE_3 BIT(13)
#define MIPI_TX_HS_IRQ_MC_FIFO_UNDERFLOW BIT(14)
#define MIPI_TX_HS_IRQ_MC_FIFO_OVERFLOW BIT(15)
#define MIPI_TX_HS_IRQ_LLP_FIFO_EMPTY BIT(16)
#define MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_FULL BIT(17)
#define MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_ERROR BIT(18)
#define MIPI_TX_HS_IRQ_LLP_WORD_COUNT_ERROR BIT(20)
/* Composite masks built from the per-channel bits above */
#define MIPI_TX_HS_IRQ_FRAME_DONE \
(MIPI_TX_HS_IRQ_FRAME_DONE_0 | \
MIPI_TX_HS_IRQ_FRAME_DONE_1 | \
MIPI_TX_HS_IRQ_FRAME_DONE_2 | \
MIPI_TX_HS_IRQ_FRAME_DONE_3)
#define MIPI_TX_HS_IRQ_DMA_DONE \
(MIPI_TX_HS_IRQ_DMA_DONE_0 | \
MIPI_TX_HS_IRQ_DMA_DONE_1 | \
MIPI_TX_HS_IRQ_DMA_DONE_2 | \
MIPI_TX_HS_IRQ_DMA_DONE_3)
#define MIPI_TX_HS_IRQ_DMA_IDLE \
(MIPI_TX_HS_IRQ_DMA_IDLE_0 | \
MIPI_TX_HS_IRQ_DMA_IDLE_1 | \
MIPI_TX_HS_IRQ_DMA_IDLE_2 | \
MIPI_TX_HS_IRQ_DMA_IDLE_3)
#define MIPI_TX_HS_IRQ_ERROR \
(MIPI_TX_HS_IRQ_MC_FIFO_UNDERFLOW | \
MIPI_TX_HS_IRQ_MC_FIFO_OVERFLOW | \
MIPI_TX_HS_IRQ_LLP_FIFO_EMPTY | \
MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_FULL | \
MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_ERROR | \
MIPI_TX_HS_IRQ_LLP_WORD_COUNT_ERROR)
#define MIPI_TX_HS_IRQ_ALL \
(MIPI_TX_HS_IRQ_FRAME_DONE | \
MIPI_TX_HS_IRQ_DMA_DONE | \
MIPI_TX_HS_IRQ_DMA_IDLE | \
MIPI_TX_HS_IRQ_LINE_COMPARE | \
MIPI_TX_HS_IRQ_ERROR)
#define MIPI_TX_HS_IRQ_ENABLE (0x020)
#define GET_HS_IRQ_ENABLE(dev, M) kmb_read_mipi(dev, \
MIPI_TX_HS_IRQ_ENABLE \
+ HS_OFFSET(M))
#define MIPI_TX_HS_IRQ_CLEAR (0x024)
/* MIPI Test Pattern Generation */
#define MIPI_TX_HS_TEST_PAT_CTRL (0x230)
#define MIPI_TXm_HS_TEST_PAT_CTRL(M) \
(MIPI_TX_HS_TEST_PAT_CTRL + HS_OFFSET(M))
/* One 4-bit field per virtual channel M: bit 0 enables, bits 1+ select */
#define TP_EN_VCm(M) (1 << ((M) * 0x04))
#define TP_SEL_VCm(M, N) \
((N) << (((M) * 0x04) + 1))
/* Stripe width value placed in bits 16+ of the control register */
#define TP_STRIPE_WIDTH(M) ((M) << 16)
#define MIPI_TX_HS_TEST_PAT_COLOR0 (0x234)
#define MIPI_TXm_HS_TEST_PAT_COLOR0(M) \
(MIPI_TX_HS_TEST_PAT_COLOR0 + HS_OFFSET(M))
#define MIPI_TX_HS_TEST_PAT_COLOR1 (0x238)
#define MIPI_TXm_HS_TEST_PAT_COLOR1(M) \
(MIPI_TX_HS_TEST_PAT_COLOR1 + HS_OFFSET(M))
/* D-PHY regs */
#define DPHY_ENABLE (0x100)
#define DPHY_INIT_CTRL0 (0x104)
/* Bit positions within DPHY_INIT_CTRL0 */
#define SHUTDOWNZ 0
#define RESETZ 12
#define DPHY_INIT_CTRL1 (0x108)
/* Bit positions within DPHY_INIT_CTRL1 */
#define PLL_CLKSEL_0 18
#define PLL_SHADOW_CTRL 16
#define DPHY_INIT_CTRL2 (0x10c)
/* Set/clear bit number (dphy + offset) in DPHY_INIT_CTRL0 */
#define SET_DPHY_INIT_CTRL0(dev, dphy, offset) \
kmb_set_bit_mipi(dev, DPHY_INIT_CTRL0, \
((dphy) + (offset)))
#define CLR_DPHY_INIT_CTRL0(dev, dphy, offset) \
kmb_clr_bit_mipi(dev, DPHY_INIT_CTRL0, \
((dphy) + (offset)))
#define DPHY_PLL_OBS0 (0x110)
#define DPHY_PLL_OBS1 (0x114)
#define DPHY_PLL_OBS2 (0x118)
#define DPHY_FREQ_CTRL0_3 (0x11c)
#define DPHY_FREQ_CTRL4_7 (0x120)
/*
 * Four D-PHYs are packed per FREQ_CTRL register, one byte lane each
 * (6 bits used): register index = dphy / 4, bit offset = (dphy % 4) * 8.
 */
#define SET_DPHY_FREQ_CTRL0_3(dev, dphy, val) \
kmb_write_bits_mipi(dev, DPHY_FREQ_CTRL0_3 \
+ (((dphy) / 4) * 4), ((dphy) % 4) * 8, 6, val)
#define DPHY_FORCE_CTRL0 (0x128)
#define DPHY_FORCE_CTRL1 (0x12C)
#define MIPI_DPHY_STAT0_3 (0x134)
#define MIPI_DPHY_STAT4_7 (0x138)
/* Extract the 2-bit data stopstate field for a D-PHY from the STAT regs */
#define GET_STOPSTATE_DATA(dev, dphy) \
(((kmb_read_mipi(dev, MIPI_DPHY_STAT0_3 + \
((dphy) / 4) * 4)) >> \
((((dphy) % 4) * 8) + 4)) & 0x03)
#define MIPI_DPHY_ERR_STAT6_7 (0x14C)
#define DPHY_TEST_CTRL0 (0x154)
#define SET_DPHY_TEST_CTRL0(dev, dphy) \
kmb_set_bit_mipi(dev, DPHY_TEST_CTRL0, (dphy))
#define CLR_DPHY_TEST_CTRL0(dev, dphy) \
kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL0, \
(dphy))
#define DPHY_TEST_CTRL1 (0x158)
#define SET_DPHY_TEST_CTRL1_CLK(dev, dphy) \
kmb_set_bit_mipi(dev, DPHY_TEST_CTRL1, (dphy))
#define CLR_DPHY_TEST_CTRL1_CLK(dev, dphy) \
kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL1, (dphy))
/* Test-enable bits sit 12 positions above the clock bits */
#define SET_DPHY_TEST_CTRL1_EN(dev, dphy) \
kmb_set_bit_mipi(dev, DPHY_TEST_CTRL1, ((dphy) + 12))
#define CLR_DPHY_TEST_CTRL1_EN(dev, dphy) \
kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL1, ((dphy) + 12))
#define DPHY_TEST_DIN0_3 (0x15c)
/*
 * NOTE(review): the register offset here is a constant "+ 4", while the
 * sibling accessors (GET_STOPSTATE_DATA, SET_DPHY_FREQ_CTRL0_3) index the
 * register by ((dphy) / 4) * 4 — confirm against the hardware spec whether
 * the constant is intentional.
 */
#define SET_TEST_DIN0_3(dev, dphy, val) \
kmb_write_mipi(dev, DPHY_TEST_DIN0_3 + \
4, ((val) << (((dphy) % 4) * 8)))
#define DPHY_TEST_DOUT0_3 (0x168)
/* Read the byte lane for dphy out of the packed DOUT register */
#define GET_TEST_DOUT0_3(dev, dphy) \
(kmb_read_mipi(dev, DPHY_TEST_DOUT0_3) \
>> (((dphy) % 4) * 8) & 0xff)
#define DPHY_TEST_DOUT4_7 (0x16C)
#define GET_TEST_DOUT4_7(dev, dphy) \
(kmb_read_mipi(dev, DPHY_TEST_DOUT4_7) \
>> (((dphy) % 4) * 8) & 0xff)
#define DPHY_TEST_DOUT8_9 (0x170)
#define DPHY_TEST_DIN4_7 (0x160)
#define DPHY_TEST_DIN8_9 (0x164)
#define DPHY_PLL_LOCK (0x188)
/* Lock bits are indexed relative to MIPI_DPHY6 (defined earlier in header) */
#define GET_PLL_LOCK(dev, dphy) \
(kmb_read_mipi(dev, DPHY_PLL_LOCK) \
& (1 << ((dphy) - MIPI_DPHY6)))
#define DPHY_CFG_CLK_EN (0x18c)
/* MSS CIF / LCD / camera configuration register offsets */
#define MSS_MIPI_CIF_CFG (0x00)
#define MSS_LCD_MIPI_CFG (0x04)
#define MSS_CAM_CLK_CTRL (0x10)
#define MSS_LOOPBACK_CFG (0x0C)
/* Bit fields used with the MSS clock/config registers above */
#define LCD BIT(1)
#define MIPI_COMMON BIT(2)
#define MIPI_TX0 BIT(9)
#define MSS_CAM_RSTN_CTRL (0x14)
#define MSS_CAM_RSTN_SET (0x20)
#define MSS_CAM_RSTN_CLR (0x24)
/* MSS CPU clock/reset enable registers (separate CPR block) */
#define MSSCPU_CPR_CLK_EN (0x0)
#define MSSCPU_CPR_RST_EN (0x10)
#define BIT_MASK_16 (0xffff)
/* icam lcd qos */
#define LCD_QOS_PRIORITY (0x8)
#define LCD_QOS_MODE (0xC)
#define LCD_QOS_BW (0x10)
#endif /* __KMB_REGS_H__ */

View File

@@ -261,7 +261,7 @@ DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
* - 1.1.0 - add heap buffer support
*/
static struct drm_driver lima_drm_driver = {
static const struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
.postclose = lima_drm_driver_postclose,

View File

@@ -182,14 +182,14 @@ static int lima_gem_pin(struct drm_gem_object *obj)
return drm_gem_shmem_pin(obj);
}
static void *lima_gem_vmap(struct drm_gem_object *obj)
static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct lima_bo *bo = to_lima_bo(obj);
if (bo->heap_size)
return ERR_PTR(-EINVAL);
return -EINVAL;
return drm_gem_shmem_vmap(obj);
return drm_gem_shmem_vmap(obj, map);
}
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/dma-buf-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -303,6 +304,8 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
struct lima_dump_chunk_buffer *buffer_chunk;
u32 size, task_size, mem_size;
int i;
struct dma_buf_map map;
int ret;
mutex_lock(&dev->error_task_list_lock);
@@ -388,15 +391,15 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
data = drm_gem_shmem_vmap(&bo->base.base);
if (IS_ERR_OR_NULL(data)) {
ret = drm_gem_shmem_vmap(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
}
memcpy(buffer_chunk + 1, data, buffer_chunk->size);
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
drm_gem_shmem_vunmap(&bo->base.base, data);
drm_gem_shmem_vunmap(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;

View File

@@ -178,7 +178,7 @@ static int mcde_modeset_init(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver mcde_drm_driver = {
static const struct drm_driver mcde_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,

View File

@@ -11,6 +11,7 @@
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -577,17 +578,19 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mtk_crtc_state *crtc_state = to_mtk_crtc_state(crtc->state);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
if (mtk_crtc->event && crtc_state->base.event)
if (mtk_crtc->event && mtk_crtc_state->base.event)
DRM_ERROR("new event while there is still a pending event\n");
if (crtc_state->base.event) {
crtc_state->base.event->pipe = drm_crtc_index(crtc);
if (mtk_crtc_state->base.event) {
mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
mtk_crtc->event = crtc_state->base.event;
crtc_state->base.event = NULL;
mtk_crtc->event = mtk_crtc_state->base.event;
mtk_crtc_state->base.event = NULL;
}
}

View File

@@ -321,7 +321,7 @@ struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
}
static struct drm_driver mtk_drm_driver = {
static const struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.dumb_create = mtk_drm_gem_dumb_create,

View File

@@ -240,23 +240,25 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
return &mtk_gem->base;
}
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct sg_table *sgt;
struct sg_table *sgt = NULL;
unsigned int npages;
if (mtk_gem->kvaddr)
return mtk_gem->kvaddr;
goto out;
sgt = mtk_gem_prime_get_sg_table(obj);
if (IS_ERR(sgt))
return NULL;
return PTR_ERR(sgt);
npages = obj->size >> PAGE_SHIFT;
mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
if (!mtk_gem->pages)
goto out;
if (!mtk_gem->pages) {
kfree(sgt);
return -ENOMEM;
}
drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
@@ -265,13 +267,15 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
out:
kfree(sgt);
dma_buf_map_set_vaddr(map, mtk_gem->kvaddr);
return mtk_gem->kvaddr;
return 0;
}
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
void *vaddr = map->vaddr;
if (!mtk_gem->pages)
return;

View File

@@ -45,7 +45,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
#endif

View File

@@ -90,7 +90,7 @@ static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver meson_driver = {
static const struct drm_driver meson_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
/* IRQ */

View File

@@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*/
/**
/*
* \file mga_dma.c
* DMA support for MGA G200 / G400.
*
@@ -435,7 +435,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
}
#if IS_ENABLED(CONFIG_AGP)
/**
/*
* Bootstrap the driver for AGP DMA.
*
* \todo
@@ -610,7 +610,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
}
#endif
/**
/*
* Bootstrap the driver for PCI DMA.
*
* \todo
@@ -1143,7 +1143,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
return ret;
}
/**
/*
* Called just before the module is unloaded.
*/
void mga_driver_unload(struct drm_device *dev)
@@ -1152,7 +1152,7 @@ void mga_driver_unload(struct drm_device *dev)
dev->dev_private = NULL;
}
/**
/*
* Called when the last opener of the device is closed.
*/
void mga_driver_lastclose(struct drm_device *dev)

View File

@@ -942,7 +942,6 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi
struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = dev->dev_private;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_iload_t *iload = data;
DRM_DEBUG("\n");
@@ -959,7 +958,6 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi
return -EINVAL;
buf = dma->buflist[iload->idx];
buf_priv = buf->dev_private;
if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
mga_freelist_put(dev, buf);

View File

@@ -28,7 +28,7 @@ module_param_named(modeset, mgag200_modeset, int, 0400);
DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
static struct drm_driver mgag200_driver = {
static const struct drm_driver mgag200_driver = {
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,

View File

@@ -9,6 +9,7 @@
*/
#include <linux/delay.h>
#include <linux/dma-buf-map.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
@@ -794,21 +795,16 @@ static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
case G200_SE_A:
case G200_SE_B:
return mga_g200se_set_plls(mdev, clock);
break;
case G200_WB:
case G200_EW3:
return mga_g200wb_set_plls(mdev, clock);
break;
case G200_EV:
return mga_g200ev_set_plls(mdev, clock);
break;
case G200_EH:
case G200_EH3:
return mga_g200eh_set_plls(mdev, clock);
break;
case G200_ER:
return mga_g200er_set_plls(mdev, clock);
break;
}
misc = RREG8(MGA_MISC_IN);
@@ -1556,15 +1552,18 @@ mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
struct drm_device *dev = &mdev->base;
struct dma_buf_map map;
void *vmap;
int ret;
vmap = drm_gem_shmem_vmap(fb->obj[0]);
if (drm_WARN_ON(dev, !vmap))
ret = drm_gem_shmem_vmap(fb->obj[0], &map);
if (drm_WARN_ON(dev, ret))
return; /* BUG: SHMEM BO should always be vmapped */
vmap = map.vaddr; /* TODO: Use mapping abstraction properly */
drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip);
drm_gem_shmem_vunmap(fb->obj[0], vmap);
drm_gem_shmem_vunmap(fb->obj[0], &map);
/* Always scanout image at VRAM offset 0 */
mgag200_set_startadd(mdev, (u32)0);

Some files were not shown because too many files have changed in this diff Show More