ASoC: qcom: q6dsp: fixes and updates
Merge series from Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>:

This patchset has 4 fixes and some enhancements to the Elite DSP driver support.

Fixes include:
- setting the correct flags for the expected behaviour of appl_ptr
- fixing the closing of copp instances
- fixing buffer alignment
- fixing state checks before closing the asm stream

Enhancements include:
- adding q6asm_get_hw_pointer and ack callback support
- simplifying code via the __free(kfree) mechanism
- using spinlock guards
- a few cleanups discovered while doing the two items above

Another set of updates is coming soon, which will add support for early memory mapping and for a few more modules in audioreach.
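As background for two of the enhancements above: the __free(kfree) annotation and lock guards come from the kernel's scope-based cleanup helpers in include/linux/cleanup.h. Below is a minimal, hypothetical sketch of the pattern; the function and lock names are illustrative only and are not taken from the q6dsp patches:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock); /* hypothetical lock, not from the series */

static int demo_read_state(size_t len)
{
	/* Buffer is kfree()d automatically on every return path. */
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* Lock is dropped automatically at the end of this scope. */
	guard(spinlock)(&demo_lock);

	/* ... fill buf while holding demo_lock ... */
	return 0;
}

Dropping the explicit kfree()/spin_unlock() calls is what lets such a series delete its manual error-unwind paths.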
.mailmap
@@ -644,6 +644,7 @@ Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
+Rae Moar <raemoar63@gmail.com> <rmoar@google.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
CREDITS
@@ -2036,6 +2036,10 @@ S: Botanicka' 68a
S: 602 00 Brno
S: Czech Republic

+N: Karsten Keil
+E: isdn@linux-pingi.de
+D: ISDN subsystem maintainer
+
N: Jakob Kemi
E: jakob.kemi@telia.com
D: V4L W9966 Webcam driver
@@ -180,9 +180,9 @@ allOf:
then:
properties:
reg:
minItems: 2
maxItems: 2
reg-names:
minItems: 2
maxItems: 2
else:
properties:
reg:
@@ -32,7 +32,7 @@ properties:

$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
-maxItems: 2
+maxItems: 4
items:
enum: [1, 2, 3, 4]
@@ -48,7 +48,7 @@ properties:

$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
-maxItems: 2
+maxItems: 5
items:
enum: [1, 2, 3, 4, 5]
@@ -605,6 +605,8 @@ operations:
reply: &pin-attrs
attributes:
- id
+- module-name
+- clock-id
- board-label
- panel-label
- package-label
@@ -19,9 +19,6 @@ Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024

Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025

-Please send bug reports to Matt Mackall <mpm@selenic.com>
-Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>

Introduction:
=============
MAINTAINERS
@@ -13260,10 +13260,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git mast
F: drivers/infiniband/ulp/isert

ISDN/CMTP OVER BLUETOOTH
-M: Karsten Keil <isdn@linux-pingi.de>
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
-S: Odd Fixes
+S: Orphan
W: http://www.isdn4linux.de
F: Documentation/isdn/
F: drivers/isdn/capi/
@@ -13272,10 +13270,8 @@ F: include/uapi/linux/isdn/
F: net/bluetooth/cmtp/

ISDN/mISDN SUBSYSTEM
-M: Karsten Keil <isdn@linux-pingi.de>
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
W: http://www.isdn4linux.de
F: drivers/isdn/Kconfig
F: drivers/isdn/Makefile
@@ -13429,9 +13425,12 @@ F: mm/kasan/
F: scripts/Makefile.kasan

KCONFIG
+M: Nathan Chancellor <nathan@kernel.org>
+M: Nicolas Schier <nsc@kernel.org>
L: linux-kbuild@vger.kernel.org
-S: Orphan
+S: Odd Fixes
Q: https://patchwork.kernel.org/project/linux-kbuild/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
@@ -13616,7 +13615,7 @@ F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendan.higgins@linux.dev>
M: David Gow <davidgow@google.com>
-R: Rae Moar <rmoar@google.com>
+R: Rae Moar <raemoar63@gmail.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
@@ -21332,6 +21331,7 @@ F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
M: Loic Poulain <loic.poulain@oss.qualcomm.com>
L: wcn36xx@lists.infradead.org
+L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
F: drivers/net/wireless/ath/wcn36xx/
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
+const u8 tmp3 = bpf2a64[TMP_REG_3];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const u8 priv_sp = bpf2a64[PRIVATE_SP];
@@ -1757,8 +1758,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
-emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
-dst = tmp2;
+emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx);
+dst = tmp3;
}
if (dst == fp) {
dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
@@ -158,7 +158,6 @@ config S390
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_KERNEL_PMD_MKWRITE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_THP_SWAP
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
@@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
+CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
@@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
-CONFIG_DIBS=y
-CONFIG_DIBS_LO=y
-CONFIG_XDP_SOCKETS=y
-CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
+# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
@@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
+CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
-CONFIG_DIBS=y
-CONFIG_DIBS_LO=y
-CONFIG_XDP_SOCKETS=y
-CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
+# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -33,7 +33,6 @@ CONFIG_NET=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y
CONFIG_SCSI=y
@@ -169,11 +169,18 @@ struct kmac_sha2_ctx {
u64 buflen[2];
};

+enum async_op {
+OP_NOP = 0,
+OP_UPDATE,
+OP_FINAL,
+OP_FINUP,
+};
+
/* phmac request context */
struct phmac_req_ctx {
struct hash_walk_helper hwh;
struct kmac_sha2_ctx kmac_ctx;
-bool final;
+enum async_op async_op;
};

/*
@@ -610,6 +617,7 @@ static int phmac_update(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
+req_ctx->async_op = OP_UPDATE;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -647,8 +655,7 @@ static int phmac_final(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
-req->nbytes = 0;
-req_ctx->final = true;
+req_ctx->async_op = OP_FINAL;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -676,13 +683,16 @@ static int phmac_finup(struct ahash_request *req)
if (rc)
goto out;

+req_ctx->async_op = OP_FINUP;
+
/* Try synchronous operations if no active engine usage */
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false);
if (rc == 0)
req->nbytes = 0;
+req_ctx->async_op = OP_FINAL;
}
-if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
+if (!rc && req_ctx->async_op == OP_FINAL &&
+!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false);
if (rc == 0)
goto out;
@@ -694,7 +704,7 @@ static int phmac_finup(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
-req_ctx->final = true;
+/* req->async_op has been set to either OP_FINUP or OP_FINAL */
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -855,15 +865,16 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)

/*
* Three kinds of requests come in here:
-* update when req->nbytes > 0 and req_ctx->final is false
-* final when req->nbytes = 0 and req_ctx->final is true
-* finup when req->nbytes > 0 and req_ctx->final is true
-* For update and finup the hwh walk needs to be prepared and
-* up to date but the actual nr of bytes in req->nbytes may be
-* any non zero number. For final there is no hwh walk needed.
+* 1. req->async_op == OP_UPDATE with req->nbytes > 0
+* 2. req->async_op == OP_FINUP with req->nbytes > 0
+* 3. req->async_op == OP_FINAL
+* For update and finup the hwh walk has already been prepared
+* by the caller. For final there is no hwh walk needed.
*/

-if (req->nbytes) {
+switch (req_ctx->async_op) {
+case OP_UPDATE:
+case OP_FINUP:
rc = phmac_kmac_update(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -880,10 +891,11 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
hwh_advance(hwh, rc);
goto out;
}
req->nbytes = 0;
-}

-if (req_ctx->final) {
+if (req_ctx->async_op == OP_UPDATE)
+break;
+req_ctx->async_op = OP_FINAL;
+fallthrough;
+case OP_FINAL:
rc = phmac_kmac_final(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -897,10 +909,14 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
cond_resched();
return -ENOSPC;
}
+break;
+default:
+/* unknown/unsupported/unimplemented asynch op */
+return -EOPNOTSUPP;
}

out:
-if (rc || req_ctx->final)
+if (rc || req_ctx->async_op == OP_FINAL)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("request complete with rc=%d\n", rc);
local_bh_disable();
@@ -145,7 +145,6 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
-u8 irqs_registered : 1;
u8 tid_avail : 1;
u8 rtr_avail : 1; /* Relaxed translation allowed */
unsigned int devfn; /* DEVFN part of the RID*/
@@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)

static int add_marker(unsigned long start, unsigned long end, const char *name)
{
-size_t oldsize, newsize;
+struct addr_marker *new;
+size_t newsize;

-oldsize = markers_cnt * sizeof(*markers);
-newsize = oldsize + 2 * sizeof(*markers);
-if (!oldsize)
-markers = kvmalloc(newsize, GFP_KERNEL);
-else
-markers = kvrealloc(markers, newsize, GFP_KERNEL);
-if (!markers)
-goto error;
+newsize = (markers_cnt + 2) * sizeof(*markers);
+new = kvrealloc(markers, newsize, GFP_KERNEL);
+if (!new)
+return -ENOMEM;
+markers = new;
markers[markers_cnt].is_start = 1;
markers[markers_cnt].start_address = start;
markers[markers_cnt].size = end - start;
@@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)
markers[markers_cnt].name = name;
markers_cnt++;
return 0;
-error:
-markers_cnt = 0;
-return -ENOMEM;
}

static int pt_dump_init(void)
@@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
* is unbound or probed and that userspace can't access its
* configuration space while we perform recovery.
*/
-pci_dev_lock(pdev);
+device_lock(&pdev->dev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
@@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
driver->err_handler->resume(pdev);
pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
out_unlock:
-pci_dev_unlock(pdev);
+device_unlock(&pdev->dev);
zpci_report_status(zdev, "recovery", status_str);

return ers_res;
@@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev)
else
rc = zpci_set_airq(zdev);

-if (!rc)
-zdev->irqs_registered = 1;
-
return rc;
}
@@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev)
else
rc = zpci_clear_airq(zdev);

-if (!rc)
-zdev->irqs_registered = 0;
-
return rc;
}
@@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);

-if (!zdev->irqs_registered)
-zpci_set_irq(zdev);
+zpci_set_irq(zdev);
return true;
}
@@ -75,7 +75,7 @@ export BITS
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
@@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void)
break;

case INTEL_PANTHERLAKE_L:
+case INTEL_WILDCATLAKE_L:
pr_cont("Pantherlake Hybrid events, ");
name = "pantherlake_hybrid";
goto lnl_common;
@@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status,
{
u64 val;

-WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
+WARN_ON_ONCE(is_hybrid() &&
+hybrid_pmu(event->pmu)->pmu_type == hybrid_big);

dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
val = hybrid_var(event->pmu, pebs_data_source)[dse];
@@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init),
X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_uncore_init),
+X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),
@@ -150,12 +150,12 @@

#define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */

-#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
+#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Darkmont */

#define INTEL_WILDCATLAKE_L IFM(6, 0xD5)

-#define INTEL_NOVALAKE IFM(18, 0x01)
-#define INTEL_NOVALAKE_L IFM(18, 0x03)
+#define INTEL_NOVALAKE IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */
+#define INTEL_NOVALAKE_L IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */

/* "Small Core" Processors (Atom/E-Core) */
@@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
+KCFI_REFERENCE(clear_page_orig);
+KCFI_REFERENCE(clear_page_rep);
+KCFI_REFERENCE(clear_page_erms);

static inline void clear_page(void *page)
{
@@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
case 0x50 ... 0x5f:
-case 0x90 ... 0xaf:
+case 0x80 ... 0xaf:
case 0xc0 ... 0xcf:
setup_force_cpu_cap(X86_FEATURE_ZEN6);
break;
@@ -1035,8 +1035,18 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
}
}

+static const struct x86_cpu_id zen5_rdseed_microcode[] = {
+ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
+ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
+};
+
static void init_amd_zen5(struct cpuinfo_x86 *c)
{
+if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
+clear_cpu_cap(c, X86_FEATURE_RDSEED);
+msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
+pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
+}
}

static void init_amd(struct cpuinfo_x86 *c)
@@ -233,13 +233,31 @@ static bool need_sha_check(u32 cur_rev)
return true;
}

+static bool cpu_has_entrysign(void)
+{
+unsigned int fam = x86_family(bsp_cpuid_1_eax);
+unsigned int model = x86_model(bsp_cpuid_1_eax);
+
+if (fam == 0x17 || fam == 0x19)
+return true;
+
+if (fam == 0x1a) {
+if (model <= 0x2f ||
+(0x40 <= model && model <= 0x4f) ||
+(0x60 <= model && model <= 0x6f))
+return true;
+}
+
+return false;
+}
+
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
int i;

-if (x86_family(bsp_cpuid_1_eax) < 0x17)
+if (!cpu_has_entrysign())
return true;

if (!need_sha_check(cur_rev))
@@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)
!fpregs_state_valid(fpu, smp_processor_id()))
os_xrstor_supervisor(fpu->fpstate);

+/* Ensure XFD state is in sync before reloading XSTATE */
+xfd_update_state(fpu->fpstate);
+
/* Reset user states in registers. */
restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);
@@ -2701,7 +2701,7 @@ st: if (is_imm8(insn->off))
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
if (bpf_prog_was_classic(bpf_prog) &&
-!capable(CAP_SYS_ADMIN)) {
+!ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
u8 *ip = image + addrs[i - 1];

if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
@@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
}

if (!bio_crypt_check_alignment(bio)) {
-bio->bi_status = BLK_STS_IOERR;
+bio->bi_status = BLK_STS_INVAL;
goto fail;
}
@@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;

+if (mrrm->header.revision != 1)
+return -EINVAL;
+
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;
@@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;

mutex_lock(&video->device_list_lock);
-list_for_each_entry(dev, &video->video_device_list, entry)
+list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
+cancel_delayed_work_sync(&dev->switch_brightness_work);
+}
mutex_unlock(&video->device_list_lock);

acpi_video_bus_stop_devices(video);
@@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)

input_set_drvdata(input, device);
error = input_register_device(input);
-if (error)
+if (error) {
+input_free_device(input);
goto err_remove_fs;
+}

switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
@@ -49,6 +49,7 @@ struct acpi_fan_fst {
};

struct acpi_fan {
+acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
@@ -59,14 +60,14 @@ struct acpi_fan {
struct device_attribute fine_grain_control;
};

-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);

#if IS_REACHABLE(CONFIG_HWMON)
-int devm_acpi_fan_create_hwmon(struct acpi_device *device);
+int devm_acpi_fan_create_hwmon(struct device *dev);
#else
-static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
+static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
#endif

#endif
@@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;

-status = acpi_fan_get_fst(acpi_dev, &fst);
+status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;
@@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}

-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;

-status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
-if (ACPI_FAILURE(status)) {
-dev_err(&device->dev, "Get fan state failed\n");
-return -ENODEV;
-}
+status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
+if (ACPI_FAILURE(status))
+return -EIO;

obj = buffer.pointer;
-if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
-obj->package.count != 3 ||
-obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
-dev_err(&device->dev, "Invalid _FST data\n");
-ret = -EINVAL;
+if (!obj)
+return -ENODATA;
+
+if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ret = -EPROTO;
goto err;
}
+
+if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
+obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ret = -EPROTO;
+goto err;
+}
@@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;

-status = acpi_fan_get_fst(device, &fst);
+status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;

@@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;

+if (!device)
+return -ENODEV;
+
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}

+fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);

@@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
}

if (fan->has_fst) {
-result = devm_acpi_fan_create_hwmon(device);
+result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;
@@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
-struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;

-ret = acpi_fan_get_fst(adev, &fst);
+ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;

@@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};

-int devm_acpi_fan_create_hwmon(struct acpi_device *device)
+int devm_acpi_fan_create_hwmon(struct device *dev)
{
-struct acpi_fan *fan = acpi_driver_data(device);
+struct acpi_fan *fan = dev_get_drvdata(dev);
struct device *hdev;

-hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
-&acpi_fan_hwmon_chip_info, NULL);
+hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info,
+NULL);
return PTR_ERR_OR_ZERO(hdev);
}
@@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* Baud Rate field. If this field is zero or not present, Configured
* Baud Rate is used.
*/
-if (table->precise_baudrate)
+if (table->header.revision >= 4 && table->precise_baudrate)
baud_rate = table->precise_baudrate;
else switch (table->baud_rate) {
case 0:
@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);

-return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
-lock_key, lock_name);
+return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);

@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);

-return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
-lock_key, lock_name);
+return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;

list_for_each_entry(core, &bus->cores, list) {
+struct device_node *np;
+
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;

+np = core->dev.of_node;
+if (np && !of_device_is_available(np))
+continue;
+
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)
@@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
+.dma_alignment = dev->blocksize - 1,
};

struct nullb *nullb;
@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;

struct sk_buff *rx_skb[2];
+struct hci_uart hu;
};

static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);

-data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);

data->hdev = hdev;
+data->hu.hdev = hdev;

SET_HCIDEV_DEV(hdev, &intf->dev);
@@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);

-/* This interrupt is triggered by the firmware after updating
-* boot_stage register and image_response register
-*/
-if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
-btintel_pcie_msix_gp0_handler(data);
-
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
@@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_tx_handle(data);
}

+/* This interrupt is triggered by the firmware after updating
+* boot_stage register and image_response register
+*/
+if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+btintel_pcie_msix_gp0_handler(data);
+
/*
* Before sending the interrupt the HW disables it to prevent a nested
* interrupt. This is done by writing 1 to the corresponding bit in
@@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)

sdio_claim_host(bdev->func);

+/* set drv_pmctrl if BT is closed before doing reset */
+if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+sdio_enable_func(bdev->func);
+btmtksdio_drv_pmctrl(bdev);
+}
+
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);
@@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
goto err;
}

+/* set fw_pmctrl back if BT is closed after doing reset */
+if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+btmtksdio_fw_pmctrl(bdev);
+sdio_disable_func(bdev->func);
+}
+
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);
@@ -79,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;

const struct btmtkuart_data *data;
+struct hci_uart hu;
};

#define btmtkuart_is_standalone(bdev) \
@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;

-bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}

bdev->hdev = hdev;
+bdev->hu.hdev = hdev;

hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);
@@ -212,6 +212,7 @@ struct btnxpuart_dev {
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
struct reset_control *pdn;
+struct hci_uart hu;
};

#define NXP_V1_FW_REQ_PKT 0xa5
@@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,

ps_start_timer(nxpdev);

-nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
@@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
reset_control_deassert(nxpdev->pdn);

nxpdev->hdev = hdev;
+nxpdev->hu.hdev = hdev;

hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

-ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {
@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
struct aml_data *aml_data = hu->priv;
int err;

-aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
aml_recv_pkts,
ARRAY_SIZE(aml_recv_pkts));
if (IS_ERR(aml_data->rx_skb)) {
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;

-ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
@@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

-bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

-h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}

-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
-struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+struct hci_dev *hdev = hu->hdev;

/* Check for error from previous call */
if (IS_ERR(skb))
@@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

-intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

-ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);
@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;

-mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
-mrvl_recv_pkts,
-ARRAY_SIZE(mrvl_recv_pkts));
+mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
+mrvl_recv_pkts,
+ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
@@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

-btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
-nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
+btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
+nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);
@@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

-qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
@@ -162,7 +162,7 @@ struct h4_recv_pkt {
int h4_init(void);
int h4_deinit(void);

-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif
@@ -318,10 +318,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,

/*
* Use a physical idle state, not busy polling, unless a timer
-* is going to trigger soon enough.
+* is going to trigger soon enough or the exit latency of the
+* idle state in question is greater than the predicted idle
+* duration.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
-s->target_residency_ns <= data->next_timer_ns) {
+s->target_residency_ns <= data->next_timer_ns &&
+s->exit_latency_ns <= predicted_ns) {
predicted_ns = s->target_residency_ns;
idx = i;
break;
@@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
err_engine_rsa_start:
crypto_engine_exit(acry_dev->crypt_engine_rsa);
clk_exit:
-clk_disable_unprepare(acry_dev->clk);

return rc;
}
@@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)
aspeed_acry_unregister(acry_dev);
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
-clk_disable_unprepare(acry_dev->clk);
}

MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
@@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
"RCU protection is required for safe access to returned string");

if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-return fence->ops->get_driver_name(fence);
+return fence->ops->get_timeline_name(fence);
else
return "signaled-timeline";
}
@@ -1559,16 +1559,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
-if (!IS_ERR(pin)) {
-if (!dpll_pin_available(pin)) {
-nlmsg_free(msg);
-return -ENODEV;
-}
-ret = dpll_msg_add_pin_handle(msg, pin);
-if (ret) {
-nlmsg_free(msg);
-return ret;
-}
+if (IS_ERR(pin)) {
+nlmsg_free(msg);
+return PTR_ERR(pin);
+}
+if (!dpll_pin_available(pin)) {
+nlmsg_free(msg);
+return -ENODEV;
+}
+ret = dpll_msg_add_pin_handle(msg, pin);
+if (ret) {
+nlmsg_free(msg);
+return ret;
}
genlmsg_end(msg, hdr);

@@ -1735,12 +1737,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}

dpll = dpll_device_find_from_nlattr(info);
-if (!IS_ERR(dpll)) {
-ret = dpll_msg_add_dev_handle(msg, dpll);
-if (ret) {
-nlmsg_free(msg);
-return ret;
-}
+if (IS_ERR(dpll)) {
+nlmsg_free(msg);
+return PTR_ERR(dpll);
+}
+ret = dpll_msg_add_dev_handle(msg, dpll);
+if (ret) {
+nlmsg_free(msg);
+return ret;
}
genlmsg_end(msg, hdr);
@@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
}

is_diff = zl3073x_out_is_diff(zldev, out);
-is_enabled = zl3073x_out_is_enabled(zldev, out);
+is_enabled = zl3073x_output_pin_is_enabled(zldev, index);
}

/* Skip N-pin if the corresponding input/output is differential */
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}

+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+case IP_VERSION(6, 1, 1):
+return adev->pm.fw_version < 0x0a640500;
+default:
+return false;
+}
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+struct amdgpu_vpe *vpe = &adev->vpe;
+
+if (!adev->pm.dpm_enabled)
+return 0;
+
+return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;

fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+if (fences)
+goto reschedule;

-if (fences == 0)
-amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
-else
-schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+goto reschedule;
+
+amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+return;
+
+reschedule:
+schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}

static int vpe_common_init(struct amdgpu_vpe *vpe)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+int r;

mutex_lock(&dm->dc_lock);

@@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)

if (dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
+
+r = amdgpu_dpm_pause_power_profile(adev, true);
+if (r)
+dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+
+r = amdgpu_dpm_pause_power_profile(adev, false);
+if (r)
+dev_warn(adev->dev, "failed to restore the power profile mode\n");
}

mutex_unlock(&dm->dc_lock);
@@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;

-if (acrtc->otg_inst == -1)
-goto skip;
+if (enable && !acrtc->base.enabled) {
+drm_dbg_vbl(crtc->dev,
+"Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+acrtc->crtc_id, acrtc->base.enabled);
+return -EINVAL;
+}

irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

@@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
@@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
@@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
-} else {
-REG_SET(CM_MEM_PWR_CTRL, 0,
-BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */

/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
-table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;
@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
-table->PCIeBootLinkLevel = 0;
+table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;

result = iceland_populate_smc_svi2_config(hwmgr, table);
@@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
-if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;

table_size = smu_table->tables[table_index].size;
@@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
__ast_write8(addr, reg + 1, val);
}

-static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
u8 val)
{
-u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);

-tmp |= val;
-__ast_write8_i(addr, reg, index, tmp);
+val &= ~preserve_mask;
+__ast_write8_i(addr, reg, index, tmp | val);
}

static inline u32 ast_read32(struct ast_device *ast, u32 reg)
@@ -280,7 +280,7 @@ sanity:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
-- ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+# - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
- |
set -eu
image_tags=(
@@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
-__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
-drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+if (shadow_plane_state) {
+__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+} else {
+__drm_atomic_helper_plane_reset(plane, NULL);
+}
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
@@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
bool switch_mmu_context = gpu->mmu_context != mmu_context;
-unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);
@@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}

+static bool fixup_dmc_evt(struct intel_display *display,
+enum intel_dmc_id dmc_id,
+i915_reg_t reg_ctl, u32 *data_ctl,
+i915_reg_t reg_htp, u32 *data_htp)
+{
+if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+return false;
+
+if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+return false;
+
+/* make sure reg_ctl and reg_htp are for the same event */
+if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+return false;
+
+/*
+* On ADL-S the HRR event handler is not restored after DC6.
+* Clear it to zero from the beginning to avoid mismatches later.
+*/
+if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+*data_ctl = 0;
+*data_htp = 0;
+return true;
+}
+
+return false;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
}

+for (i = 0; i < mmio_count - 1; i++) {
+u32 orig_mmiodata[2] = {
+dmc_info->mmiodata[i],
+dmc_info->mmiodata[i+1],
+};
+
+if (!fixup_dmc_evt(display, dmc_id,
+dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+continue;
+
+drm_dbg_kms(display->drm,
+" mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+orig_mmiodata[0], dmc_info->mmiodata[i]);
+drm_dbg_kms(display->drm,
+" mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+}
+
for (i = 0; i < mmio_count; i++) {
drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
-i, mmioaddr[i], mmiodata[i],
+i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
@@ -25,19 +25,18 @@

struct imx_parallel_display_encoder {
	struct drm_encoder encoder;
	struct drm_bridge bridge;
	struct imx_parallel_display *pd;
};

struct imx_parallel_display {
	struct device *dev;
	u32 bus_format;
	struct drm_bridge *next_bridge;
	struct drm_bridge bridge;
};

static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
	return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
	return container_of(b, struct imx_parallel_display, bridge);
}

static const u32 imx_pd_bus_fmts[] = {
@@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
	if (IS_ERR(imxpd_encoder))
		return PTR_ERR(imxpd_encoder);

	imxpd_encoder->pd = imxpd;
	encoder = &imxpd_encoder->encoder;
	bridge = &imxpd_encoder->bridge;
	bridge = &imxpd->bridge;

	ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
	if (ret)
		return ret;

	bridge->funcs = &imx_pd_bridge_funcs;
	drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);

	connector = drm_bridge_connector_init(drm, encoder);
@@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)
	u32 bus_format = 0;
	const char *fmt;

	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
	if (!imxpd)
		return -ENOMEM;
	imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
				      &imx_pd_bridge_funcs);
	if (IS_ERR(imxpd))
		return PTR_ERR(imxpd);

	/* port@1 is the output port */
	imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
@@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev)

	platform_set_drvdata(pdev, imxpd);

	devm_drm_bridge_add(dev, &imxpd->bridge);

	return component_add(dev, &imx_pd_ops);
}

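The rework above drops the intermediate imx_parallel_display_encoder wrapper and embeds the drm_bridge directly in the driver struct, so bridge_to_imxpd() becomes a plain container_of() upcast. A freestanding sketch of that embed-and-upcast idiom, with the classic offsetof-based container_of and invented types standing in for the DRM ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bridge {
	const char *name;
};

struct panel_driver {
	int bus_format;
	struct bridge bridge;   /* embedded, not pointed-to */
};

/* Recover the driver struct from a pointer to its embedded bridge. */
static struct panel_driver *bridge_to_driver(struct bridge *b)
{
	return container_of(b, struct panel_driver, bridge);
}

int main(void)
{
	struct panel_driver pd = { .bus_format = 42, .bridge = { "pd" } };
	struct bridge *b = &pd.bridge;

	printf("%d\n", bridge_to_driver(b)->bus_format); /* prints 42 */
	return 0;
}

Embedding also lets helpers such as devm_drm_bridge_alloc() allocate the outer struct and initialize the member in one step, which is the design the probe hunk switches to.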
@@ -686,10 +686,6 @@ static int mtk_drm_bind(struct device *dev)
	for (i = 0; i < private->data->mmsys_dev_num; i++)
		private->all_drm_private[i]->drm = NULL;
err_put_dev:
	for (i = 0; i < private->data->mmsys_dev_num; i++) {
		/* For device_find_child in mtk_drm_get_all_priv() */
		put_device(private->all_drm_private[i]->dev);
	}
	put_device(private->mutex_dev);
	return ret;
}
@@ -697,18 +693,12 @@ static int mtk_drm_bind(struct device *dev)
static void mtk_drm_unbind(struct device *dev)
{
	struct mtk_drm_private *private = dev_get_drvdata(dev);
	int i;

	/* for multi mmsys dev, unregister drm dev in mmsys master */
	if (private->drm_master) {
		drm_dev_unregister(private->drm);
		mtk_drm_kms_deinit(private->drm);
		drm_dev_put(private->drm);

		for (i = 0; i < private->data->mmsys_dev_num; i++) {
			/* For device_find_child in mtk_drm_get_all_priv() */
			put_device(private->all_drm_private[i]->dev);
		}
		put_device(private->mutex_dev);
	}
	private->mtk_drm_bound = false;

@@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
	return true;
}

#define NEXT_BLK(blk) \
	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))

static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)

	for (blk = (const struct block_header *) fw_image->data;
	     (const u8*) blk < fw_image->data + fw_image->size;
	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
	     blk = NEXT_BLK(blk)) {
		if (blk->size == 0)
			continue;

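NEXT_BLK() advances over a stream of variable-length records: each block is a fixed header followed by blk->size payload bytes, so the stride is header size plus payload size in bytes (the old expression indexed a u32 array and got the arithmetic wrong). A self-contained sketch of walking such a stream; the block contents are invented, only the stride arithmetic matches the macro:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct block_header {
	uint32_t type;
	uint32_t size;          /* payload bytes that follow the header */
	uint8_t  data[];
};

#define NEXT_BLK(blk) \
	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))

int main(void)
{
	uint8_t buf[64];
	size_t off = 0;

	/* build two blocks: 4 payload bytes, then 8 payload bytes */
	struct block_header h1 = { .type = 1, .size = 4 };
	struct block_header h2 = { .type = 2, .size = 8 };
	memcpy(buf + off, &h1, sizeof(h1)); off += sizeof(h1) + h1.size;
	memcpy(buf + off, &h2, sizeof(h2)); off += sizeof(h2) + h2.size;

	const struct block_header *blk = (const void *)buf;
	const uint8_t *end = buf + off;

	for (; (const uint8_t *)blk < end; blk = NEXT_BLK(blk))
		printf("block type %u, %u payload bytes\n",
		       (unsigned)blk->type, (unsigned)blk->size);
	return 0;
}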
@@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
	return 0;
}

static bool
adreno_smmu_has_prr(struct msm_gpu *gpu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
	return adreno_smmu && adreno_smmu->set_prr_addr;
}

int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
		     uint32_t param, uint64_t *value, uint32_t *len)
{

@@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
							    dpu_kms->perf.perf_cfg);

	if (dpu_kms->catalog->caps->has_3d_merge)
		adjusted_mode_clk /= 2;

	/*
	 * The given mode, adjusted for the perf clock factor, should not exceed
	 * the max core clock rate
@@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {
		.base = 0x200, .len = 0xa0,}, \
	.csc_blk = {.name = "csc", \
		.base = 0x320, .len = 0x100,}, \
	.format_list = plane_formats_yuv, \
	.num_formats = ARRAY_SIZE(plane_formats_yuv), \
	.format_list = plane_formats, \
	.num_formats = ARRAY_SIZE(plane_formats), \
	.rotation_cfg = NULL, \
	}

@@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
	int i;

	for (i = 0; i < DPU_MAX_PLANES; i++) {
		uint32_t w = src_w, h = src_h;

		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
			src_w /= chroma_subsmpl_h;
			src_h /= chroma_subsmpl_v;
			w /= chroma_subsmpl_h;
			h /= chroma_subsmpl_v;
		}

		pixel_ext->num_ext_pxls_top[i] = src_h;
		pixel_ext->num_ext_pxls_left[i] = src_w;
		pixel_ext->num_ext_pxls_top[i] = h;
		pixel_ext->num_ext_pxls_left[i] = w;
	}
}

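The pixel-ext fix is a classic loop-local-copy repair: the old code divided the shared src_w/src_h on the first chroma plane, so every later plane saw already-halved values. A tiny sketch of the bug class and the remedy, with the plane layout simplified to made-up constants:

#include <stdio.h>

#define NPLANES 4
#define CHROMA1 1
#define CHROMA2 2

static void setup(unsigned int src_w, unsigned int src_h,
		  unsigned int sub_h, unsigned int sub_v,
		  unsigned int out_w[NPLANES], unsigned int out_h[NPLANES])
{
	for (int i = 0; i < NPLANES; i++) {
		/* copies: dividing these cannot leak into later iterations */
		unsigned int w = src_w, h = src_h;

		if (i == CHROMA1 || i == CHROMA2) {
			w /= sub_h;
			h /= sub_v;
		}
		out_w[i] = w;
		out_h[i] = h;
	}
}

int main(void)
{
	unsigned int w[NPLANES], h[NPLANES];

	setup(1920, 1080, 2, 2, w, h);
	for (int i = 0; i < NPLANES; i++)
		printf("plane %d: %ux%u\n", i, w[i], h[i]);
	/* planes 0 and 3 stay 1920x1080; only 1 and 2 are halved */
	return 0;
}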
@@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
	 * We already have verified scaling against platform limitations.
	 * Now check if the SSPP supports scaling at all.
	 */
	if (!sblk->scaler_blk.len &&
	if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
	    ((drm_rect_width(&new_plane_state->src) >> 16 !=
	      drm_rect_width(&new_plane_state->dst)) ||
	     (drm_rect_height(&new_plane_state->src) >> 16 !=
@@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
							 state, plane_state,
							 prev_adjacent_plane_state);
		if (ret)
			break;
			return ret;

		prev_adjacent_plane_state = plane_state;
	}

@@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,

	if (!reqs->scale && !reqs->yuv)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
	if (!hw_sspp && reqs->scale)
	if (!hw_sspp && !reqs->yuv)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
	if (!hw_sspp)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);

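dpu_rm_reserve_sspp() walks pipe classes from least to most capable, DMA, then RGB, then VIG, so the scarce scaling/YUV-capable pipes stay free for planes that actually need them; the fix lets RGB pipes (which can scale but not handle YUV) serve scaled RGB planes. A generic sketch of that preference-ordered fallback; the pipe classes and the try helper are stand-ins, not the driver's API:

#include <stdio.h>

enum pipe_class { PIPE_NONE, PIPE_DMA, PIPE_RGB, PIPE_VIG };

/* stand-in allocator: pretend only VIG pipes are currently free */
static enum pipe_class try_reserve(enum pipe_class cls)
{
	return cls == PIPE_VIG ? PIPE_VIG : PIPE_NONE;
}

static enum pipe_class reserve(int need_scale, int need_yuv)
{
	enum pipe_class got = PIPE_NONE;

	if (!need_scale && !need_yuv)        /* cheapest class first */
		got = try_reserve(PIPE_DMA);
	if (got == PIPE_NONE && !need_yuv)   /* RGB can scale, not YUV */
		got = try_reserve(PIPE_RGB);
	if (got == PIPE_NONE)                /* VIG can do everything */
		got = try_reserve(PIPE_VIG);
	return got;
}

int main(void)
{
	printf("%d\n", reserve(1, 0)); /* falls through to VIG here */
	return 0;
}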
@@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
		DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
			  fb->width, dpu_wb_conn->maxlinewidth);
		return -EINVAL;
	} else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
		return -EINVAL;
	}

	return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);

@@ -109,7 +109,6 @@ struct msm_dsi_phy {
	struct msm_dsi_dphy_timing timing;
	const struct msm_dsi_phy_cfg *cfg;
	void *tuning_cfg;
	void *pll_data;

	enum msm_dsi_phy_usecase usecase;
	bool regulator_ldo_mode;

@@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
	u32 data;

	spin_lock_irqsave(&pll->pll_enable_lock, flags);
	if (pll->pll_enable_cnt++) {
		spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
		WARN_ON(pll->pll_enable_cnt == INT_MAX);
		return;
	}
	pll->pll_enable_cnt++;
	WARN_ON(pll->pll_enable_cnt == INT_MAX);

	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
	data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
@@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
	spin_lock_init(&pll_7nm->pll_enable_lock);

	pll_7nm->phy = phy;
	phy->pll_data = pll_7nm;

	ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
	if (ret) {
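The PLL-bias hunks replace an early-return-while-unlocking dance with a plain increment under the lock, and the enable/disable paths below seed the counter to a known value. A userspace analogue of that refcounted power-on pattern, with a pthread mutex standing in for the kernel spinlock and the register write reduced to a flag:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int enable_cnt;
static int hw_on;               /* stands in for the PLL bias bit */

static void bias_enable(void)
{
	pthread_mutex_lock(&lock);
	if (enable_cnt++ == 0)  /* first user actually powers the block */
		hw_on = 1;
	pthread_mutex_unlock(&lock);
}

static void bias_disable(void)
{
	pthread_mutex_lock(&lock);
	if (--enable_cnt == 0)  /* last user powers it back off */
		hw_on = 0;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	bias_enable();
	bias_enable();
	bias_disable();
	printf("on=%d cnt=%d\n", hw_on, enable_cnt); /* on=1 cnt=1 */
	bias_disable();
	printf("on=%d cnt=%d\n", hw_on, enable_cnt); /* on=0 cnt=0 */
	return 0;
}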
@@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	struct dsi_pll_7nm *pll = phy->pll_data;
	void __iomem *base = phy->base;
	bool less_than_1500_mhz;
	unsigned long flags;
	u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
	u32 glbl_pemph_ctrl_0;
	u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
@@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
		glbl_rescode_bot_ctrl = 0x3c;
	}

	spin_lock_irqsave(&pll->pll_enable_lock, flags);
	pll->pll_enable_cnt = 1;
	/* de-assert digital and pll power down */
	data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
	       DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);

	/* Assert PLL core reset */
	writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
@@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)

static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
	struct dsi_pll_7nm *pll = phy->pll_data;
	void __iomem *base = phy->base;
	unsigned long flags;
	u32 data;

	DBG("");
@@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
	writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);

	spin_lock_irqsave(&pll->pll_enable_lock, flags);
	pll->pll_enable_cnt = 0;
	/* Turn off all PHY blocks */
	writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);

	/* make sure phy is turned off */
	wmb();

@@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
		put_pages(obj);
	}

	if (obj->resv != &obj->_resv) {
	/*
	 * In error paths, we could end up here before msm_gem_new_handle()
	 * has changed obj->resv to point to the shared resv. In this case,
	 * we don't want to drop a ref to the shared r_obj that we haven't
	 * taken yet.
	 */
	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
		struct drm_gem_object *r_obj =
			container_of(obj->resv, struct drm_gem_object, _resv);

		WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));

		/* Drop reference we hold to shared resv obj: */
		drm_gem_object_put(r_obj);
	}

@@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
					   submit->user_fence,
					   DMA_RESV_USAGE_BOOKKEEP,
					   DMA_RESV_USAGE_BOOKKEEP);

		last_fence = vm->last_fence;
		vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
		dma_fence_put(last_fence);

		return;
	}
@@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
		dma_resv_add_fence(obj->resv, submit->user_fence,
				   DMA_RESV_USAGE_READ);
	}

	last_fence = vm->last_fence;
	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
	dma_fence_put(last_fence);
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,

@@ -971,6 +971,7 @@ static int
lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
{
	struct drm_device *dev = job->vm->drm;
	struct msm_drm_private *priv = dev->dev_private;
	int i = job->nr_ops++;
	int ret = 0;
@@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
		break;
	}

	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
	    !adreno_smmu_has_prr(priv->gpu)) {
		ret = UERR(EINVAL, dev, "PRR not supported\n");
	}

	return ret;
}

@@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
	 * Maybe we could allow just UNMAP ops? OTOH userspace should just
	 * immediately close the device file and all will be torn down.
	 */
	if (to_msm_vm(ctx->vm)->unusable)
	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
		return UERR(EPIPE, dev, "context is unusable");

	/*

@@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)
	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

static inline bool
adreno_smmu_has_prr(struct msm_gpu *gpu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);

	if (!adreno_smmu)
		return false;

	return adreno_smmu && adreno_smmu->set_prr_addr;
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

@@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall

	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
	if (ret != p->count) {
		kfree(p->pages);
		p->pages = NULL;
		p->count = ret;
		return -ENOMEM;
	}
@@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
	struct kmem_cache *pt_cache = get_pt_cache(mmu);
	uint32_t remaining_pt_count = p->count - p->ptr;

	if (!p->pages)
		return;

	if (p->count > 0)
		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);

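kmem_cache_alloc_bulk() may return fewer objects than requested, and the hunk makes the error path record exactly what was obtained before failing, so cleanup frees the right number. A userspace sketch of the same all-or-nothing wrapper around a partially-succeeding bulk allocator, with malloc standing in for the slab cache:

#include <stdio.h>
#include <stdlib.h>

/* pretend bulk allocator: may stop early, returns count obtained */
static size_t alloc_bulk(size_t n, void **out)
{
	size_t got;

	for (got = 0; got < n; got++) {
		out[got] = malloc(64);
		if (!out[got])
			break;
	}
	return got;
}

/* all-or-nothing: on a shortfall, release the partial batch */
static int alloc_all(size_t n, void **out)
{
	size_t got = alloc_bulk(n, out);

	if (got != n) {
		for (size_t i = 0; i < got; i++)
			free(out[i]);
		return -1;      /* -ENOMEM in the kernel version */
	}
	return 0;
}

int main(void)
{
	void *pages[8];

	if (alloc_all(8, pages) == 0) {
		puts("got all 8");
		for (int i = 0; i < 8; i++)
			free(pages[i]);
	}
	return 0;
}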
@@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
	return 0;
}

static bool
nouveau_sched_job_list_empty(struct nouveau_sched *sched)
{
	bool empty;

	spin_lock(&sched->job.list.lock);
	empty = list_empty(&sched->job.list.head);
	spin_unlock(&sched->job.list.lock);

	return empty;
}

static void
nouveau_sched_fini(struct nouveau_sched *sched)
@@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)
	struct drm_gpu_scheduler *drm_sched = &sched->base;
	struct drm_sched_entity *entity = &sched->entity;

	rmb(); /* for list_empty to work without lock */
	wait_event(sched->job.wq, list_empty(&sched->job.list.head));
	wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));

	drm_sched_entity_fini(entity);
	drm_sched_fini(drm_sched);

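The nouveau change stops testing list_empty() with a bare read plus rmb() and instead samples the list under its spinlock inside the wait predicate. A condensed pthread analogue of waiting for a queue to drain with the emptiness check always done under the lock; the queue shape is invented for the sketch:

#include <pthread.h>
#include <stdbool.h>

struct jobq {
	pthread_mutex_t lock;
	pthread_cond_t  drained;
	int             pending;
};

static bool jobq_empty(struct jobq *q)
{
	bool empty;

	pthread_mutex_lock(&q->lock);    /* check under the lock ...  */
	empty = (q->pending == 0);
	pthread_mutex_unlock(&q->lock);  /* ... never by a bare read  */
	return empty;
}

static void jobq_wait_drained(struct jobq *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->pending > 0)
		pthread_cond_wait(&q->drained, &q->lock);
	pthread_mutex_unlock(&q->lock);
}

static void jobq_complete(struct jobq *q)
{
	pthread_mutex_lock(&q->lock);
	if (--q->pending == 0)
		pthread_cond_signal(&q->drained);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct jobq q = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER, 0 };

	(void)jobq_empty(&q);
	jobq_wait_drained(&q);  /* returns immediately: nothing pending */
	(void)jobq_complete;    /* would be called from worker threads  */
	return 0;
}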
@@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM;
			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;

	kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
					   &kingdisplay_panel_funcs,

@@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};

/*
 * The mode data for this panel has been reverse engineered without access
 * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all
 * other panels results in garbage data on the display.
 */
static const struct drm_display_mode t28cp45tn89_mode = {
	.clock = 6008,
	.hdisplay = 240,
@@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {
	.vtotal = 320 + 8 + 4 + 4,
	.width_mm = 43,
	.height_mm = 57,
	.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,
};

static const struct drm_display_mode et028013dma_mode = {

@@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;
		return ret;

	pci_set_drvdata(pdev, ddev);

	ret = radeon_driver_load_kms(ddev, flags);
	if (ret)
		goto err_agp;
		goto err;

	ret = drm_dev_register(ddev, flags);
	if (ret)
		goto err_agp;
		goto err;

	if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
		format = drm_format_info(DRM_FORMAT_C8);
@@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,

	return 0;

err_agp:
err:
	pci_disable_device(pdev);
err_free:
	drm_dev_put(ddev);
	return ret;
}

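The radeon probe cleanup collapses the err_agp/err_free labels into a single err label once drm_dev_put() is no longer needed on the early failure path. A generic sketch of the goto-unwind convention this probe follows, with the resources reduced to two opaque steps (the helpers are hypothetical):

#include <stdio.h>

static int step_a(void) { return 0; }   /* e.g. pci_enable_device()     */
static int step_b(void) { return -1; }  /* e.g. driver load, fails here */
static void undo_a(void) { puts("undo a"); }

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;     /* nothing to unwind yet */

	ret = step_b();
	if (ret)
		goto err;       /* one label unwinds everything done so far */

	return 0;

err:
	undo_a();
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}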
static void
radeon_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
	/* if we are running in a VM, make sure the device
	 * is torn down properly on reboot/shutdown
	 */
	if (radeon_device_is_virtual())
		radeon_pci_remove(pdev);

#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
	/*
	 * Some adapters need to be suspended before a
@@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = radeon_pci_probe,
	.remove = radeon_pci_remove,
	.shutdown = radeon_pci_shutdown,
	.driver.pm = &radeon_pm_ops,
};

@@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
	rdev->agp = NULL;

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
}

@@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->last_user = current->group_leader;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
@@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)

	/* For a killed process disallow further enqueueing of jobs. */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	if (last_user == current->group_leader &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

@@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;

	spin_unlock(&entity->lock);
}

/**

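drm_sched_entity_flush() claims ownership of entity->last_user with a cmpxchg: only the thread whose group leader is still recorded there wins the right to kill the entity, and initializing last_user at entity creation makes the first flush behave like every later one. A C11 sketch of that single-winner handoff, with task pointers modeled as plain ints:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) last_user;

/* returns 1 if this caller owned the slot and may tear down */
static int claim(int *me)
{
	int *expected = me;

	/* swap to NULL only if we are still the recorded user */
	return atomic_compare_exchange_strong(&last_user, &expected, NULL);
}

int main(void)
{
	int a, b;

	atomic_store(&last_user, &a);
	printf("a claims: %d\n", claim(&a)); /* 1: a was recorded      */
	printf("b claims: %d\n", claim(&b)); /* 0: slot is already NULL */
	return 0;
}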
Some files were not shown because too many files have changed in this diff.