Merge tag 'soc-fixes-6.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull SoC fixes from Arnd Bergmann:
 "The main change this time is an update to the MAINTAINERS file,
  listing Krzysztof Kozlowski, Alexandre Belloni, and Linus Walleij as
  additional maintainers for the SoC tree, in order to go back to a
  group maintainership. Drew Fustini joins as an additional reviewer for
  the SoC tree.

  Thanks to all of you for volunteering to help out.

  On the actual bugfixes, we have a few correctness changes for firmware
  drivers (qtee, arm-ffa, scmi) and two devicetree fixes for Raspberry
  Pi"

* tag 'soc-fixes-6.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc:
  soc: officially expand maintainership team
  firmware: arm_scmi: Fix premature SCMI_XFER_FLAG_IS_RAW clearing in raw mode
  firmware: arm_scmi: Skip RAW initialization on failure
  include: trace: Fix inflight count helper on failed initialization
  firmware: arm_scmi: Account for failed debug initialization
  ARM: dts: broadcom: rpi: Switch to V3D firmware clock
  arm64: dts: broadcom: bcm2712: Define VGIC interrupt
  firmware: arm_ffa: Add support for IMPDEF value in the memory access descriptor
  tee: QCOMTEE should depend on ARCH_QCOM
  tee: qcom: return -EFAULT instead of -EINVAL if copy_from_user() fails
  tee: qcom: prevent potential off by one read
Linus Torvalds
2025-10-24 11:15:17 -07:00
11 changed files with 119 additions and 58 deletions

View File

@@ -1997,6 +1997,10 @@ F: include/uapi/linux/if_arcnet.h
 
 ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
 M: Arnd Bergmann <arnd@arndb.de>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Alexandre Belloni <alexandre.belloni@bootlin.com>
+M: Linus Walleij <linus.walleij@linaro.org>
+R: Drew Fustini <fustini@kernel.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: soc@lists.linux.dev
 S: Maintained

View File

@@ -77,6 +77,14 @@ &i2c0 {
 	/delete-property/ pinctrl-0;
 };
 
+&pm {
+	clocks = <&firmware_clocks 5>,
+		 <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+		 <&clocks BCM2835_CLOCK_H264>,
+		 <&clocks BCM2835_CLOCK_ISP>;
+	clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
 &rmem {
 	/*
 	 * RPi4's co-processor will copy the board's bootloader configuration

View File

@@ -13,7 +13,16 @@ &hdmi {
 	clock-names = "pixel", "hdmi";
 };
 
+&pm {
+	clocks = <&firmware_clocks 5>,
+		 <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+		 <&clocks BCM2835_CLOCK_H264>,
+		 <&clocks BCM2835_CLOCK_ISP>;
+	clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
 &v3d {
+	clocks = <&firmware_clocks 5>;
 	power-domains = <&power RPI_POWER_DOMAIN_V3D>;
 };

View File

@@ -326,6 +326,8 @@ gicv2: interrupt-controller@7fff9000 {
 		      <0x7fffe000 0x2000>;
 		interrupt-controller;
 		#address-cells = <0>;
+		interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+					 IRQ_TYPE_LEVEL_HIGH)>;
 		#interrupt-cells = <3>;
 	};

View File

@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
 	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
 }
 
+static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
+{
+	struct ffa_mem_region_attributes *ep_mem_access;
+
+	if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
+		memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
+}
+
+static void
+ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
+{
+	if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
+		mem_region->ep_mem_size = 0;
+	} else {
+		mem_region->ep_mem_size = ffa_emad_size_get(version);
+		mem_region->ep_mem_offset = sizeof(*mem_region);
+		memset(mem_region->reserved, 0, 12);
+	}
+}
+
 static int
 ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
 		       struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
 	mem_region->flags = args->flags;
 	mem_region->sender_id = drv_info->vm_id;
 	mem_region->attributes = ffa_memory_attributes_get(func_id);
-	ep_mem_access = buffer +
-			ffa_mem_desc_offset(buffer, 0, drv_info->version);
 	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
 					       drv_info->version);
 
-	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+	for (idx = 0; idx < args->nattrs; idx++) {
+		ep_mem_access = buffer +
+				ffa_mem_desc_offset(buffer, idx, drv_info->version);
 		ep_mem_access->receiver = args->attrs[idx].receiver;
 		ep_mem_access->attrs = args->attrs[idx].attrs;
 		ep_mem_access->composite_off = composite_offset;
 		ep_mem_access->flag = 0;
 		ep_mem_access->reserved = 0;
+		ffa_emad_impdef_value_init(drv_info->version,
+					   ep_mem_access->impdef_val,
+					   args->attrs[idx].impdef_val);
 	}
 	mem_region->handle = 0;
 	mem_region->ep_count = args->nattrs;
-	if (drv_info->version <= FFA_VERSION_1_0) {
-		mem_region->ep_mem_size = 0;
-	} else {
-		mem_region->ep_mem_size = sizeof(*ep_mem_access);
-		mem_region->ep_mem_offset = sizeof(*mem_region);
-		memset(mem_region->reserved, 0, 12);
-	}
+	ffa_mem_region_additional_setup(drv_info->version, mem_region);
 
 	composite = buffer + composite_offset;
 	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
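
The loop rework above matters because the stride of the endpoint memory access descriptor array is now version-dependent: FF-A v1.2 added impdef_val[16] to each descriptor, so the old `ep_mem_access++` (which always advances by the full structure size) would over-step the smaller pre-v1.2 descriptors. A minimal standalone sketch of the pattern, using simplified stand-in types rather than the driver's real layouts:

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the two descriptor layouts (illustration only). */
struct emad_old {
	uint16_t receiver;
	uint8_t attrs;
	uint8_t flag;
	uint32_t composite_off;
	uint64_t reserved;
};

struct emad_new {
	uint16_t receiver;
	uint8_t attrs;
	uint8_t flag;
	uint32_t composite_off;
	uint8_t impdef_val[16];	/* new in the later protocol version */
	uint64_t reserved;
};

/* The element stride depends on the negotiated version... */
static size_t emad_size(int has_impdef)
{
	return has_impdef ? sizeof(struct emad_new) : sizeof(struct emad_old);
}

/* ...so each element must be located by byte offset, never by
 * incrementing a pointer to the larger structure type. */
static void *emad_at(void *base, size_t idx, int has_impdef)
{
	return (uint8_t *)base + idx * emad_size(has_impdef);
}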

View File

@@ -309,16 +309,36 @@ enum debug_counters {
 	SCMI_DEBUG_COUNTERS_LAST
 };
 
-static inline void scmi_inc_count(atomic_t *arr, int stat)
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_t's used for tracking statistics (if enabled)
+ */
+struct scmi_debug_info {
+	struct dentry *top_dentry;
+	const char *name;
+	const char *type;
+	bool is_atomic;
+	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
+};
+
+static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
 {
-	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
-		atomic_inc(&arr[stat]);
+	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+		if (dbg)
+			atomic_inc(&dbg->counters[stat]);
+	}
 }
 
-static inline void scmi_dec_count(atomic_t *arr, int stat)
+static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
 {
-	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
-		atomic_dec(&arr[stat]);
+	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+		if (dbg)
+			atomic_dec(&dbg->counters[stat]);
+	}
 }
 
 enum scmi_bad_msg {
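
Moving struct scmi_debug_info into this shared header is what makes the new helper signatures possible: callers now hand over the whole debug-info pointer, and the NULL check lives inside the helper. A before/after sketch of a call site (taken from the shape of the changes below), showing why the old signature could never be made safe:

/* Before: the call site dereferenced info->dbg to reach the counter
 * array, so a failed debugfs setup (info->dbg == NULL) crashed here,
 * before the helper could check anything. */
scmi_inc_count(info->dbg->counters, SENT_OK);

/* After: the helper receives the pointer itself and checks it, so the
 * same call degrades to a safe no-op when debug setup failed. */
scmi_inc_count(info->dbg, SENT_OK);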

View File

@@ -115,22 +115,6 @@ struct scmi_protocol_instance {
 
 #define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
 
-/**
- * struct scmi_debug_info - Debug common info
- * @top_dentry: A reference to the top debugfs dentry
- * @name: Name of this SCMI instance
- * @type: Type of this SCMI instance
- * @is_atomic: Flag to state if the transport of this instance is atomic
- * @counters: An array of atomic_t's used for tracking statistics (if enabled)
- */
-struct scmi_debug_info {
-	struct dentry *top_dentry;
-	const char *name;
-	const char *type;
-	bool is_atomic;
-	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
-};
-
 /**
  * struct scmi_info - Structure representing a SCMI instance
  *
@@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
 	/* Set in-flight */
 	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
 	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
-	scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
+	scmi_inc_count(info->dbg, XFERS_INFLIGHT);
 
 	xfer->pending = true;
 }
@@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 			hash_del(&xfer->node);
 			xfer->pending = false;
-			scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
+			scmi_dec_count(info->dbg, XFERS_INFLIGHT);
 		}
+		xfer->flags = 0;
 		hlist_add_head(&xfer->node, &minfo->free_xfers);
 	}
 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 {
 	struct scmi_info *info = handle_to_scmi_info(handle);
 
-	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
-	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
 	return __scmi_xfer_put(&info->tx_minfo, xfer);
 }
@@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 
 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
-		scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
+		scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
 
 		return xfer;
 	}
@@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
 			msg_type, xfer_id, msg_hdr, xfer->state);
 
 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
-		scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
+		scmi_inc_count(info->dbg, ERR_MSG_INVALID);
 
 		/* On error the refcount incremented above has to be dropped */
 		__scmi_xfer_put(minfo, xfer);
@@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
 			PTR_ERR(xfer));
 
 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
-		scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
+		scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
 
 		scmi_clear_channel(info, cinfo);
 		return;
@@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
 			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
 			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
-	scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
+	scmi_inc_count(info->dbg, NOTIFICATION_OK);
 
 	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
 		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
 		scmi_clear_channel(info, cinfo);
 		complete(xfer->async_done);
-		scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
+		scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
 	} else {
 		complete(&xfer->done);
-		scmi_inc_count(info->dbg->counters, RESPONSE_OK);
+		scmi_inc_count(info->dbg, RESPONSE_OK);
 	}
 
 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
 				"timed out in resp(caller: %pS) - polling\n",
 				(void *)_RET_IP_);
 			ret = -ETIMEDOUT;
-			scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
+			scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
 		}
 	}
@@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
 					    "RESP" : "resp",
 					    xfer->hdr.seq, xfer->hdr.status,
 					    xfer->rx.buf, xfer->rx.len);
-			scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
+			scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
 
 			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
 				scmi_raw_message_report(info->raw, xfer,
@@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
 			dev_err(dev, "timed out in resp(caller: %pS)\n",
 				(void *)_RET_IP_);
 			ret = -ETIMEDOUT;
-			scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
+			scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
 		}
 	}
@@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
 	    !is_transport_polling_capable(info->desc)) {
 		dev_warn_once(dev,
 			      "Polling mode is not supported by transport.\n");
-		scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
+		scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
 		return -EINVAL;
 	}
 
 	cinfo = idr_find(&info->tx_idr, pi->proto->id);
 	if (unlikely(!cinfo)) {
-		scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
+		scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
 		return -EINVAL;
 	}
 
 	/* True ONLY if also supported by transport. */
@@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
 	ret = info->desc->ops->send_message(cinfo, xfer);
 	if (ret < 0) {
 		dev_dbg(dev, "Failed to send message %d\n", ret);
-		scmi_inc_count(info->dbg->counters, SENT_FAIL);
+		scmi_inc_count(info->dbg, SENT_FAIL);
 		return ret;
 	}
 
 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
 			    xfer->hdr.id, "CMND", xfer->hdr.seq,
 			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
-	scmi_inc_count(info->dbg->counters, SENT_OK);
+	scmi_inc_count(info->dbg, SENT_OK);
 
 	ret = scmi_wait_for_message_response(cinfo, xfer);
 	if (!ret && xfer->hdr.status) {
 		ret = scmi_to_linux_errno(xfer->hdr.status);
-		scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+		scmi_inc_count(info->dbg, ERR_PROTOCOL);
 	}
 
 	if (info->desc->ops->mark_txdone)
@@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
 	u8 channels[SCMI_MAX_CHANNELS] = {};
 	DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
 
-	if (!info->dbg)
-		return -EINVAL;
-
 	/* Enumerate all channels to collect their ids */
 	idr_for_each_entry(&info->tx_idr, cinfo, id) {
 		/*
@@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
 	if (!info->dbg)
 		dev_warn(dev, "Failed to setup SCMI debugfs.\n");
 
-	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+	if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
 		ret = scmi_debugfs_raw_mode_setup(info);
 		if (!coex) {
 			if (ret)
@@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle)
 	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
 		struct scmi_info *info = handle_to_scmi_info(handle);
 
+		if (!info->dbg)
+			return 0;
+
 		return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
 	} else {
 		return 0;
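
The driver changes above fall into two patterns. First, debugfs setup is treated as best-effort: probe only warns on failure, raw-mode setup is skipped unless info->dbg exists, and the exported inflight-count reader guards the pointer before use. Second, the raw-flag fix defers state reset: clearing SCMI_XFER_FLAG_IS_RAW in scmi_xfer_raw_put() stripped the flag while the xfer was still being torn down, so the reset now happens once, at the single point where the xfer returns to the free list. A condensed sketch of that lifetime rule, mirroring the __scmi_xfer_put() hunk above:

/* Sketch: mode flags stay valid for the xfer's whole in-use lifetime
 * and are reset only at the one recycle point. */
if (refcount_dec_and_test(&xfer->users)) {
	/* ... drop the xfer from pending tracking ... */
	xfer->flags = 0;	/* single reset point, after the last user */
	hlist_add_head(&xfer->node, &minfo->free_xfers);
}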

View File

@@ -2,6 +2,7 @@
 # Qualcomm Trusted Execution Environment Configuration
 config QCOMTEE
 	tristate "Qualcomm TEE Support"
+	depends on ARCH_QCOM || COMPILE_TEST
 	depends on !CPU_BIG_ENDIAN
 	select QCOM_SCM
 	select QCOM_TZMEM_MODE_SHMBRIDGE

View File

@@ -308,7 +308,7 @@ static int qcomtee_params_from_args(struct tee_param *params,
 	}
 
 	/* Release any IO and OO objects not processed. */
-	for (; u[i].type && i < num_params; i++) {
+	for (; i < num_params && u[i].type; i++) {
 		if (u[i].type == QCOMTEE_ARG_TYPE_OO ||
 		    u[i].type == QCOMTEE_ARG_TYPE_IO)
 			qcomtee_object_put(u[i].o);
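
The operand reordering above is the whole fix: && evaluates left to right and short-circuits, so the bounds test must run first. With the old order, the final pass evaluated u[num_params].type, one element past the array, before discovering i was out of range. A standalone illustration with a hypothetical array (not the TEE code):

#include <stdio.h>

int main(void)
{
	int a[4] = { 1, 2, 3, 4 };
	int n = 4, i;

	/* Buggy order: a[i] is read before the bounds test, so when
	 * i == n the loop condition reads a[4], past the end. */
	/* for (i = 0; a[i] && i < n; i++) ... */

	/* Fixed order: the bounds check short-circuits the access. */
	for (i = 0; i < n && a[i]; i++)
		printf("a[%d] = %d\n", i, a[i]);

	return 0;
}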

View File

@@ -424,7 +424,7 @@ static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic,
 			if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
 				memcpy(msgptr, u[i].b.addr, u[i].b.size);
 			else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size))
-				return -EINVAL;
+				return -EFAULT;
 
 			offset += qcomtee_msg_offset_align(u[i].b.size);
 			ib++;
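
The errno swap follows the kernel convention: copy_from_user() returns the number of bytes it could not copy, and any nonzero result means the user pointer faulted, which is reported as -EFAULT (bad address) rather than -EINVAL (bad argument value). The canonical pattern:

/* copy_from_user() returns the number of uncopied bytes;
 * nonzero means the user buffer faulted -> report -EFAULT. */
if (copy_from_user(dst, user_ptr, size))
	return -EFAULT;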

View File

@@ -338,6 +338,8 @@ struct ffa_mem_region_attributes {
 	 * an `struct ffa_mem_region_addr_range`.
 	 */
 	u32 composite_off;
+	u8 impdef_val[16];
 	u64 reserved;
 };
@@ -417,15 +418,31 @@ struct ffa_mem_region {
 #define CONSTITUENTS_OFFSET(x)	\
 	(offsetof(struct ffa_composite_mem_region, constituents[x]))
 
+#define FFA_EMAD_HAS_IMPDEF_FIELD(version)	((version) >= FFA_VERSION_1_2)
+#define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)	((version) > FFA_VERSION_1_0)
+
+static inline u32 ffa_emad_size_get(u32 ffa_version)
+{
+	u32 sz;
+	struct ffa_mem_region_attributes *ep_mem_access;
+
+	if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version))
+		sz = sizeof(*ep_mem_access);
+	else
+		sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val);
+
+	return sz;
+}
+
 static inline u32
 ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
 {
-	u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+	u32 offset = count * ffa_emad_size_get(ffa_version);
 	/*
 	 * Prior to v1.1, the endpoint memory descriptor array started at
 	 * offset 32 (i.e. the offset of ep_mem_offset in the current structure).
 	 */
-	if (ffa_version <= FFA_VERSION_1_0)
+	if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version))
 		offset += offsetof(struct ffa_mem_region, ep_mem_offset);
 	else
 		offset += sizeof(struct ffa_mem_region);
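
To see what the reworked ffa_mem_desc_offset() computes, take the sizes implied by the descriptor layout above (assumed here for illustration: 16 bytes per descriptor without impdef_val, 32 bytes with the 16-byte impdef_val added, and writing H for sizeof(struct ffa_mem_region)):

/* Worked example (assumed sizes, illustration only):
 *
 *   v1.0:  offset(count) = count * 16 + 32  (array sat at fixed offset 32,
 *                                            today's ep_mem_offset position)
 *   v1.1:  offset(count) = count * 16 + H   (array follows the full header)
 *   v1.2+: offset(count) = count * 32 + H   (each descriptor grew by
 *                                            impdef_val[16])
 */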