mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-05 13:29:17 -04:00
staging: vchiq_arm: Track bulk user data pointer separately
A bulk callback transfer can be initiated from two places -
inside kernel interface or from user interface. However,
the callback data pointer 'cb_data' is used for tracking both
sets of data pointers. This commit tracks the callback
data pointer from user interface (named as 'cb_userdata') separately,
in the bulk transfer service callback.
This is essentially done by adding a 'void __user *cb_userdata' for
tracking __user pointers in vchiq_bulk and vchiq_completion_data
structs. Furthermore, the 'cb_userdata' data pointer is appended to
the vchiq_service's callback signature.
Separating the two callback data pointers ('cb_data' and 'cb_userdata')
fixes the sparse warnings around mixing userspace and kernel space
pointers.
As there are no additional sparse warnings left for vc04_services,
drop the relevant entry from the TODO.
Signed-off-by: Umang Jain <umang.jain@ideasonboard.com>
Link: https://lore.kernel.org/r/20241023110406.885199-7-umang.jain@ideasonboard.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
committed by
Greg Kroah-Hartman
parent
951b3c1435
commit
cb1d0f5788
@@ -96,7 +96,8 @@ static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance,
|
||||
static int audio_vchi_callback(struct vchiq_instance *vchiq_instance,
|
||||
enum vchiq_reason reason,
|
||||
struct vchiq_header *header,
|
||||
unsigned int handle, void *userdata)
|
||||
unsigned int handle,
|
||||
void *cb_data, void __user *cb_userdata)
|
||||
{
|
||||
struct bcm2835_audio_instance *instance = vchiq_get_service_userdata(vchiq_instance,
|
||||
handle);
|
||||
|
||||
@@ -56,7 +56,7 @@ struct vchiq_service_base {
|
||||
enum vchiq_reason reason,
|
||||
struct vchiq_header *header,
|
||||
unsigned int handle,
|
||||
void *bulk_userdata);
|
||||
void *cb_data, void __user *cb_userdata);
|
||||
void *userdata;
|
||||
};
|
||||
|
||||
@@ -65,6 +65,7 @@ struct vchiq_completion_data_kernel {
|
||||
struct vchiq_header *header;
|
||||
void *service_userdata;
|
||||
void *cb_data;
|
||||
void __user *cb_userdata;
|
||||
};
|
||||
|
||||
struct vchiq_service_params_kernel {
|
||||
@@ -73,7 +74,7 @@ struct vchiq_service_params_kernel {
|
||||
enum vchiq_reason reason,
|
||||
struct vchiq_header *header,
|
||||
unsigned int handle,
|
||||
void *cb_data);
|
||||
void *cb_data, void __user *cb_userdata);
|
||||
void *userdata;
|
||||
short version; /* Increment for non-trivial changes */
|
||||
short version_min; /* Update for incompatible changes */
|
||||
|
||||
@@ -27,10 +27,6 @@ The code follows the 80 characters limitation yet tends to go 3 or 4 levels of
|
||||
indentation deep making it very unpleasant to read. This is specially relevant
|
||||
in the character driver ioctl code and in the core thread functions.
|
||||
|
||||
* Clean up Sparse warnings from __user annotations. See
|
||||
vchiq_irq_queue_bulk_tx_rx(). Ensure that the address of "&waiter->bulk_waiter"
|
||||
is never disclosed to userspace.
|
||||
|
||||
* Fix behavior of message handling
|
||||
|
||||
The polling behavior of vchiq_bulk_transmit(), vchiq_bulk_receive() and
|
||||
|
||||
@@ -632,7 +632,7 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
|
||||
static int
|
||||
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
|
||||
struct vchiq_header *header, struct user_service *user_service,
|
||||
void *bulk_userdata)
|
||||
void *cb_data, void __user *cb_userdata)
|
||||
{
|
||||
struct vchiq_completion_data_kernel *completion;
|
||||
struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
|
||||
@@ -662,7 +662,8 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
|
||||
completion->reason = reason;
|
||||
/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
|
||||
completion->service_userdata = user_service->service;
|
||||
completion->cb_data = bulk_userdata;
|
||||
completion->cb_data = cb_data;
|
||||
completion->cb_userdata = cb_userdata;
|
||||
|
||||
if (reason == VCHIQ_SERVICE_CLOSED) {
|
||||
/*
|
||||
@@ -693,8 +694,8 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
|
||||
|
||||
static int
|
||||
service_single_message(struct vchiq_instance *instance,
|
||||
enum vchiq_reason reason,
|
||||
struct vchiq_service *service, void *bulk_userdata)
|
||||
enum vchiq_reason reason, struct vchiq_service *service,
|
||||
void *cb_data, void __user *cb_userdata)
|
||||
{
|
||||
struct user_service *user_service;
|
||||
|
||||
@@ -712,7 +713,7 @@ service_single_message(struct vchiq_instance *instance,
|
||||
dev_dbg(instance->state->dev,
|
||||
"arm: Inserting extra MESSAGE_AVAILABLE\n");
|
||||
ret = add_completion(instance, reason, NULL, user_service,
|
||||
bulk_userdata);
|
||||
cb_data, cb_userdata);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -730,7 +731,8 @@ service_single_message(struct vchiq_instance *instance,
|
||||
|
||||
int
|
||||
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
|
||||
struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
|
||||
struct vchiq_header *header, unsigned int handle,
|
||||
void *cb_data, void __user *cb_userdata)
|
||||
{
|
||||
/*
|
||||
* How do we ensure the callback goes to the right client?
|
||||
@@ -769,9 +771,9 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
|
||||
rcu_read_unlock();
|
||||
|
||||
dev_dbg(service->state->dev,
|
||||
"arm: service %p(%d,%p), reason %d, header %p, instance %p, bulk_userdata %p\n",
|
||||
"arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n",
|
||||
user_service, service->localport, user_service->userdata,
|
||||
reason, header, instance, bulk_userdata);
|
||||
reason, header, instance, cb_data, cb_userdata);
|
||||
|
||||
if (header && user_service->is_vchi) {
|
||||
spin_lock(&service->state->msg_queue_spinlock);
|
||||
@@ -783,8 +785,8 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
|
||||
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
||||
DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
|
||||
|
||||
ret = service_single_message(instance, reason,
|
||||
service, bulk_userdata);
|
||||
ret = service_single_message(instance, reason, service,
|
||||
cb_data, cb_userdata);
|
||||
if (ret) {
|
||||
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
||||
vchiq_service_put(service);
|
||||
@@ -822,7 +824,7 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
|
||||
return 0;
|
||||
|
||||
return add_completion(instance, reason, header, user_service,
|
||||
bulk_userdata);
|
||||
cb_data, cb_userdata);
|
||||
}
|
||||
|
||||
void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f)
|
||||
@@ -909,7 +911,8 @@ static int
|
||||
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
|
||||
enum vchiq_reason reason,
|
||||
struct vchiq_header *header,
|
||||
unsigned int service_user, void *bulk_user)
|
||||
unsigned int service_user,
|
||||
void *cb_data, void __user *cb_userdata)
|
||||
{
|
||||
dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
|
||||
__func__, reason);
|
||||
|
||||
@@ -155,7 +155,8 @@ static inline int vchiq_register_chrdev(struct device *parent) { return 0; }
|
||||
|
||||
extern int
|
||||
service_callback(struct vchiq_instance *vchiq_instance, enum vchiq_reason reason,
|
||||
struct vchiq_header *header, unsigned int handle, void *bulk_userdata);
|
||||
struct vchiq_header *header, unsigned int handle,
|
||||
void *cb_data, void __user *cb_userdata);
|
||||
|
||||
extern void
|
||||
free_bulk_waiter(struct vchiq_instance *instance);
|
||||
|
||||
@@ -458,20 +458,23 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
|
||||
struct vchiq_header *header, struct vchiq_bulk *bulk)
|
||||
{
|
||||
void *cb_data = NULL;
|
||||
void __user *cb_userdata = NULL;
|
||||
int status;
|
||||
|
||||
/*
|
||||
* If a bulk transfer is in progress, pass bulk->cb_data to the
|
||||
* If a bulk transfer is in progress, pass bulk->cb_*data to the
|
||||
* callback function.
|
||||
*/
|
||||
if (bulk)
|
||||
if (bulk) {
|
||||
cb_data = bulk->cb_data;
|
||||
cb_userdata = bulk->cb_userdata;
|
||||
}
|
||||
|
||||
dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK)\n",
|
||||
dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK %pK)\n",
|
||||
service->state->id, service->localport, reason_names[reason],
|
||||
header, cb_data);
|
||||
header, cb_data, cb_userdata);
|
||||
status = service->base.callback(service->instance, reason, header, service->handle,
|
||||
cb_data);
|
||||
cb_data, cb_userdata);
|
||||
if (status && (status != -EAGAIN)) {
|
||||
dev_warn(service->state->dev,
|
||||
"core: %d: ignoring ERROR from callback to service %x\n",
|
||||
@@ -3073,6 +3076,7 @@ vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service,
|
||||
bulk->dir = bulk_params->dir;
|
||||
bulk->waiter = bulk_params->waiter;
|
||||
bulk->cb_data = bulk_params->cb_data;
|
||||
bulk->cb_userdata = bulk_params->cb_userdata;
|
||||
bulk->size = bulk_params->size;
|
||||
bulk->offset = bulk_params->offset;
|
||||
bulk->uoffset = bulk_params->uoffset;
|
||||
|
||||
@@ -115,6 +115,7 @@ struct vchiq_bulk {
|
||||
short mode;
|
||||
short dir;
|
||||
void *cb_data;
|
||||
void __user *cb_userdata;
|
||||
struct bulk_waiter *waiter;
|
||||
dma_addr_t dma_addr;
|
||||
int size;
|
||||
|
||||
@@ -338,7 +338,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
|
||||
bulk_params.mode = args->mode;
|
||||
bulk_params.size = args->size;
|
||||
bulk_params.dir = dir;
|
||||
bulk_params.cb_data = args->userdata;
|
||||
bulk_params.cb_userdata = args->userdata;
|
||||
|
||||
status = vchiq_bulk_xfer_callback(instance, args->handle,
|
||||
&bulk_params);
|
||||
@@ -549,11 +549,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
|
||||
!instance->use_close_delivered)
|
||||
vchiq_service_put(service);
|
||||
|
||||
/*
|
||||
* FIXME: address space mismatch, does cb_data
|
||||
* actually point to user or kernel memory?
|
||||
*/
|
||||
user_completion.cb_userdata = completion->cb_data;
|
||||
user_completion.cb_userdata = completion->cb_userdata;
|
||||
|
||||
if (vchiq_put_completion(args->buf, &user_completion, ret)) {
|
||||
if (ret == 0)
|
||||
|
||||
@@ -551,7 +551,8 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
|
||||
/* incoming event service callback */
|
||||
static int mmal_service_callback(struct vchiq_instance *vchiq_instance,
|
||||
enum vchiq_reason reason, struct vchiq_header *header,
|
||||
unsigned int handle, void *bulk_ctx)
|
||||
unsigned int handle, void *cb_data,
|
||||
void __user *cb_userdata)
|
||||
{
|
||||
struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
|
||||
u32 msg_len;
|
||||
@@ -626,11 +627,11 @@ static int mmal_service_callback(struct vchiq_instance *vchiq_instance,
|
||||
break;
|
||||
|
||||
case VCHIQ_BULK_RECEIVE_DONE:
|
||||
bulk_receive_cb(instance, bulk_ctx);
|
||||
bulk_receive_cb(instance, cb_data);
|
||||
break;
|
||||
|
||||
case VCHIQ_BULK_RECEIVE_ABORTED:
|
||||
bulk_abort_cb(instance, bulk_ctx);
|
||||
bulk_abort_cb(instance, cb_data);
|
||||
break;
|
||||
|
||||
case VCHIQ_SERVICE_CLOSED:
|
||||
|
||||
Reference in New Issue
Block a user