mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 14:41:22 -05:00
Merge tag 'tee-prot-dma-buf-for-v6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/jenswi/linux-tee into soc/drivers

TEE protected DMA-bufs for v6.18

- Allocates protected DMA-bufs from a DMA-heap instantiated from the TEE
  subsystem.
- The DMA-heap uses a protected memory pool provided by the backend TEE
  driver, allowing it to choose how to allocate the protected physical
  memory.
- Three use-cases (Secure Video Playback, Trusted UI, and Secure Video
  Recording) have been identified so far to serve as examples of what can
  be expected.
- The use-cases have predefined DMA-heap names: "protected,secure-video",
  "protected,trusted-ui", and "protected,secure-video-record". The backend
  driver registers protected memory pools for the use-cases it supports.

* tag 'tee-prot-dma-buf-for-v6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/jenswi/linux-tee:
  optee: smc abi: dynamic protected memory allocation
  optee: FF-A: dynamic protected memory allocation
  optee: support protected memory allocation
  tee: add tee_shm_alloc_dma_mem()
  tee: new ioctl to a register tee_shm from a dmabuf file descriptor
  tee: refactor params_from_user()
  tee: implement protected DMA-heap
  dma-buf: dma-heap: export declared functions
  optee: sync secure world ABI headers

Link: https://lore.kernel.org/r/20250912101752.GA1453408@rayden
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
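User space reaches these heaps through the standard DMA-heap character devices, so no new allocation UAPI is involved. Below is a minimal sketch of such an allocation, assuming the backend TEE driver has registered the Secure Video Playback use-case; DMA_HEAP_IOCTL_ALLOC and struct dma_heap_allocation_data are the existing <linux/dma-heap.h> UAPI, and error handling is trimmed.

/*
 * Sketch: allocate a protected DMA-buf from the TEE-instantiated heap.
 * The heap node only exists if the backend TEE driver registered a
 * protected memory pool for the Secure Video Playback use-case.
 */
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int alloc_protected_dmabuf(size_t len)
{
        struct dma_heap_allocation_data data = {
                .len = len,
                .fd_flags = O_RDWR | O_CLOEXEC,
        };
        int heap_fd, ret;

        heap_fd = open("/dev/dma_heap/protected,secure-video", O_RDONLY);
        if (heap_fd < 0)
                return -1;

        ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
        close(heap_fd);
        if (ret < 0)
                return -1;

        /* data.fd is the DMA-buf; its contents stay CPU-inaccessible */
        return data.fd;
}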
drivers/dma-buf/dma-heap.c:

@@ -11,6 +11,7 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/list.h>
 #include <linux/nospec.h>
 #include <linux/syscalls.h>
@@ -202,6 +203,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
 {
        return heap->priv;
 }
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP");

 /**
  * dma_heap_get_name - get heap name
@@ -214,6 +216,7 @@ const char *dma_heap_get_name(struct dma_heap *heap)
 {
        return heap->name;
 }
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP");

 /**
  * dma_heap_add - adds a heap to dmabuf heaps
@@ -303,6 +306,7 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
        kfree(heap);
        return err_ret;
 }
+EXPORT_SYMBOL_NS_GPL(dma_heap_add, "DMA_BUF_HEAP");

 static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
 {
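Note that these functions are exported into the "DMA_BUF_HEAP" symbol namespace rather than plainly, so a consuming module has to import that namespace explicitly or the symbols will not resolve at load time. The TEE core does exactly this at the end of the series (see the MODULE_IMPORT_NS lines in tee_core.c further down); in a consumer module it is a one-liner:

/* Required once per module that calls dma_heap_add() and friends */
MODULE_IMPORT_NS("DMA_BUF_HEAP");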
drivers/tee/Kconfig:

@@ -12,6 +12,11 @@ menuconfig TEE

 if TEE

+config TEE_DMABUF_HEAPS
+       bool
+       depends on HAS_DMA && DMABUF_HEAPS
+       default y
+
 source "drivers/tee/optee/Kconfig"
 source "drivers/tee/amdtee/Kconfig"
 source "drivers/tee/tstee/Kconfig"
drivers/tee/Makefile:

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_TEE) += tee.o
 tee-objs += tee_core.o
+tee-objs += tee_heap.o
 tee-objs += tee_shm.o
 tee-objs += tee_shm_pool.o
 obj-$(CONFIG_OPTEE) += optee/
drivers/tee/optee/Kconfig:

@@ -25,3 +25,8 @@ config OPTEE_INSECURE_LOAD_IMAGE

          Additional documentation on kernel security risks are at
          Documentation/tee/op-tee.rst.
+
+config OPTEE_STATIC_PROTMEM_POOL
+       bool
+       depends on HAS_IOMEM && TEE_DMABUF_HEAPS
+       default y
drivers/tee/optee/Makefile:

@@ -4,6 +4,7 @@ optee-objs += core.o
 optee-objs += call.o
 optee-objs += notif.o
 optee-objs += rpc.o
+optee-objs += protmem.o
 optee-objs += supp.o
 optee-objs += device.o
 optee-objs += smc_abi.o
drivers/tee/optee/core.c:

@@ -56,6 +56,13 @@ int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
        return 0;
 }

+int optee_set_dma_mask(struct optee *optee, u_int pa_width)
+{
+       u64 mask = DMA_BIT_MASK(min(64, pa_width));
+
+       return dma_coerce_mask_and_coherent(&optee->teedev->dev, mask);
+}
+
 static void optee_bus_scan(struct work_struct *work)
 {
        WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
drivers/tee/optee/ffa_abi.c:

@@ -649,6 +649,124 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
        return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread);
 }

+static int do_call_lend_protmem(struct optee *optee, u64 cookie, u32 use_case)
+{
+       struct optee_shm_arg_entry *entry;
+       struct optee_msg_arg *msg_arg;
+       struct tee_shm *shm;
+       u_int offs;
+       int rc;
+
+       msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
+
+       msg_arg->cmd = OPTEE_MSG_CMD_ASSIGN_PROTMEM;
+       msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+       msg_arg->params[0].u.value.a = cookie;
+       msg_arg->params[0].u.value.b = use_case;
+
+       rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+       if (rc)
+               goto out;
+       if (msg_arg->ret != TEEC_SUCCESS) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       optee_free_msg_arg(optee->ctx, entry, offs);
+       return rc;
+}
+
+static int optee_ffa_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+                                 u32 *mem_attrs, unsigned int ma_count,
+                                 u32 use_case)
+{
+       struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+       const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+       const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+       struct ffa_send_direct_data data;
+       struct ffa_mem_region_attributes *mem_attr;
+       struct ffa_mem_ops_args args = {
+               .use_txbuf = true,
+               .tag = use_case,
+       };
+       struct page *page;
+       struct scatterlist sgl;
+       unsigned int n;
+       int rc;
+
+       mem_attr = kcalloc(ma_count, sizeof(*mem_attr), GFP_KERNEL);
+       for (n = 0; n < ma_count; n++) {
+               mem_attr[n].receiver = mem_attrs[n] & U16_MAX;
+               mem_attr[n].attrs = mem_attrs[n] >> 16;
+       }
+       args.attrs = mem_attr;
+       args.nattrs = ma_count;
+
+       page = phys_to_page(protmem->paddr);
+       sg_init_table(&sgl, 1);
+       sg_set_page(&sgl, page, protmem->size, 0);
+
+       args.sg = &sgl;
+       rc = mem_ops->memory_lend(&args);
+       kfree(mem_attr);
+       if (rc)
+               return rc;
+
+       rc = do_call_lend_protmem(optee, args.g_handle, use_case);
+       if (rc)
+               goto err_reclaim;
+
+       rc = optee_shm_add_ffa_handle(optee, protmem, args.g_handle);
+       if (rc)
+               goto err_unreg;
+
+       protmem->sec_world_id = args.g_handle;
+
+       return 0;
+
+err_unreg:
+       data = (struct ffa_send_direct_data){
+               .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+               .data1 = (u32)args.g_handle,
+               .data2 = (u32)(args.g_handle >> 32),
+       };
+       msg_ops->sync_send_receive(ffa_dev, &data);
+err_reclaim:
+       mem_ops->memory_reclaim(args.g_handle, 0);
+       return rc;
+}
+
+static int optee_ffa_reclaim_protmem(struct optee *optee,
+                                    struct tee_shm *protmem)
+{
+       struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+       const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+       const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+       u64 global_handle = protmem->sec_world_id;
+       struct ffa_send_direct_data data = {
+               .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+               .data1 = (u32)global_handle,
+               .data2 = (u32)(global_handle >> 32)
+       };
+       int rc;
+
+       optee_shm_rem_ffa_handle(optee, global_handle);
+       protmem->sec_world_id = 0;
+
+       rc = msg_ops->sync_send_receive(ffa_dev, &data);
+       if (rc)
+               pr_err("Release SHM id 0x%llx rc %d\n", global_handle, rc);
+
+       rc = mem_ops->memory_reclaim(global_handle, 0);
+       if (rc)
+               pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+       return rc;
+}
+
 /*
  * 6. Driver initialization
  *
@@ -819,6 +937,8 @@ static const struct optee_ops optee_ffa_ops = {
        .do_call_with_arg = optee_ffa_do_call_with_arg,
        .to_msg_param = optee_ffa_to_msg_param,
        .from_msg_param = optee_ffa_from_msg_param,
+       .lend_protmem = optee_ffa_lend_protmem,
+       .reclaim_protmem = optee_ffa_reclaim_protmem,
 };

 static void optee_ffa_remove(struct ffa_device *ffa_dev)
@@ -891,6 +1011,25 @@ static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev,
        return rc;
 }

+static int optee_ffa_protmem_pool_init(struct optee *optee, u32 sec_caps)
+{
+       enum tee_dma_heap_id id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+       struct tee_protmem_pool *pool;
+       int rc = 0;
+
+       if (sec_caps & OPTEE_FFA_SEC_CAP_PROTMEM) {
+               pool = optee_protmem_alloc_dyn_pool(optee, id);
+               if (IS_ERR(pool))
+                       return PTR_ERR(pool);
+
+               rc = tee_device_register_dma_heap(optee->teedev, id, pool);
+               if (rc)
+                       pool->ops->destroy_pool(pool);
+       }
+
+       return rc;
+}
+
 static int optee_ffa_probe(struct ffa_device *ffa_dev)
 {
        const struct ffa_notifier_ops *notif_ops;
@@ -941,7 +1080,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
                           optee);
        if (IS_ERR(teedev)) {
                rc = PTR_ERR(teedev);
-               goto err_free_pool;
+               goto err_free_shm_pool;
        }
        optee->teedev = teedev;

@@ -988,6 +1127,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
                        rc);
        }

+       if (optee_ffa_protmem_pool_init(optee, sec_caps))
+               pr_info("Protected memory service not available\n");
+
        rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
        if (rc)
                goto err_unregister_devices;
@@ -1018,7 +1160,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
        tee_device_unregister(optee->supp_teedev);
 err_unreg_teedev:
        tee_device_unregister(optee->teedev);
-err_free_pool:
+err_free_shm_pool:
        tee_shm_pool_free(pool);
 err_free_optee:
        kfree(optee);
drivers/tee/optee/optee_ffa.h:

@@ -81,7 +81,7 @@
  *          as the second MSG arg struct for
  *          OPTEE_FFA_YIELDING_CALL_WITH_ARG.
  *          Bit[31:8]: Reserved (MBZ)
- * w5:      Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
+ * w5:      Bitfield of OP-TEE capabilities OPTEE_FFA_SEC_CAP_*
  * w6:      The maximum secure world notification number
  * w7:      Not used (MBZ)
  */
@@ -94,6 +94,8 @@
 #define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF   BIT(1)
 /* OP-TEE supports probing for RPMB device if needed */
 #define OPTEE_FFA_SEC_CAP_RPMB_PROBE    BIT(2)
+/* OP-TEE supports Protected Memory for secure data path */
+#define OPTEE_FFA_SEC_CAP_PROTMEM       BIT(3)

 #define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)

@@ -108,7 +110,7 @@
  *
  * Return register usage:
  * w3:    Error code, 0 on success
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
  */
 #define OPTEE_FFA_UNREGISTER_SHM        OPTEE_FFA_BLOCKING_CALL(3)

@@ -119,16 +121,31 @@
  * Call register usage:
  * w3:    Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF
  * w4:    Notification value to request bottom half processing, should be
- *        less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE.
+ *        less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE
  * w5-w7: Not used (MBZ)
  *
  * Return register usage:
  * w3:    Error code, 0 on success
  * w4-w7: Not used (MBZ)
  */
 #define OPTEE_FFA_ENABLE_ASYNC_NOTIF    OPTEE_FFA_BLOCKING_CALL(5)

 #define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64

+/*
+ * Release Protected memory
+ *
+ * Call register usage:
+ * w3:    Service ID, OPTEE_FFA_RECLAIM_PROTMEM
+ * w4:    Shared memory handle, lower bits
+ * w5:    Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3:    Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_RELEASE_PROTMEM       OPTEE_FFA_BLOCKING_CALL(8)
+
 /*
  * Call with struct optee_msg_arg as argument in the supplied shared memory
drivers/tee/optee/optee_msg.h:

@@ -133,13 +133,13 @@ struct optee_msg_param_rmem {
 };

 /**
- * struct optee_msg_param_fmem - ffa memory reference parameter
+ * struct optee_msg_param_fmem - FF-A memory reference parameter
  * @offs_lower:    Lower bits of offset into shared memory reference
  * @offs_upper:    Upper bits of offset into shared memory reference
  * @internal_offs: Internal offset into the first page of shared memory
  *                 reference
  * @size:          Size of the buffer
- * @global_id:     Global identifier of Shared memory
+ * @global_id:     Global identifier of the shared memory
  */
 struct optee_msg_param_fmem {
        u32 offs_low;
@@ -165,7 +165,7 @@ struct optee_msg_param_value {
  * @attr:   attributes
  * @tmem:   parameter by temporary memory reference
  * @rmem:   parameter by registered memory reference
- * @fmem:   parameter by ffa registered memory reference
+ * @fmem:   parameter by FF-A registered memory reference
  * @value:  parameter by opaque value
  * @octets: parameter by octet string
  *
@@ -296,6 +296,18 @@ struct optee_msg_arg {
  */
 #define OPTEE_MSG_FUNCID_GET_OS_REVISION        0x0001

+/*
+ * Values used in OPTEE_MSG_CMD_LEND_PROTMEM below
+ * OPTEE_MSG_PROTMEM_RESERVED             Reserved
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY    Secure Video Playback
+ * OPTEE_MSG_PROTMEM_TRUSTED_UI           Trusted UI
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD  Secure Video Recording
+ */
+#define OPTEE_MSG_PROTMEM_RESERVED              0
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY     1
+#define OPTEE_MSG_PROTMEM_TRUSTED_UI            2
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD   3
+
 /*
  * Do a secure call with struct optee_msg_arg as argument
  * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
@@ -337,15 +349,63 @@ struct optee_msg_arg {
  * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is
  * normal world unable to process asynchronous notifications. Typically
  * used when the driver is shut down.
+ *
+ * OPTEE_MSG_CMD_LEND_PROTMEM lends protected memory. The passed normal
+ * physical memory is protected from normal world access. The memory
+ * should be unmapped prior to this call since it becomes inaccessible
+ * during the request.
+ * Parameters are passed as:
+ * [in] param[0].attr              OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a         OPTEE_MSG_PROTMEM_* defined above
+ * [in] param[1].attr              OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [in] param[1].u.tmem.buf_ptr    physical address
+ * [in] param[1].u.tmem.size       size
+ * [in] param[1].u.tmem.shm_ref    holds protected memory reference
+ *
+ * OPTEE_MSG_CMD_RECLAIM_PROTMEM reclaims a previously lent protected
+ * memory reference. The physical memory is accessible by the normal world
+ * after this function has returned and can be mapped again. The information
+ * is passed as:
+ * [in] param[0].attr              OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a         holds protected memory cookie
+ *
+ * OPTEE_MSG_CMD_GET_PROTMEM_CONFIG gets the configuration for a specific
+ * protected memory use case. Parameters are passed as:
+ * [in] param[0].attr              OPTEE_MSG_ATTR_TYPE_VALUE_INOUT
+ * [in] param[0].value.a           OPTEE_MSG_PROTMEM_*
+ * [in] param[1].attr              OPTEE_MSG_ATTR_TYPE_{R,F}MEM_OUTPUT
+ * [in] param[1].u.{r,f}mem        Buffer or NULL
+ * [in] param[1].u.{r,f}mem.size   Provided size of buffer or 0 for query
+ * output for the protected use case:
+ * [out] param[0].value.a          Minimal size of protected memory
+ * [out] param[0].value.b          Required alignment of size and start of
+ *                                 protected memory
+ * [out] param[0].value.c          PA width, max 64
+ * [out] param[1].{r,f}mem.size    Size of output data
+ * [out] param[1].{r,f}mem         If non-NULL, contains an array of
+ *                                 uint32_t memory attributes that must be
+ *                                 included when lending memory for this
+ *                                 use case
+ *
+ * OPTEE_MSG_CMD_ASSIGN_PROTMEM assigns a use-case to protected memory
+ * previously lent using the FFA_LEND framework ABI. Parameters are passed
+ * as:
+ * [in] param[0].attr              OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a         holds protected memory cookie
+ * [in] param[0].u.value.b         OPTEE_MSG_PROTMEM_* defined above
  */
 #define OPTEE_MSG_CMD_OPEN_SESSION              0
 #define OPTEE_MSG_CMD_INVOKE_COMMAND            1
 #define OPTEE_MSG_CMD_CLOSE_SESSION             2
 #define OPTEE_MSG_CMD_CANCEL                    3
 #define OPTEE_MSG_CMD_REGISTER_SHM              4
 #define OPTEE_MSG_CMD_UNREGISTER_SHM            5
 #define OPTEE_MSG_CMD_DO_BOTTOM_HALF            6
 #define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF          7
+#define OPTEE_MSG_CMD_LEND_PROTMEM              8
+#define OPTEE_MSG_CMD_RECLAIM_PROTMEM           9
+#define OPTEE_MSG_CMD_GET_PROTMEM_CONFIG        10
+#define OPTEE_MSG_CMD_ASSIGN_PROTMEM            11
 #define OPTEE_MSG_FUNCID_CALL_WITH_ARG          0x0004

 #endif /* _OPTEE_MSG_H */
drivers/tee/optee/optee_private.h:

@@ -176,9 +176,14 @@ struct optee;
  * @do_call_with_arg:  enters OP-TEE in secure world
  * @to_msg_param:      converts from struct tee_param to OPTEE_MSG parameters
  * @from_msg_param:    converts from OPTEE_MSG parameters to struct tee_param
+ * @lend_protmem:      lends physically contiguous memory as restricted
+ *                     memory, inaccessible by the kernel
+ * @reclaim_protmem:   reclaims restricted memory previously lent with
+ *                     @lend_protmem() and makes it accessible by the
+ *                     kernel again
  *
  * These OPs are only supposed to be used internally in the OP-TEE driver
- * as a way of abstracting the different methogs of entering OP-TEE in
+ * as a way of abstracting the different methods of entering OP-TEE in
  * secure world.
  */
 struct optee_ops {
@@ -191,6 +196,10 @@ struct optee_ops {
        int (*from_msg_param)(struct optee *optee, struct tee_param *params,
                              size_t num_params,
                              const struct optee_msg_param *msg_params);
+       int (*lend_protmem)(struct optee *optee, struct tee_shm *protmem,
+                           u32 *mem_attr, unsigned int ma_count,
+                           u32 use_case);
+       int (*reclaim_protmem)(struct optee *optee, struct tee_shm *protmem);
 };

 /**
@@ -274,6 +283,8 @@ struct optee_call_ctx {

 extern struct blocking_notifier_head optee_rpmb_intf_added;

+int optee_set_dma_mask(struct optee *optee, u_int pa_width);
+
 int optee_notif_init(struct optee *optee, u_int max_key);
 void optee_notif_uninit(struct optee *optee);
 int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
@@ -285,6 +296,8 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
 void optee_supp_init(struct optee_supp *supp);
 void optee_supp_uninit(struct optee_supp *supp);
 void optee_supp_release(struct optee_supp *supp);
+struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
+                                                      enum tee_dma_heap_id id);

 int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
                    struct tee_param *param);
drivers/tee/optee/optee_smc.h:

@@ -264,7 +264,6 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM     BIT(0)
 /* Secure world can communicate via previously unregistered shared memory */
 #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM      BIT(1)
-
 /*
  * Secure world supports commands "register/unregister shared memory",
  * secure world accepts command buffers located in any parts of non-secure RAM
@@ -280,6 +279,10 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_RPC_ARG               BIT(6)
 /* Secure world supports probing for RPMB device if needed */
 #define OPTEE_SMC_SEC_CAP_RPMB_PROBE            BIT(7)
+/* Secure world supports protected memory */
+#define OPTEE_SMC_SEC_CAP_PROTMEM               BIT(8)
+/* Secure world supports dynamic protected memory */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM       BIT(9)

 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES  9
 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -451,6 +454,38 @@ struct optee_smc_disable_shm_cache_result {

 /* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
 #define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG     19
+/*
+ * Get protected memory config
+ *
+ * Returns the protected memory config.
+ *
+ * Call register usage:
+ * a0   SMC Function ID, OPTEE_SMC_GET_PROTMEM_CONFIG
+ * a2-6 Not used, must be zero
+ * a7   Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0   OPTEE_SMC_RETURN_OK
+ * a1   Physical address of start of protected memory
+ * a2   Size of protected memory
+ * a3   PA width, max 64
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0   OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG     20
+#define OPTEE_SMC_GET_PROTMEM_CONFIG \
+       OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG)
+
+struct optee_smc_get_protmem_config_result {
+       unsigned long status;
+       unsigned long start;
+       unsigned long size;
+       unsigned long pa_width;
+};

 /*
  * Resume from RPC (for example after processing a foreign interrupt)
drivers/tee/optee/protmem.c (new file, 335 lines):

@@ -0,0 +1,335 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025, Linaro Limited
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"

struct optee_protmem_dyn_pool {
        struct tee_protmem_pool pool;
        struct gen_pool *gen_pool;
        struct optee *optee;
        size_t page_count;
        u32 *mem_attrs;
        u_int mem_attr_count;
        refcount_t refcount;
        u32 use_case;
        struct tee_shm *protmem;
        /* Protects when initializing and tearing down this struct */
        struct mutex mutex;
};

static struct optee_protmem_dyn_pool *
to_protmem_dyn_pool(struct tee_protmem_pool *pool)
{
        return container_of(pool, struct optee_protmem_dyn_pool, pool);
}

static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
        int rc;

        rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count);
        if (IS_ERR(rp->protmem)) {
                rc = PTR_ERR(rp->protmem);
                goto err_null_protmem;
        }

        /*
         * TODO unmap the memory range since the physical memory will
         * become inaccessible after the lend_protmem() call.
         *
         * If the platform supports a hypervisor at EL2, it will unmap the
         * intermediate physical memory for us and stop cache pre-fetch of
         * the memory.
         */
        rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem,
                                          rp->mem_attrs,
                                          rp->mem_attr_count, rp->use_case);
        if (rc)
                goto err_put_shm;
        rp->protmem->flags |= TEE_SHM_DYNAMIC;

        rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!rp->gen_pool) {
                rc = -ENOMEM;
                goto err_reclaim;
        }

        rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr,
                          rp->protmem->size, -1);
        if (rc)
                goto err_free_pool;

        refcount_set(&rp->refcount, 1);
        return 0;

err_free_pool:
        gen_pool_destroy(rp->gen_pool);
        rp->gen_pool = NULL;
err_reclaim:
        rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
err_put_shm:
        tee_shm_put(rp->protmem);
err_null_protmem:
        rp->protmem = NULL;
        return rc;
}

static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
        int rc = 0;

        if (!refcount_inc_not_zero(&rp->refcount)) {
                mutex_lock(&rp->mutex);
                if (rp->gen_pool) {
                        /*
                         * Another thread has already initialized the pool
                         * before us, or the pool was just about to be torn
                         * down. Either way we only need to increase the
                         * refcount and we're done.
                         */
                        refcount_inc(&rp->refcount);
                } else {
                        rc = init_dyn_protmem(rp);
                }
                mutex_unlock(&rp->mutex);
        }

        return rc;
}

static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
        gen_pool_destroy(rp->gen_pool);
        rp->gen_pool = NULL;

        rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
        rp->protmem->flags &= ~TEE_SHM_DYNAMIC;

        WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount");
        tee_shm_put(rp->protmem);
        rp->protmem = NULL;
}

static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
        if (refcount_dec_and_test(&rp->refcount)) {
                mutex_lock(&rp->mutex);
                if (rp->gen_pool)
                        release_dyn_protmem(rp);
                mutex_unlock(&rp->mutex);
        }
}

static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool,
                                     struct sg_table *sgt, size_t size,
                                     size_t *offs)
{
        struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
        size_t sz = ALIGN(size, PAGE_SIZE);
        phys_addr_t pa;
        int rc;

        rc = get_dyn_protmem(rp);
        if (rc)
                return rc;

        pa = gen_pool_alloc(rp->gen_pool, sz);
        if (!pa) {
                rc = -ENOMEM;
                goto err_put;
        }

        rc = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (rc)
                goto err_free;

        sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
        *offs = pa - rp->protmem->paddr;

        return 0;
err_free:
        gen_pool_free(rp->gen_pool, pa, size);
err_put:
        put_dyn_protmem(rp);

        return rc;
}

static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool,
                                     struct sg_table *sgt)
{
        struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
        struct scatterlist *sg;
        int i;

        for_each_sgtable_sg(sgt, sg, i)
                gen_pool_free(rp->gen_pool, sg_phys(sg), sg->length);
        sg_free_table(sgt);
        put_dyn_protmem(rp);
}

static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool,
                                          struct sg_table *sgt, size_t offs,
                                          struct tee_shm *shm,
                                          struct tee_shm **parent_shm)
{
        struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);

        *parent_shm = rp->protmem;

        return 0;
}

static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool)
{
        struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);

        mutex_destroy(&rp->mutex);
        kfree(rp);
}

static struct tee_protmem_pool_ops protmem_pool_ops_dyn = {
        .alloc = protmem_pool_op_dyn_alloc,
        .free = protmem_pool_op_dyn_free,
        .update_shm = protmem_pool_op_dyn_update_shm,
        .destroy_pool = pool_op_dyn_destroy_pool,
};

static int get_protmem_config(struct optee *optee, u32 use_case,
                              size_t *min_size, u_int *pa_width,
                              u32 *mem_attrs, u_int *ma_count)
{
        struct tee_param params[2] = {
                [0] = {
                        .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT,
                        .u.value.a = use_case,
                },
                [1] = {
                        .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT,
                },
        };
        struct optee_shm_arg_entry *entry;
        struct tee_shm *shm_param = NULL;
        struct optee_msg_arg *msg_arg;
        struct tee_shm *shm;
        u_int offs;
        int rc;

        if (mem_attrs && *ma_count) {
                params[1].u.memref.size = *ma_count * sizeof(*mem_attrs);
                shm_param = tee_shm_alloc_priv_buf(optee->ctx,
                                                   params[1].u.memref.size);
                if (IS_ERR(shm_param))
                        return PTR_ERR(shm_param);
                params[1].u.memref.shm = shm_param;
        }

        msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry,
                                    &shm, &offs);
        if (IS_ERR(msg_arg)) {
                rc = PTR_ERR(msg_arg);
                goto out_free_shm;
        }
        msg_arg->cmd = OPTEE_MSG_CMD_GET_PROTMEM_CONFIG;

        rc = optee->ops->to_msg_param(optee, msg_arg->params,
                                      ARRAY_SIZE(params), params);
        if (rc)
                goto out_free_msg;

        rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
        if (rc)
                goto out_free_msg;
        if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) {
                rc = -EINVAL;
                goto out_free_msg;
        }

        rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params),
                                        msg_arg->params);
        if (rc)
                goto out_free_msg;

        if (!msg_arg->ret && mem_attrs &&
            *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) {
                rc = -EINVAL;
                goto out_free_msg;
        }

        *min_size = params[0].u.value.a;
        *pa_width = params[0].u.value.c;
        *ma_count = params[1].u.memref.size / sizeof(*mem_attrs);

        if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) {
                rc = -ENOSPC;
                goto out_free_msg;
        }

        if (mem_attrs)
                memcpy(mem_attrs, tee_shm_get_va(shm_param, 0),
                       params[1].u.memref.size);

out_free_msg:
        optee_free_msg_arg(optee->ctx, entry, offs);
out_free_shm:
        if (shm_param)
                tee_shm_free(shm_param);
        return rc;
}

struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
                                                      enum tee_dma_heap_id id)
{
        struct optee_protmem_dyn_pool *rp;
        size_t min_size;
        u_int pa_width;
        int rc;

        rp = kzalloc(sizeof(*rp), GFP_KERNEL);
        if (!rp)
                return ERR_PTR(-ENOMEM);
        rp->use_case = id;

        rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL,
                                &rp->mem_attr_count);
        if (rc) {
                if (rc != -ENOSPC)
                        goto err;
                rp->mem_attrs = kcalloc(rp->mem_attr_count,
                                        sizeof(*rp->mem_attrs), GFP_KERNEL);
                if (!rp->mem_attrs) {
                        rc = -ENOMEM;
                        goto err;
                }
                rc = get_protmem_config(optee, id, &min_size, &pa_width,
                                        rp->mem_attrs, &rp->mem_attr_count);
                if (rc)
                        goto err_kfree_eps;
        }

        rc = optee_set_dma_mask(optee, pa_width);
        if (rc)
                goto err_kfree_eps;

        rp->pool.ops = &protmem_pool_ops_dyn;
        rp->optee = optee;
        rp->page_count = min_size / PAGE_SIZE;
        mutex_init(&rp->mutex);

        return &rp->pool;

err_kfree_eps:
        kfree(rp->mem_attrs);
err:
        kfree(rp);
        return ERR_PTR(rc);
}
drivers/tee/optee/smc_abi.c:

@@ -965,6 +965,70 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
        return rc;
 }

+static int optee_smc_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+                                 u32 *mem_attrs, unsigned int ma_count,
+                                 u32 use_case)
+{
+       struct optee_shm_arg_entry *entry;
+       struct optee_msg_arg *msg_arg;
+       struct tee_shm *shm;
+       u_int offs;
+       int rc;
+
+       msg_arg = optee_get_msg_arg(optee->ctx, 2, &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
+
+       msg_arg->cmd = OPTEE_MSG_CMD_LEND_PROTMEM;
+       msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+       msg_arg->params[0].u.value.a = use_case;
+       msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+       msg_arg->params[1].u.tmem.buf_ptr = protmem->paddr;
+       msg_arg->params[1].u.tmem.size = protmem->size;
+       msg_arg->params[1].u.tmem.shm_ref = (u_long)protmem;
+
+       rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+       if (rc)
+               goto out;
+       if (msg_arg->ret != TEEC_SUCCESS) {
+               rc = -EINVAL;
+               goto out;
+       }
+       protmem->sec_world_id = (u_long)protmem;
+
+out:
+       optee_free_msg_arg(optee->ctx, entry, offs);
+       return rc;
+}
+
+static int optee_smc_reclaim_protmem(struct optee *optee,
+                                    struct tee_shm *protmem)
+{
+       struct optee_shm_arg_entry *entry;
+       struct optee_msg_arg *msg_arg;
+       struct tee_shm *shm;
+       u_int offs;
+       int rc;
+
+       msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
+
+       msg_arg->cmd = OPTEE_MSG_CMD_RECLAIM_PROTMEM;
+       msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+       msg_arg->params[0].u.rmem.shm_ref = (u_long)protmem;
+
+       rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+       if (rc)
+               goto out;
+       if (msg_arg->ret != TEEC_SUCCESS)
+               rc = -EINVAL;
+
+out:
+       optee_free_msg_arg(optee->ctx, entry, offs);
+       return rc;
+}
+
 /*
  * 5. Asynchronous notification
  */
@@ -1216,6 +1280,8 @@ static const struct optee_ops optee_ops = {
        .do_call_with_arg = optee_smc_do_call_with_arg,
        .to_msg_param = optee_to_msg_param,
        .from_msg_param = optee_from_msg_param,
+       .lend_protmem = optee_smc_lend_protmem,
+       .reclaim_protmem = optee_smc_reclaim_protmem,
 };

 static int enable_async_notif(optee_invoke_fn *invoke_fn)
@@ -1583,6 +1649,74 @@ static inline int optee_load_fw(struct platform_device *pdev,
 }
 #endif

+static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee)
+{
+#if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL)
+       union {
+               struct arm_smccc_res smccc;
+               struct optee_smc_get_protmem_config_result result;
+       } res;
+       struct tee_protmem_pool *pool;
+       void *p;
+       int rc;
+
+       optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0,
+                            0, 0, 0, &res.smccc);
+       if (res.result.status != OPTEE_SMC_RETURN_OK)
+               return ERR_PTR(-EINVAL);
+
+       rc = optee_set_dma_mask(optee, res.result.pa_width);
+       if (rc)
+               return ERR_PTR(rc);
+
+       /*
+        * Map the memory as uncached to make sure the kernel can work with
+        * __pfn_to_page() and friends since that's needed when passing the
+        * protected DMA-buf to a device. The memory should otherwise not
+        * be touched by the kernel since it's likely to cause an external
+        * abort due to the protection status.
+        */
+       p = devm_memremap(&optee->teedev->dev, res.result.start,
+                         res.result.size, MEMREMAP_WC);
+       if (IS_ERR(p))
+               return p;
+
+       pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size);
+       if (IS_ERR(pool))
+               devm_memunmap(&optee->teedev->dev, p);
+
+       return pool;
+#else
+       return ERR_PTR(-EINVAL);
+#endif
+}
+
+static int optee_protmem_pool_init(struct optee *optee)
+{
+       bool protm = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM;
+       bool dyn_protm = optee->smc.sec_caps &
+                        OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM;
+       enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+       struct tee_protmem_pool *pool = ERR_PTR(-EINVAL);
+       int rc = -EINVAL;
+
+       if (!protm && !dyn_protm)
+               return 0;
+
+       if (protm)
+               pool = static_protmem_pool_init(optee);
+       if (dyn_protm && IS_ERR(pool))
+               pool = optee_protmem_alloc_dyn_pool(optee, heap_id);
+       if (IS_ERR(pool))
+               return PTR_ERR(pool);
+
+       rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool);
+       if (rc)
+               pool->ops->destroy_pool(pool);
+
+       return rc;
+}
+
 static int optee_probe(struct platform_device *pdev)
 {
        optee_invoke_fn *invoke_fn;
@@ -1678,7 +1812,7 @@ static int optee_probe(struct platform_device *pdev)
        optee = kzalloc(sizeof(*optee), GFP_KERNEL);
        if (!optee) {
                rc = -ENOMEM;
-               goto err_free_pool;
+               goto err_free_shm_pool;
        }

        optee->ops = &optee_ops;
@@ -1751,6 +1885,9 @@ static int optee_probe(struct platform_device *pdev)
                pr_info("Asynchronous notifications enabled\n");
        }

+       if (optee_protmem_pool_init(optee))
+               pr_info("Protected memory service not available\n");
+
        /*
         * Ensure that there are no pre-existing shm objects before enabling
         * the shm cache so that there's no chance of receiving an invalid
@@ -1802,7 +1939,7 @@ static int optee_probe(struct platform_device *pdev)
        tee_device_unregister(optee->teedev);
 err_free_optee:
        kfree(optee);
-err_free_pool:
+err_free_shm_pool:
        tee_shm_pool_free(pool);
        if (memremaped_shm)
                memunmap(memremaped_shm);
drivers/tee/tee_core.c:

@@ -317,6 +317,113 @@ tee_ioctl_shm_register(struct tee_context *ctx,
        return ret;
 }

+static int
+tee_ioctl_shm_register_fd(struct tee_context *ctx,
+                         struct tee_ioctl_shm_register_fd_data __user *udata)
+{
+       struct tee_ioctl_shm_register_fd_data data;
+       struct tee_shm *shm;
+       long ret;
+
+       if (copy_from_user(&data, udata, sizeof(data)))
+               return -EFAULT;
+
+       /* Currently no input flags are supported */
+       if (data.flags)
+               return -EINVAL;
+
+       shm = tee_shm_register_fd(ctx, data.fd);
+       if (IS_ERR(shm))
+               return -EINVAL;
+
+       data.id = shm->id;
+       data.flags = shm->flags;
+       data.size = shm->size;
+
+       if (copy_to_user(udata, &data, sizeof(data)))
+               ret = -EFAULT;
+       else
+               ret = tee_shm_get_fd(shm);
+
+       /*
+        * When user space closes the file descriptor the shared memory
+        * should be freed or if tee_shm_get_fd() failed then it will
+        * be freed immediately.
+        */
+       tee_shm_put(shm);
+       return ret;
+}
+
+static int param_from_user_memref(struct tee_context *ctx,
+                                 struct tee_param_memref *memref,
+                                 struct tee_ioctl_param *ip)
+{
+       struct tee_shm *shm;
+       size_t offs = 0;
+
+       /*
+        * If a NULL pointer is passed to a TA in the TEE,
+        * the ip.c IOCTL parameter is set to TEE_MEMREF_NULL
+        * indicating a NULL memory reference.
+        */
+       if (ip->c != TEE_MEMREF_NULL) {
+               /*
+                * If we fail to get a pointer to a shared
+                * memory object (and increase the ref count)
+                * from an identifier we return an error. All
+                * pointers that have been added in params have
+                * an increased ref count. It's the caller's
+                * responsibility to do tee_shm_put() on all
+                * resolved pointers.
+                */
+               shm = tee_shm_get_from_id(ctx, ip->c);
+               if (IS_ERR(shm))
+                       return PTR_ERR(shm);
+
+               /*
+                * Ensure offset + size does not overflow
+                * offset and does not overflow the size of
+                * the referred shared memory object.
+                */
+               if ((ip->a + ip->b) < ip->a ||
+                   (ip->a + ip->b) > shm->size) {
+                       tee_shm_put(shm);
+                       return -EINVAL;
+               }
+
+               if (shm->flags & TEE_SHM_DMA_BUF) {
+                       struct tee_shm_dmabuf_ref *ref;
+
+                       ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+                       if (ref->parent_shm) {
+                               /*
+                                * The shm already has one reference to
+                                * ref->parent_shm so we are clear of 0.
+                                * We're getting another reference since
+                                * this shm will be used in the parameter
+                                * list instead of the shm we got with
+                                * tee_shm_get_from_id() above.
+                                */
+                               refcount_inc(&ref->parent_shm->refcount);
+                               tee_shm_put(shm);
+                               shm = ref->parent_shm;
+                               offs = ref->offset;
+                       }
+               }
+       } else if (ctx->cap_memref_null) {
+               /* Pass NULL pointer to OP-TEE */
+               shm = NULL;
+       } else {
+               return -EINVAL;
+       }
+
+       memref->shm_offs = ip->a + offs;
+       memref->size = ip->b;
+       memref->shm = shm;
+
+       return 0;
+}
+
 static int params_from_user(struct tee_context *ctx, struct tee_param *params,
                            size_t num_params,
                            struct tee_ioctl_param __user *uparams)
@@ -324,8 +431,8 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
        size_t n;

        for (n = 0; n < num_params; n++) {
-               struct tee_shm *shm;
                struct tee_ioctl_param ip;
+               int rc;

                if (copy_from_user(&ip, uparams + n, sizeof(ip)))
                        return -EFAULT;
@@ -348,45 +455,10 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
                case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
                case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
                case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-                       /*
-                        * If a NULL pointer is passed to a TA in the TEE,
-                        * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL
-                        * indicating a NULL memory reference.
-                        */
-                       if (ip.c != TEE_MEMREF_NULL) {
-                               /*
-                                * If we fail to get a pointer to a shared
-                                * memory object (and increase the ref count)
-                                * from an identifier we return an error. All
-                                * pointers that has been added in params have
-                                * an increased ref count. It's the callers
-                                * responibility to do tee_shm_put() on all
-                                * resolved pointers.
-                                */
-                               shm = tee_shm_get_from_id(ctx, ip.c);
-                               if (IS_ERR(shm))
-                                       return PTR_ERR(shm);
-
-                               /*
-                                * Ensure offset + size does not overflow
-                                * offset and does not overflow the size of
-                                * the referred shared memory object.
-                                */
-                               if ((ip.a + ip.b) < ip.a ||
-                                   (ip.a + ip.b) > shm->size) {
-                                       tee_shm_put(shm);
-                                       return -EINVAL;
-                               }
-                       } else if (ctx->cap_memref_null) {
-                               /* Pass NULL pointer to OP-TEE */
-                               shm = NULL;
-                       } else {
-                               return -EINVAL;
-                       }
-
-                       params[n].u.memref.shm_offs = ip.a;
-                       params[n].u.memref.size = ip.b;
-                       params[n].u.memref.shm = shm;
+                       rc = param_from_user_memref(ctx, &params[n].u.memref,
+                                                   &ip);
+                       if (rc)
+                               return rc;
                        break;
                default:
                        /* Unknown attribute */
@@ -791,6 +863,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return tee_ioctl_shm_alloc(ctx, uarg);
        case TEE_IOC_SHM_REGISTER:
                return tee_ioctl_shm_register(ctx, uarg);
+       case TEE_IOC_SHM_REGISTER_FD:
+               return tee_ioctl_shm_register_fd(ctx, uarg);
        case TEE_IOC_OPEN_SESSION:
                return tee_ioctl_open_session(ctx, uarg);
        case TEE_IOC_INVOKE:
@@ -1027,6 +1101,8 @@ void tee_device_unregister(struct tee_device *teedev)
        if (!teedev)
                return;

+       tee_device_put_all_dma_heaps(teedev);
+
        if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
                cdev_device_del(&teedev->cdev, &teedev->dev);

@@ -1250,3 +1326,5 @@ MODULE_AUTHOR("Linaro");
 MODULE_DESCRIPTION("TEE Driver");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_IMPORT_NS("DMA_BUF_HEAP");
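From the user-space side, the new TEE_IOC_SHM_REGISTER_FD ioctl is driven as sketched below. The field names mirror the tee_ioctl_shm_register_fd() handler above; consult the updated <linux/tee.h> for the authoritative struct layout, and note that the returned file descriptor keeps the registration alive.

/*
 * Sketch: register a DMA-buf fd (for instance one allocated from a
 * "protected,*" heap) as a tee_shm object, so it can be referenced as
 * a memref parameter in TEE_IOC_INVOKE. Struct fields follow the
 * tee_ioctl_shm_register_fd() handler above.
 */
#include <sys/ioctl.h>
#include <linux/tee.h>

int register_dmabuf_with_tee(int tee_fd, int dmabuf_fd, int *shm_id)
{
        struct tee_ioctl_shm_register_fd_data data = {
                .fd = dmabuf_fd,
                .flags = 0,     /* no input flags are defined yet */
        };
        int shm_fd;

        shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);
        if (shm_fd < 0)
                return -1;

        /*
         * data.id identifies the shm object in invoke parameters; keep
         * shm_fd open for as long as the registration is needed.
         */
        *shm_id = data.id;
        return shm_fd;
}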
500
drivers/tee/tee_heap.c
Normal file
500
drivers/tee/tee_heap.c
Normal file
@@ -0,0 +1,500 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2025, Linaro Limited
|
||||
*/
|
||||
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/dma-heap.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tee_core.h>
|
||||
#include <linux/xarray.h>
|
||||
|
||||
#include "tee_private.h"
|
||||
|
||||
struct tee_dma_heap {
|
||||
struct dma_heap *heap;
|
||||
enum tee_dma_heap_id id;
|
||||
struct kref kref;
|
||||
struct tee_protmem_pool *pool;
|
||||
struct tee_device *teedev;
|
||||
bool shutting_down;
|
||||
/* Protects pool, teedev, and shutting_down above */
|
||||
struct mutex mu;
|
||||
};
|
||||
|
||||
struct tee_heap_buffer {
|
||||
struct tee_dma_heap *heap;
|
||||
size_t size;
|
||||
size_t offs;
|
||||
struct sg_table table;
|
||||
};
|
||||
|
||||
struct tee_heap_attachment {
|
||||
struct sg_table table;
|
||||
struct device *dev;
|
||||
};
|
||||
|
||||
struct tee_protmem_static_pool {
|
||||
struct tee_protmem_pool pool;
|
||||
struct gen_pool *gen_pool;
|
||||
phys_addr_t pa_base;
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
|
||||
static DEFINE_XARRAY_ALLOC(tee_dma_heap);
|
||||
|
||||
static void tee_heap_release(struct kref *kref)
|
||||
{
|
||||
struct tee_dma_heap *h = container_of(kref, struct tee_dma_heap, kref);
|
||||
|
||||
h->pool->ops->destroy_pool(h->pool);
|
||||
tee_device_put(h->teedev);
|
||||
h->pool = NULL;
|
||||
h->teedev = NULL;
|
||||
}
|
||||
|
||||
static void put_tee_heap(struct tee_dma_heap *h)
|
||||
{
|
||||
kref_put(&h->kref, tee_heap_release);
|
||||
}
|
||||
|
||||
static void get_tee_heap(struct tee_dma_heap *h)
|
||||
{
|
||||
kref_get(&h->kref);
|
||||
}
|
||||
|
||||
static int copy_sg_table(struct sg_table *dst, struct sg_table *src)
|
||||
{
|
||||
struct scatterlist *dst_sg;
|
||||
struct scatterlist *src_sg;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dst_sg = dst->sgl;
|
||||
for_each_sgtable_sg(src, src_sg, i) {
|
||||
sg_set_page(dst_sg, sg_page(src_sg), src_sg->length,
|
||||
src_sg->offset);
|
||||
dst_sg = sg_next(dst_sg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tee_heap_attach(struct dma_buf *dmabuf,
|
||||
struct dma_buf_attachment *attachment)
|
||||
{
|
||||
struct tee_heap_buffer *buf = dmabuf->priv;
|
||||
struct tee_heap_attachment *a;
|
||||
int ret;
|
||||
|
||||
a = kzalloc(sizeof(*a), GFP_KERNEL);
|
||||
if (!a)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = copy_sg_table(&a->table, &buf->table);
|
||||
if (ret) {
|
||||
kfree(a);
|
||||
return ret;
|
||||
}
|
||||
|
||||
a->dev = attachment->dev;
|
||||
attachment->priv = a;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tee_heap_detach(struct dma_buf *dmabuf,
|
||||
struct dma_buf_attachment *attachment)
|
||||
{
|
||||
struct tee_heap_attachment *a = attachment->priv;
|
||||
|
||||
sg_free_table(&a->table);
|
||||
kfree(a);
|
||||
}
|
||||
|
||||
static struct sg_table *
|
||||
tee_heap_map_dma_buf(struct dma_buf_attachment *attachment,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
struct tee_heap_attachment *a = attachment->priv;
|
||||
int ret;
|
||||
|
||||
ret = dma_map_sgtable(attachment->dev, &a->table, direction,
|
||||
DMA_ATTR_SKIP_CPU_SYNC);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
return &a->table;
|
||||
}
|
||||
|
||||
static void tee_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
||||
struct sg_table *table,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
struct tee_heap_attachment *a = attachment->priv;
|
||||
|
||||
WARN_ON(&a->table != table);
|
||||
|
||||
dma_unmap_sgtable(attachment->dev, table, direction,
|
||||
DMA_ATTR_SKIP_CPU_SYNC);
|
||||
}
|
||||
|
||||
static void tee_heap_buf_free(struct dma_buf *dmabuf)
|
||||
{
|
||||
struct tee_heap_buffer *buf = dmabuf->priv;
|
||||
|
||||
buf->heap->pool->ops->free(buf->heap->pool, &buf->table);
|
||||
mutex_lock(&buf->heap->mu);
|
||||
put_tee_heap(buf->heap);
|
||||
mutex_unlock(&buf->heap->mu);
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
static const struct dma_buf_ops tee_heap_buf_ops = {
|
||||
.attach = tee_heap_attach,
|
||||
.detach = tee_heap_detach,
|
||||
.map_dma_buf = tee_heap_map_dma_buf,
|
||||
.unmap_dma_buf = tee_heap_unmap_dma_buf,
|
||||
.release = tee_heap_buf_free,
|
||||
};
|
||||
|
||||
static struct dma_buf *tee_dma_heap_alloc(struct dma_heap *heap,
|
||||
unsigned long len, u32 fd_flags,
|
||||
u64 heap_flags)
|
||||
{
|
||||
struct tee_dma_heap *h = dma_heap_get_drvdata(heap);
|
||||
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
|
||||
struct tee_device *teedev = NULL;
|
||||
struct tee_heap_buffer *buf;
|
||||
struct tee_protmem_pool *pool;
|
||||
struct dma_buf *dmabuf;
|
||||
int rc;
|
||||
|
||||
mutex_lock(&h->mu);
|
||||
if (h->teedev) {
|
||||
teedev = h->teedev;
|
||||
pool = h->pool;
|
||||
get_tee_heap(h);
|
||||
}
|
||||
mutex_unlock(&h->mu);
|
||||
|
||||
if (!teedev)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
|
||||
if (!buf) {
|
||||
dmabuf = ERR_PTR(-ENOMEM);
|
||||
goto err;
|
||||
}
|
||||
buf->size = len;
|
||||
buf->heap = h;
|
||||
|
||||
rc = pool->ops->alloc(pool, &buf->table, len, &buf->offs);
|
||||
if (rc) {
|
||||
dmabuf = ERR_PTR(rc);
|
||||
goto err_kfree;
|
||||
}
|
||||
|
||||
exp_info.ops = &tee_heap_buf_ops;
|
||||
exp_info.size = len;
|
||||
exp_info.priv = buf;
|
||||
exp_info.flags = fd_flags;
|
||||
dmabuf = dma_buf_export(&exp_info);
|
||||
if (IS_ERR(dmabuf))
|
||||
goto err_protmem_free;
|
||||
|
||||
return dmabuf;
|
||||
|
||||
err_protmem_free:
|
||||
pool->ops->free(pool, &buf->table);
|
||||
err_kfree:
|
||||
kfree(buf);
|
||||
err:
|
||||
mutex_lock(&h->mu);
|
||||
put_tee_heap(h);
|
||||
mutex_unlock(&h->mu);
|
||||
return dmabuf;
|
||||
}
|
||||
|
||||
static const struct dma_heap_ops tee_dma_heap_ops = {
|
||||
.allocate = tee_dma_heap_alloc,
|
||||
};
|
||||
|
||||
static const char *heap_id_2_name(enum tee_dma_heap_id id)
|
||||
{
|
||||
switch (id) {
|
||||
case TEE_DMA_HEAP_SECURE_VIDEO_PLAY:
|
||||
return "protected,secure-video";
|
||||
case TEE_DMA_HEAP_TRUSTED_UI:
|
||||
return "protected,trusted-ui";
|
||||
case TEE_DMA_HEAP_SECURE_VIDEO_RECORD:
|
||||
return "protected,secure-video-record";
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int alloc_dma_heap(struct tee_device *teedev, enum tee_dma_heap_id id,
|
||||
struct tee_protmem_pool *pool)
|
||||
{
|
||||
struct dma_heap_export_info exp_info = {
|
||||
.ops = &tee_dma_heap_ops,
|
||||
.name = heap_id_2_name(id),
|
||||
};
|
||||
struct tee_dma_heap *h;
|
||||
int rc;
|
||||
|
||||
if (!exp_info.name)
|
||||
return -EINVAL;
|
||||
|
||||
if (xa_reserve(&tee_dma_heap, id, GFP_KERNEL)) {
|
||||
if (!xa_load(&tee_dma_heap, id))
|
||||
return -EEXIST;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
h = kzalloc(sizeof(*h), GFP_KERNEL);
|
||||
if (!h)
|
||||
return -ENOMEM;
|
||||
h->id = id;
|
||||
kref_init(&h->kref);
|
||||
h->teedev = teedev;
|
||||
h->pool = pool;
|
||||
mutex_init(&h->mu);
|
||||
|
||||
exp_info.priv = h;
|
||||
h->heap = dma_heap_add(&exp_info);
|
||||
if (IS_ERR(h->heap)) {
|
||||
rc = PTR_ERR(h->heap);
|
||||
kfree(h);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* "can't fail" due to the call to xa_reserve() above */
|
||||
return WARN_ON(xa_is_err(xa_store(&tee_dma_heap, id, h, GFP_KERNEL)));
|
||||
}
|
||||
|
||||
int tee_device_register_dma_heap(struct tee_device *teedev,
|
||||
enum tee_dma_heap_id id,
|
||||
struct tee_protmem_pool *pool)
|
||||
{
|
||||
struct tee_dma_heap *h;
|
||||
int rc;
|
||||
|
||||
if (!tee_device_get(teedev))
|
||||
return -EINVAL;
|
||||
|
||||
h = xa_load(&tee_dma_heap, id);
|
||||
if (h) {
|
||||
mutex_lock(&h->mu);
|
||||
if (h->teedev) {
|
||||
rc = -EBUSY;
|
||||
} else {
|
||||
kref_init(&h->kref);
|
||||
h->shutting_down = false;
|
||||
h->teedev = teedev;
|
||||
h->pool = pool;
|
||||
rc = 0;
|
||||
}
|
||||
mutex_unlock(&h->mu);
|
||||
} else {
|
||||
rc = alloc_dma_heap(teedev, id, pool);
|
||||
}
|
||||
|
||||
if (rc) {
|
||||
tee_device_put(teedev);
|
||||
dev_err(&teedev->dev, "can't register DMA heap id %d (%s)\n",
|
||||
id, heap_id_2_name(id));
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);
|
||||
|
||||
void tee_device_put_all_dma_heaps(struct tee_device *teedev)
|
||||
{
|
||||
struct tee_dma_heap *h;
|
||||
u_long i;
|
||||
|
||||
xa_for_each(&tee_dma_heap, i, h) {
|
||||
if (h) {
|
||||
mutex_lock(&h->mu);
|
||||
if (h->teedev == teedev && !h->shutting_down) {
|
||||
h->shutting_down = true;
|
||||
put_tee_heap(h);
|
||||
}
|
||||
mutex_unlock(&h->mu);
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);
|
||||
|
||||
int tee_heap_update_from_dma_buf(struct tee_device *teedev,
|
||||
struct dma_buf *dmabuf, size_t *offset,
|
||||
struct tee_shm *shm,
|
||||
struct tee_shm **parent_shm)
|
||||
{
|
||||
struct tee_heap_buffer *buf;
|
||||
int rc;
|
||||
|
||||
/* The DMA-buf must be from our heap */
|
||||
if (dmabuf->ops != &tee_heap_buf_ops)
|
||||
return -EINVAL;
|
||||
|
||||
buf = dmabuf->priv;
|
||||
/* The buffer must be from the same teedev */
|
||||
if (buf->heap->teedev != teedev)
|
||||
return -EINVAL;
|
||||
|
||||
shm->size = buf->size;
|
||||
|
||||
rc = buf->heap->pool->ops->update_shm(buf->heap->pool, &buf->table,
|
||||
buf->offs, shm, parent_shm);
|
||||
if (!rc && *parent_shm)
|
||||
*offset = buf->offs;
|
||||
|
||||
return rc;
|
||||
}
|
||||
#else
|
||||
int tee_device_register_dma_heap(struct tee_device *teedev __always_unused,
|
||||
enum tee_dma_heap_id id __always_unused,
|
||||
struct tee_protmem_pool *pool __always_unused)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);
|
||||
|
||||
void
|
||||
tee_device_put_all_dma_heaps(struct tee_device *teedev __always_unused)
|
||||
{
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);
|
||||
|
||||
int tee_heap_update_from_dma_buf(struct tee_device *teedev __always_unused,
|
||||
struct dma_buf *dmabuf __always_unused,
|
||||
size_t *offset __always_unused,
|
||||
struct tee_shm *shm __always_unused,
|
||||
struct tee_shm **parent_shm __always_unused)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct tee_protmem_static_pool *
to_protmem_static_pool(struct tee_protmem_pool *pool)
{
	return container_of(pool, struct tee_protmem_static_pool, pool);
}

static int protmem_pool_op_static_alloc(struct tee_protmem_pool *pool,
					struct sg_table *sgt, size_t size,
					size_t *offs)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
	phys_addr_t pa;
	int ret;

	pa = gen_pool_alloc(stp->gen_pool, size);
	if (!pa)
		return -ENOMEM;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret) {
		gen_pool_free(stp->gen_pool, pa, size);
		return ret;
	}

	sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
	*offs = pa - stp->pa_base;

	return 0;
}

static void protmem_pool_op_static_free(struct tee_protmem_pool *pool,
					struct sg_table *sgt)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		gen_pool_free(stp->gen_pool, sg_phys(sg), sg->length);
	sg_free_table(sgt);
}

static int protmem_pool_op_static_update_shm(struct tee_protmem_pool *pool,
					     struct sg_table *sgt, size_t offs,
					     struct tee_shm *shm,
					     struct tee_shm **parent_shm)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);

	shm->paddr = stp->pa_base + offs;
	*parent_shm = NULL;

	return 0;
}

static void protmem_pool_op_static_destroy_pool(struct tee_protmem_pool *pool)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);

	gen_pool_destroy(stp->gen_pool);
	kfree(stp);
}

static struct tee_protmem_pool_ops protmem_pool_ops_static = {
	.alloc = protmem_pool_op_static_alloc,
	.free = protmem_pool_op_static_free,
	.update_shm = protmem_pool_op_static_update_shm,
	.destroy_pool = protmem_pool_op_static_destroy_pool,
};

struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
						       size_t size)
{
	const size_t page_mask = PAGE_SIZE - 1;
	struct tee_protmem_static_pool *stp;
	int rc;

	/* Check it's page aligned */
	if ((paddr | size) & page_mask)
		return ERR_PTR(-EINVAL);

	if (!pfn_valid(PHYS_PFN(paddr)))
		return ERR_PTR(-EINVAL);

	stp = kzalloc(sizeof(*stp), GFP_KERNEL);
	if (!stp)
		return ERR_PTR(-ENOMEM);

	stp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!stp->gen_pool) {
		rc = -ENOMEM;
		goto err_free;
	}

	rc = gen_pool_add(stp->gen_pool, paddr, size, -1);
	if (rc)
		goto err_free_pool;

	stp->pool.ops = &protmem_pool_ops_static;
	stp->pa_base = paddr;
	return &stp->pool;

err_free_pool:
	gen_pool_destroy(stp->gen_pool);
err_free:
	kfree(stp);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_protmem_static_pool_alloc);
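
A hedged illustration (not part of this diff) of how these pieces fit together: a backend TEE driver creates a pool over its protected carve-out and registers it under one of the predefined heap IDs. The physical range and the example_-prefixed function names are assumptions for the sketch, not code from this series.

/* Sketch only: wire a static carve-out into the protected DMA-heap machinery. */
static int example_register_protmem(struct tee_device *teedev)
{
	struct tee_protmem_pool *pool;
	int rc;

	/* Assumed page-aligned protected carve-out; the addresses are made up. */
	pool = tee_protmem_static_pool_alloc(0x80000000, SZ_16M);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	rc = tee_device_register_dma_heap(teedev, TEE_DMA_HEAP_SECURE_VIDEO_PLAY,
					  pool);
	if (rc)
		pool->ops->destroy_pool(pool);
	return rc;
}

/* On teardown, a single call drops every heap the device registered. */
static void example_unregister_protmem(struct tee_device *teedev)
{
	tee_device_put_all_dma_heaps(teedev);
}
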
@@ -8,10 +8,19 @@
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* extra references appended to shm object for registered shared memory */
struct tee_shm_dmabuf_ref {
	struct tee_shm shm;
	size_t offset;
	struct dma_buf *dmabuf;
	struct tee_shm *parent_shm;
};

int tee_shm_get_fd(struct tee_shm *shm);

bool tee_device_get(struct tee_device *teedev);

@@ -24,4 +33,9 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size);
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length);

int tee_heap_update_from_dma_buf(struct tee_device *teedev,
				 struct dma_buf *dmabuf, size_t *offset,
				 struct tee_shm *shm,
				 struct tee_shm **parent_shm);

#endif /*TEE_PRIVATE_H*/

@@ -4,6 +4,9 @@
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>

@@ -12,9 +15,14 @@
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"

struct tee_shm_dma_mem {
	struct tee_shm shm;
	dma_addr_t dma_addr;
	struct page *page;
};

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

@@ -45,7 +53,24 @@ static void release_registered_pages(struct tee_shm *shm)

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	void *p = shm;

	if (shm->flags & TEE_SHM_DMA_MEM) {
#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
		struct tee_shm_dma_mem *dma_mem;

		dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
		p = dma_mem;
		dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
			       dma_mem->dma_addr, DMA_BIDIRECTIONAL);
#endif
	} else if (shm->flags & TEE_SHM_DMA_BUF) {
		struct tee_shm_dmabuf_ref *ref;

		ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
		p = ref;
		dma_buf_put(ref->dmabuf);
	} else if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

@@ -59,7 +84,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)

	teedev_ctx_put(shm->ctx);

	kfree(p);

	tee_device_put(teedev);
}

@@ -169,7 +194,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{

@@ -179,6 +204,62 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
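
As the kernel-doc above says, a buffer allocated this way is handed to the TEE through invoke parameters and released with tee_shm_free(). A minimal usage sketch (the example_ function is illustrative, not from this series):

/* Sketch only: allocate, use, and free a kernel shared-memory buffer. */
static int example_kernel_buf(struct tee_context *ctx)
{
	struct tee_shm *shm;

	shm = tee_shm_alloc_kernel_buf(ctx, PAGE_SIZE);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... fill the buffer and pass it via tee_client_invoke_func() ... */

	tee_shm_free(shm);
	return 0;
}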

struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
{
	struct tee_shm_dmabuf_ref *ref;
	int rc;

	if (!tee_device_get(ctx->teedev))
		return ERR_PTR(-EINVAL);

	teedev_ctx_get(ctx);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		rc = -ENOMEM;
		goto err_put_tee;
	}

	refcount_set(&ref->shm.refcount, 1);
	ref->shm.ctx = ctx;
	ref->shm.id = -1;
	ref->shm.flags = TEE_SHM_DMA_BUF;

	ref->dmabuf = dma_buf_get(fd);
	if (IS_ERR(ref->dmabuf)) {
		rc = PTR_ERR(ref->dmabuf);
		goto err_kfree_ref;
	}

	rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
					  &ref->offset, &ref->shm,
					  &ref->parent_shm);
	if (rc)
		goto err_put_dmabuf;

	mutex_lock(&ref->shm.ctx->teedev->mutex);
	ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
				1, 0, GFP_KERNEL);
	mutex_unlock(&ref->shm.ctx->teedev->mutex);
	if (ref->shm.id < 0) {
		rc = ref->shm.id;
		goto err_put_dmabuf;
	}

	return &ref->shm;

err_put_dmabuf:
	dma_buf_put(ref->dmabuf);
err_kfree_ref:
	kfree(ref);
err_put_tee:
	teedev_ctx_put(ctx);
	tee_device_put(ctx->teedev);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_shm_register_fd);

/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer

@@ -203,6 +284,71 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
/**
 * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object
 * @ctx: Context that allocates the shared memory
 * @page_count: Number of pages
 *
 * The allocated memory is expected to be lent (made inaccessible to the
 * kernel) to the TEE while it's used and returned (accessible to the
 * kernel again) before it's freed.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
				      size_t page_count)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_dma_mem *dma_mem;
	dma_addr_t dma_addr;
	struct page *page;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
			       &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!page)
		goto err_put_teedev;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
	if (!dma_mem)
		goto err_free_pages;

	refcount_set(&dma_mem->shm.refcount, 1);
	dma_mem->shm.ctx = ctx;
	dma_mem->shm.paddr = page_to_phys(page);
	dma_mem->dma_addr = dma_addr;
	dma_mem->page = page;
	dma_mem->shm.size = page_count * PAGE_SIZE;
	dma_mem->shm.flags = TEE_SHM_DMA_MEM;

	teedev_ctx_get(ctx);

	return &dma_mem->shm;

err_free_pages:
	dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
		       DMA_BIDIRECTIONAL);
err_put_teedev:
	tee_device_put(teedev);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
#else
struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
				      size_t page_count)
{
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
#endif
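
A hedged sketch of the intended calling pattern: a backend TEE driver allocates pages, lends shm->paddr/shm->size to the TEE, reclaims them, and then frees the object, which takes the TEE_SHM_DMA_MEM path in tee_shm_release() above. The example_ name is illustrative only.

/* Sketch only: allocate DMA pages for lending to the TEE. */
static int example_dma_mem(struct tee_context *ctx)
{
	struct tee_shm *shm;

	shm = tee_shm_alloc_dma_mem(ctx, 4);	/* four pages */
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/*
	 * Backend-specific code lends shm->paddr / shm->size to the TEE
	 * here and must take the memory back before freeing it.
	 */

	tee_shm_free(shm);
	return 0;
}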

int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
			     int (*shm_register)(struct tee_context *ctx,
						 struct tee_shm *shm,

@@ -442,6 +588,9 @@ static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;
	/* Refuse sharing registered DMA-bufs with the application */
	if (shm->flags & TEE_SHM_DMA_BUF)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)

@@ -8,9 +8,11 @@

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/tee.h>
#include <linux/tee_drv.h>
#include <linux/types.h>

@@ -26,10 +28,19 @@
#define TEE_SHM_USER_MAPPED	BIT(1)	/* Memory mapped in user space */
#define TEE_SHM_POOL		BIT(2)	/* Memory allocated from pool */
#define TEE_SHM_PRIV		BIT(3)	/* Memory private to TEE driver */
#define TEE_SHM_DMA_BUF		BIT(4)	/* Memory with dma-buf handle */
#define TEE_SHM_DMA_MEM		BIT(5)	/* Memory allocated with */
					/* dma_alloc_pages() */

#define TEE_DEVICE_FLAG_REGISTERED	0x1
#define TEE_MAX_DEV_NAME_LEN		32

enum tee_dma_heap_id {
	TEE_DMA_HEAP_SECURE_VIDEO_PLAY = 1,
	TEE_DMA_HEAP_TRUSTED_UI,
	TEE_DMA_HEAP_SECURE_VIDEO_RECORD,
};

/**
 * struct tee_device - TEE Device representation
 * @name:	name of device

@@ -116,6 +127,36 @@ struct tee_desc {
	u32 flags;
};

/**
 * struct tee_protmem_pool - protected memory pool
 * @ops: operations
 *
 * This is an abstract interface where this struct is expected to be
 * embedded in another struct specific to the implementation.
 */
struct tee_protmem_pool {
	const struct tee_protmem_pool_ops *ops;
};

/**
 * struct tee_protmem_pool_ops - protected memory pool operations
 * @alloc: called when allocating protected memory
 * @free: called when freeing protected memory
 * @update_shm: called when registering a dma-buf to update the @shm
 *		with the physical address of the buffer or to return the
 *		@parent_shm of the memory pool
 * @destroy_pool: called when destroying the pool
 */
struct tee_protmem_pool_ops {
	int (*alloc)(struct tee_protmem_pool *pool, struct sg_table *sgt,
		     size_t size, size_t *offs);
	void (*free)(struct tee_protmem_pool *pool, struct sg_table *sgt);
	int (*update_shm)(struct tee_protmem_pool *pool, struct sg_table *sgt,
			  size_t offs, struct tee_shm *shm,
			  struct tee_shm **parent_shm);
	void (*destroy_pool)(struct tee_protmem_pool *pool);
};
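
Since the interface is abstract, an implementation embeds struct tee_protmem_pool and recovers its private state with container_of(), as the static pool in tee_heap.c above does. A skeletal sketch with illustrative names only:

/* Sketch only: the embedding pattern a custom pool implementation follows. */
struct example_protmem_pool {
	struct tee_protmem_pool pool;	/* embedded abstract interface */
	/* implementation-specific bookkeeping goes here */
};

static void example_pool_destroy(struct tee_protmem_pool *pool)
{
	struct example_protmem_pool *ep =
		container_of(pool, struct example_protmem_pool, pool);

	kfree(ep);
}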

/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc:	Descriptor for this driver

@@ -154,6 +195,11 @@ int tee_device_register(struct tee_device *teedev);
 */
void tee_device_unregister(struct tee_device *teedev);

int tee_device_register_dma_heap(struct tee_device *teedev,
				 enum tee_dma_heap_id id,
				 struct tee_protmem_pool *pool);
void tee_device_put_all_dma_heaps(struct tee_device *teedev);

/**
 * tee_device_set_dev_groups() - Set device attribute groups
 * @teedev:	Device to register

@@ -229,6 +275,16 @@ static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
	pool->ops->destroy_pool(pool);
}

/**
 * tee_protmem_static_pool_alloc() - Create a protected memory manager
 * @paddr:	Physical address of start of pool
 * @size:	Size in bytes of the pool
 *
 * @returns pointer to a 'struct tee_protmem_pool' or an ERR_PTR on failure.
 */
struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
						       size_t size);

/**
 * tee_get_drvdata() - Return driver_data pointer
 * @returns the driver_data pointer supplied to tee_register().

@@ -244,6 +300,9 @@ void *tee_get_drvdata(struct tee_device *teedev);
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);

struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
				      size_t page_count);

int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
			     int (*shm_register)(struct tee_context *ctx,
						 struct tee_shm *shm,

@@ -116,6 +116,16 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length);

/**
 * tee_shm_register_fd() - Register shared memory from file descriptor
 *
 * @ctx:	Context that allocates the shared memory
 * @fd:		Shared memory file descriptor reference
 *
 * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */
struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd);
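
A brief, hedged sketch of the in-kernel side: a client driver that has been handed a dma-buf file descriptor can wrap it as a tee_shm before passing it to the TEE. The example_ wrapper is illustrative only.

/* Sketch only: import a dma-buf fd as TEE shared memory. */
static struct tee_shm *example_import(struct tee_context *ctx, int dmabuf_fd)
{
	struct tee_shm *shm = tee_shm_register_fd(ctx, dmabuf_fd);

	if (IS_ERR(shm))
		return shm;

	/* Use shm in TEE invocations, then drop it with tee_shm_put(). */
	return shm;
}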

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free

@@ -378,6 +378,37 @@ struct tee_ioctl_shm_register_data {
	__s32 id;
};

/**
 * struct tee_ioctl_shm_register_fd_data - Shared memory registering argument
 * @fd:		[in] File descriptor identifying dmabuf reference
 * @size:	[out] Size of referenced memory
 * @flags:	[in] Flags to/from allocation
 * @id:		[out] Identifier of the shared memory
 *
 * The flags field should currently be zero as input. Updated by the call
 * with actual flags as defined by TEE_IOCTL_SHM_* above.
 * This structure is used as argument for TEE_IOC_SHM_REGISTER_FD below.
 */
struct tee_ioctl_shm_register_fd_data {
	__s64 fd;
	__u64 size;
	__u32 flags;
	__s32 id;
};

/**
 * TEE_IOC_SHM_REGISTER_FD - register shared memory from a file descriptor
 *
 * Returns a file descriptor on success or < 0 on failure
 *
 * The returned file descriptor refers to the shared memory object in the
 * kernel. The supplied file descriptor can be closed if it's not needed
 * for other purposes. The shared memory is freed when the descriptor is
 * closed.
 */
#define TEE_IOC_SHM_REGISTER_FD	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 8, \
				      struct tee_ioctl_shm_register_fd_data)
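
A hedged user-space sketch of driving the new ioctl; the device node path and the origin of dmabuf_fd are assumptions, and error handling is trimmed:

/* Sketch only: register an existing dma-buf fd as TEE shared memory. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tee.h>

int example_register_fd(int dmabuf_fd)
{
	struct tee_ioctl_shm_register_fd_data data = {
		.fd = dmabuf_fd,
		.flags = 0,	/* must be zero on input */
	};
	int tee_fd, shm_fd;

	tee_fd = open("/dev/tee0", O_RDWR);	/* assumed device node */
	if (tee_fd < 0)
		return -1;

	shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);
	close(tee_fd);	/* the shm object holds its own references */

	/* On success, data.id and data.size were filled in by the kernel. */
	return shm_fd;	/* the shared memory is freed when this fd is closed */
}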

/**
 * TEE_IOC_SHM_REGISTER - Register shared memory argument
 *