summaryrefslogtreecommitdiff
path: root/drivers/misc
diff options
context:
space:
mode:
authorZhen Kong <zkong@codeaurora.org>2015-12-02 23:40:16 -0800
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 21:13:51 -0700
commitdeb62ed21a20b42e2b98daa14850ef95428bef1f (patch)
tree838c6a45b7beeb36db5e005ad8d24768528eb993 /drivers/misc
parent894269ca16bc63052973b17198dde1bf4fc58373 (diff)
qseecom: make change to support sg list entry number larger than 512
qseecom_send_modfd_cmd_64 converts a non-contiguous ION memory fd to a scatter-gather physical address list, and supports up to 512 SG list entries. Now, 512 is not enough if a client wants to send large data in a fragmented system. With this change, if the SG list entry number is larger than 512, we allocate a new contiguous kernel buffer and save all SG entries into this new buffer.
Change-Id: Id90eefb98b63bf16db755dae8e1482e448090f47
Signed-off-by: Zhen Kong <zkong@codeaurora.org>
Diffstat (limited to 'drivers/misc')
-rw-r--r--drivers/misc/qseecom.c85
1 file changed, 76 insertions, 9 deletions
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index b21a85c37682..f9bb5bb720cc 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -66,7 +66,10 @@
#define QSEE_CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000
-#define QSEECOM_MAX_SG_ENTRY 512
+#define QSEECOM_MAX_SG_ENTRY 512
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
+ (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
#define QSEECOM_INVALID_KEY_ID 0xff
/* Save partition image hash for authentication check */
@@ -2926,6 +2929,53 @@ err:
return -ENOMEM;
}
+/*
+ * Fallback for ion fds that map to more than QSEECOM_MAX_SG_ENTRY
+ * scatter-gather entries: allocate one contiguous DMA-coherent buffer,
+ * copy every SG entry (physical address + length) into it, and write a
+ * version-2 list header at 'field' that points to that buffer.  The
+ * allocation is recorded in data->client.sec_buf_fd[fd_idx] so the
+ * caller's cleanup pass can free it later with dma_free_coherent().
+ *
+ * Returns 0 on success, -ENOMEM on a bad fd_idx or allocation failure.
+ */
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+ char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+ struct scatterlist *sg = sg_ptr->sgl;
+ struct qseecom_sg_entry_64bit *sg_entry;
+ struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+ void *buf;
+ uint i;
+ size_t size;
+ dma_addr_t coh_pmem;
+
+ /* NOTE(review): -EINVAL would be the conventional errno for a bad
+  * index; -ENOMEM is kept here — confirm callers don't distinguish. */
+ if (fd_idx >= MAX_ION_FD) {
+ pr_err("fd_idx [%d] is invalid\n", fd_idx);
+ return -ENOMEM;
+ }
+ /* Zero the header region embedded in the caller's message buffer
+  * before filling in the v2 list descriptor below. */
+ buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+ memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+ /* Allocate a contiguous kernel buffer */
+ size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+ /* NOTE(review): this rounds up to a page boundary but adds a full
+  * extra page when 'size' is already aligned; PAGE_ALIGN(size) is the
+  * usual idiom — over-allocation only, not a correctness bug. */
+ size = (size + PAGE_SIZE) & PAGE_MASK;
+ buf = dma_alloc_coherent(qseecom.pdev,
+ size, &coh_pmem, GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("failed to alloc memory for sg buf\n");
+ return -ENOMEM;
+ }
+ /* update qseecom_sg_list_buf_hdr_64bit */
+ buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+ buf_hdr->new_buf_phys_addr = coh_pmem;
+ buf_hdr->nents_total = sg_ptr->nents;
+ /* save the left sg entries into new allocated buf */
+ sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+ /* NOTE(review): mixes sg_dma_address() with sg->length; sg_dma_len()
+  * would be the matching accessor — confirm mapping keeps them equal. */
+ for (i = 0; i < sg_ptr->nents; i++) {
+ sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+ sg_entry->len = sg->length;
+ sg_entry++;
+ sg = sg_next(sg);
+ }
+
+ /* Record the allocation so the cleanup path in
+  * __qseecom_update_cmd_buf_64() can dma_free_coherent() it. */
+ data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+ data->client.sec_buf_fd[fd_idx].vbase = buf;
+ data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+ data->client.sec_buf_fd[fd_idx].size = size;
+
+ return 0;
+}
+
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
struct qseecom_dev_handle *data)
{
@@ -2994,10 +3044,26 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
goto err;
}
if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
- pr_err("Num of scattered entries");
- pr_err(" (%d) is greater than max supported %d\n",
+ pr_warn("Num of scattered entries");
+ pr_warn(" (%d) is greater than %d\n",
sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
- goto err;
+ if (cleanup) {
+ if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+ data->client.sec_buf_fd[i].vbase)
+ dma_free_coherent(qseecom.pdev,
+ data->client.sec_buf_fd[i].size,
+ data->client.sec_buf_fd[i].vbase,
+ data->client.sec_buf_fd[i].pbase);
+ } else {
+ ret = __qseecom_allocate_sg_list_buffer(data,
+ field, i, sg_ptr);
+ if (ret) {
+ pr_err("Failed to allocate sg list buffer\n");
+ goto err;
+ }
+ }
+ len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+ goto cleanup;
}
sg = sg_ptr->sgl;
if (sg_ptr->nents == 1) {
@@ -3017,10 +3083,10 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
(req->ifd_data[i].fd > 0)) {
if ((req->cmd_req_len <
- SG_ENTRY_SZ * sg_ptr->nents) ||
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
(req->ifd_data[i].cmd_buf_offset >
- (req->cmd_req_len -
- SG_ENTRY_SZ * sg_ptr->nents))) {
+ (req->cmd_req_len -
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
pr_err("Invalid offset = 0x%x\n",
req->ifd_data[i].cmd_buf_offset);
goto err;
@@ -3030,10 +3096,10 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
(lstnr_resp->ifd_data[i].fd > 0)) {
if ((lstnr_resp->resp_len <
- SG_ENTRY_SZ * sg_ptr->nents) ||
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
(lstnr_resp->ifd_data[i].cmd_buf_offset >
(lstnr_resp->resp_len -
- SG_ENTRY_SZ * sg_ptr->nents))) {
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
goto err;
}
}
@@ -3049,6 +3115,7 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
sg = sg_next(sg);
}
}
+cleanup:
if (cleanup)
msm_ion_do_cache_op(qseecom.ion_clnt,
ihandle, NULL, len,