diff options
author | Mitchel Humpherys <mitchelh@codeaurora.org> | 2015-11-23 13:18:47 -0800 |
---|---|---|
committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-22 11:04:50 -0700 |
commit | 51adc0956b090e04ec291fe33b340f9b32678f8c (patch) | |
tree | b2478e2332d84bd61cd7930700c998aac3873bc8 /drivers/soc/qcom | |
parent | 8a9d07bc986c6b562e56a0b28395574bb52e4846 (diff) |
msm: secure_buffer: Ensure shared buffers are cache-aligned
Buffers shared with hypervisor might undergo some cache maintenance on
the hypervisor side. Currently, we're passing a buffer that might not
be cache-aligned (source_vm_list), so when the hypervisor does some
cache maintenance on it they might be corrupting data. Fix this by
ensuring that all buffers being shared with the hypervisor are
cache-aligned by kmalloc'ing and memcpy'ing the source_vm_list before
sending it across.
Change-Id: I661f8ca852ade159e3432904086b060be8bfb806
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
Diffstat (limited to 'drivers/soc/qcom')
-rw-r--r-- | drivers/soc/qcom/secure_buffer.c | 23 |
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index be674b035afe..e370531405e9 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -275,6 +275,7 @@ int hyp_assign_table(struct sg_table *table,
 	struct info_list *info_list = NULL;
 	struct dest_info_list *dest_info_list = NULL;
 	struct scm_desc desc = {0};
+	u32 *source_vm_copy;
 
 	info_list = get_info_list_from_table(table);
 	if (!info_list)
@@ -287,10 +288,23 @@ int hyp_assign_table(struct sg_table *table,
 		goto err1;
 	}
 
+	/*
+	 * We can only pass cache-aligned sizes to hypervisor, so we need
+	 * to kmalloc and memcpy the source_vm_list here.
+	 */
+	source_vm_copy = kmalloc_array(
+		source_nelems, sizeof(*source_vm_copy), GFP_KERNEL);
+	if (!source_vm_copy) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+	memcpy(source_vm_copy, source_vm_list,
+	       sizeof(*source_vm_list) * source_nelems);
+
 	desc.args[0] = virt_to_phys(info_list->list_head);
 	desc.args[1] = info_list->list_size;
-	desc.args[2] = virt_to_phys(source_vm_list);
-	desc.args[3] = sizeof(*source_vm_list) * source_nelems;
+	desc.args[2] = virt_to_phys(source_vm_copy);
+	desc.args[3] = sizeof(*source_vm_copy) * source_nelems;
 	desc.args[4] = virt_to_phys(dest_info_list->dest_info);
 	desc.args[5] = dest_info_list->list_size;
 	desc.args[6] = 0;
@@ -298,7 +312,7 @@ int hyp_assign_table(struct sg_table *table,
 	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL,
 				SCM_RO, SCM_VAL, SCM_VAL);
 
-	dmac_flush_range(source_vm_list, source_vm_list + source_nelems);
+	dmac_flush_range(source_vm_copy, source_vm_copy + source_nelems);
 	dmac_flush_range(info_list->list_head, info_list->list_head +
 			(info_list->list_size / sizeof(*info_list->list_head)));
 	dmac_flush_range(dest_info_list->dest_info, dest_info_list->dest_info +
@@ -311,8 +325,9 @@ int hyp_assign_table(struct sg_table *table,
 		pr_info("%s: Failed to assign memory protection, ret = %d\n",
 			__func__, ret);
 
+	kfree(source_vm_copy);
+err2:
 	destroy_dest_info_list(dest_info_list);
-
 err1:
 	destroy_info_list(info_list);
 	return ret;