-rw-r--r--  drivers/staging/android/ion/ion_priv.h                   7
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c          223
-rw-r--r--  drivers/staging/android/ion/ion_system_secure_heap.c   100
-rw-r--r--  drivers/staging/android/ion/msm/msm_ion.c                13
-rw-r--r--  drivers/staging/android/ion/msm_ion_priv.h               11
-rw-r--r--  include/soc/qcom/secure_buffer.h                         35
6 files changed, 286 insertions, 103 deletions
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index bae53350f7df..d598d0cbb67f 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -270,6 +270,13 @@ int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);
/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
+/**
* ion_heap_init_shrinker
* @heap: the heap
*
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 36fdcbd83562..1f18110f5ad4 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -29,6 +29,7 @@
#include "ion_priv.h"
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>
+#include <soc/qcom/secure_buffer.h>
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN |
__GFP_NORETRY)
@@ -61,6 +62,7 @@ struct ion_system_heap {
struct ion_heap heap;
struct ion_page_pool **uncached_pools;
struct ion_page_pool **cached_pools;
+ struct ion_page_pool **secure_pools[VMID_LAST];
};
struct page_info {
@@ -78,16 +80,18 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
bool cached = ion_buffer_cached(buffer);
struct page *page;
struct ion_page_pool *pool;
+ int vmid = get_secure_vmid(buffer->flags);
if (*from_pool) {
- if (!cached)
+ if (vmid > 0)
+ pool = heap->secure_pools[vmid][order_to_index(order)];
+ else if (!cached)
pool = heap->uncached_pools[order_to_index(order)];
else
pool = heap->cached_pools[order_to_index(order)];
page = ion_page_pool_alloc(pool, from_pool);
} else {
gfp_t gfp_mask = low_order_gfp_flags;
-
if (order)
gfp_mask = high_order_gfp_flags;
page = alloc_pages(gfp_mask, order);
@@ -98,15 +102,22 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
return page;
}
+/*
+ * For secure pages that are to be freed rather than returned to the pool,
+ * hyp_unassign must be called before calling this function.
+ */
static void free_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer, struct page *page,
unsigned int order)
{
bool cached = ion_buffer_cached(buffer);
+ int vmid = get_secure_vmid(buffer->flags);
if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
struct ion_page_pool *pool;
- if (cached)
+ if (vmid > 0)
+ pool = heap->secure_pools[vmid][order_to_index(order)];
+ else if (cached)
pool = heap->cached_pools[order_to_index(order)];
else
pool = heap->uncached_pools[order_to_index(order)];
@@ -120,7 +131,6 @@ static void free_buffer_page(struct ion_system_heap *heap,
}
}
-
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long size,
@@ -141,7 +151,6 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
if (max_order < orders[i])
continue;
from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC);
-
page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
if (!page)
continue;
@@ -206,6 +215,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
unsigned int max_order = orders[0];
struct pages_mem data;
unsigned int sz;
+ int vmid = get_secure_vmid(buffer->flags);
if (align > PAGE_SIZE)
return -EINVAL;
@@ -294,19 +304,30 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
goto err_free_sg2;
}
- if (nents_sync)
+ if (nents_sync) {
dma_sync_sg_for_device(NULL, table_sync.sgl, table_sync.nents,
DMA_BIDIRECTIONAL);
+ if (vmid > 0) {
+ ret = ion_system_secure_heap_assign_sg(&table_sync,
+ vmid);
+ if (ret)
+ goto err_free_sg2;
+ }
+ }
buffer->priv_virt = table;
if (nents_sync)
sg_free_table(&table_sync);
msm_ion_heap_free_pages_mem(&data);
return 0;
+
err_free_sg2:
/* We failed to zero buffers. Bypass pool */
buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+ if (vmid > 0)
+ ion_system_secure_heap_unassign_sg(table, vmid);
+
for_each_sg(table->sgl, sg, table->nents, i)
free_buffer_page(sys_heap, buffer, sg_page(sg),
get_order(sg->length));
@@ -340,10 +361,16 @@ void ion_system_heap_free(struct ion_buffer *buffer)
struct scatterlist *sg;
LIST_HEAD(pages);
int i;
+ int vmid = get_secure_vmid(buffer->flags);
if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) &&
- !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC))
- msm_ion_heap_sg_table_zero(table, buffer->size);
+ !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
+ if (vmid < 0)
+ msm_ion_heap_sg_table_zero(table, buffer->size);
+ } else if (vmid > 0) {
+ if (ion_system_secure_heap_unassign_sg(table, vmid))
+ return;
+ }
for_each_sg(table->sgl, sg, table->nents, i)
free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -363,13 +390,98 @@ void ion_system_heap_unmap_dma(struct ion_heap *heap,
{
}
+static int ion_move_secure_pages_to_uncached_pool(
+ struct ion_system_heap *sys_heap,
+ int vmid, unsigned int nr)
+{
+ int count = 0, i, ret, num_pages = 0;
+ struct page *page;
+ struct sg_table sgt;
+ struct scatterlist *sg;
+ struct page_info *pinfo, *tmpinfo;
+ LIST_HEAD(pages);
+
+ INIT_LIST_HEAD(&pages);
+ for (i = 0; i < num_orders && num_pages < nr; i++) {
+ /*
+ * Ideally we want to just dequeue pages from the pool list,
+ * but currently we can only call alloc, which could potentially
+ * allocate a new page if nothing exists in the pool.
+ */
+ do {
+ struct page_info *info;
+
+ page = ion_page_pool_alloc_pool_only(
+ sys_heap->secure_pools[vmid][i]);
+ if (!page)
+ break;
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto out1;
+ info->page = page;
+ info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &pages);
+ count += 1;
+ num_pages += 1 << info->order;
+ if (num_pages >= nr)
+ break;
+ } while (1);
+ }
+
+ if (!count)
+ return 0;
+
+ ret = sg_alloc_table(&sgt, count, GFP_KERNEL);
+ if (ret)
+ goto out1;
+ sg = sgt.sgl;
+ list_for_each_entry(pinfo, &pages, list) {
+ sg_set_page(sg, pinfo->page,
+ (1 << pinfo->order) * PAGE_SIZE, 0);
+ sg_dma_address(sg) = page_to_phys(pinfo->page);
+ sg = sg_next(sg);
+ }
+
+ if (ion_system_secure_heap_unassign_sg(&sgt, vmid))
+ goto out2;
+
+ list_for_each_entry_safe(pinfo, tmpinfo, &pages, list) {
+ ion_page_pool_free(
+ sys_heap->uncached_pools[order_to_index(pinfo->order)],
+ pinfo->page);
+ list_del(&pinfo->list);
+ kfree(pinfo);
+ }
+
+ sg_free_table(&sgt);
+ return num_pages;
+
+out2:
+ sg_free_table(&sgt);
+out1:
+ /*
+ * TODO: figure out how to handle failure here,
+ * e.g. return the pages to the secure pool?
+ */
+ list_for_each_entry_safe(pinfo, tmpinfo, &pages, list) {
+ ion_page_pool_free(
+ sys_heap->secure_pools[vmid][order_to_index(pinfo->order)],
+ pinfo->page);
+ list_del(&pinfo->list);
+ kfree(pinfo);
+ }
+ return 0;
+}
+
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
int nr_to_scan)
{
struct ion_system_heap *sys_heap;
int nr_total = 0;
- int i, nr_freed;
+ int i, j, nr_freed = 0, nr_secure = 0;
int only_scan = 0;
+ struct ion_page_pool *pool;
sys_heap = container_of(heap, struct ion_system_heap, heap);
@@ -377,8 +489,17 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
only_scan = 1;
for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->uncached_pools[i];
- nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+ for (j = 0; j < VMID_LAST; j++) {
+ if (is_secure_vmid_valid(j))
+ nr_secure +=
+ ion_move_secure_pages_to_uncached_pool(
+ sys_heap,
+ j, nr_to_scan / 4);
+ }
+
+ pool = sys_heap->uncached_pools[i];
+ nr_freed += ion_page_pool_shrink(pool, gfp_mask,
+ nr_to_scan + nr_secure);
pool = sys_heap->cached_pools[i];
nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
@@ -416,10 +537,12 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
bool use_seq = s != NULL;
unsigned long uncached_total = 0;
unsigned long cached_total = 0;
+ unsigned long secure_total = 0;
+ struct ion_page_pool *pool;
+ int i, j;
- int i;
for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->uncached_pools[i];
+ pool = sys_heap->uncached_pools[i];
if (use_seq) {
seq_printf(s,
"%d order %u highmem pages in uncached pool = %lu total\n",
@@ -440,7 +563,7 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
}
for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->cached_pools[i];
+ pool = sys_heap->cached_pools[i];
if (use_seq) {
seq_printf(s,
"%d order %u highmem pages in cached pool = %lu total\n",
@@ -459,19 +582,45 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
pool->low_count;
}
+ for (i = 0; i < num_orders; i++) {
+ for (j = 0; j < VMID_LAST; j++) {
+ if (!is_secure_vmid_valid(j))
+ continue;
+ pool = sys_heap->secure_pools[j][i];
+
+ if (use_seq) {
+ seq_printf(s,
+ "VMID %d: %d order %u highmem pages in secure pool = %lu total\n",
+ j, pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE *
+ pool->high_count);
+ seq_printf(s,
+ "VMID %d: %d order %u lowmem pages in secure pool = %lu total\n",
+ j, pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE *
+ pool->low_count);
+ }
+
+ secure_total += (1 << pool->order) * PAGE_SIZE *
+ pool->high_count;
+ secure_total += (1 << pool->order) * PAGE_SIZE *
+ pool->low_count;
+ }
+ }
+
if (use_seq) {
seq_puts(s, "--------------------------------------------\n");
- seq_printf(s, "uncached pool = %lu cached pool = %lu\n",
- uncached_total, cached_total);
- seq_printf(s, "pool total (uncached + cached) = %lu\n",
- uncached_total + cached_total);
+ seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n",
+ uncached_total, cached_total, secure_total);
+ seq_printf(s, "pool total (uncached + cached + secure) = %lu\n",
+ uncached_total + cached_total + secure_total);
seq_puts(s, "--------------------------------------------\n");
} else {
pr_info("-------------------------------------------------\n");
- pr_info("uncached pool = %lu cached pool = %lu\n",
- uncached_total, cached_total);
- pr_info("pool total (uncached + cached) = %lu\n",
- uncached_total + cached_total);
+ pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n",
+ uncached_total, cached_total, secure_total);
+ pr_info("pool total (uncached + cached + secure) = %lu\n",
+ uncached_total + cached_total + secure_total);
pr_info("-------------------------------------------------\n");
}
@@ -517,6 +666,7 @@ err_create_pool:
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
+ int i;
int pools_size = sizeof(struct ion_page_pool *) * num_orders;
heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
@@ -534,6 +684,16 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
if (!heap->cached_pools)
goto err_alloc_cached_pools;
+ for (i = 0; i < VMID_LAST; i++) {
+ if (is_secure_vmid_valid(i)) {
+ heap->secure_pools[i] = kzalloc(pools_size, GFP_KERNEL);
+ if (!heap->secure_pools[i])
+ goto err_create_secure_pools;
+ if (ion_system_heap_create_pools(heap->secure_pools[i]))
+ goto err_create_secure_pools;
+ }
+ }
+
if (ion_system_heap_create_pools(heap->uncached_pools))
goto err_create_uncached_pools;
@@ -547,6 +707,14 @@ err_create_cached_pools:
ion_system_heap_destroy_pools(heap->uncached_pools);
err_create_uncached_pools:
kfree(heap->cached_pools);
+err_create_secure_pools:
+ while (i >= 0) {
+ if (heap->secure_pools[i]) {
+ ion_system_heap_destroy_pools(heap->secure_pools[i]);
+ kfree(heap->secure_pools[i]);
+ }
+ i--;
+ }
err_alloc_cached_pools:
kfree(heap->uncached_pools);
err_alloc_uncached_pools:
@@ -559,7 +727,16 @@ void ion_system_heap_destroy(struct ion_heap *heap)
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
+ int i;
+ for (i = 0; i < VMID_LAST; i++) {
+ if (is_secure_vmid_valid(i)) {
+ ion_move_secure_pages_to_uncached_pool(sys_heap,
+ i, UINT_MAX);
+ ion_system_heap_destroy_pools(
+ sys_heap->secure_pools[i]);
+ }
+ }
ion_system_heap_destroy_pools(sys_heap->uncached_pools);
ion_system_heap_destroy_pools(sys_heap->cached_pools);
kfree(sys_heap->uncached_pools);
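
For reference, the pool-selection rule that the new alloc_buffer_page()/free_buffer_page() paths follow can be summarised as below. This is a sketch outside the patch, and example_pick_pool() is a hypothetical helper name; it assumes the struct ion_system_heap layout added in the hunks above.

/* Sketch only: which pool a buffer of a given flags/order pair maps to. */
static struct ion_page_pool *example_pick_pool(struct ion_system_heap *heap,
					       struct ion_buffer *buffer,
					       unsigned int order)
{
	int vmid = get_secure_vmid(buffer->flags);

	if (vmid > 0)
		/* secure buffer: per-VMID pool, indexed by order */
		return heap->secure_pools[vmid][order_to_index(order)];
	else if (ion_buffer_cached(buffer))
		return heap->cached_pools[order_to_index(order)];
	else
		return heap->uncached_pools[order_to_index(order)];
}
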
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
index 30787194fdbb..fc0c2c2d735d 100644
--- a/drivers/staging/android/ion/ion_system_secure_heap.c
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,40 +33,52 @@ static bool is_cp_flag_present(unsigned long flags)
ION_FLAG_CP_CAMERA);
}
-static void ion_system_secure_heap_free(struct ion_buffer *buffer)
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid)
{
- int ret = 0;
- int i;
- u32 source_vm;
- int dest_vmid;
- int dest_perms;
- struct sg_table *sgt;
+ u32 dest_vmid = VMID_HLOS;
+ u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
struct scatterlist *sg;
- struct ion_heap *heap = buffer->heap;
- struct ion_system_secure_heap *secure_heap = container_of(heap,
- struct ion_system_secure_heap,
- heap);
+ int ret, i;
- source_vm = get_secure_vmid(buffer->flags);
- if (source_vm < 0) {
- pr_info("%s: Unable to get secure VMID\n", __func__);
- return;
+ ret = hyp_assign_table(sgt, &source_vmid, 1,
+ &dest_vmid, &dest_perms, 1);
+ if (ret) {
+ pr_err("%s: Not freeing memory since assign call failed. VMID %d\n",
+ __func__, source_vmid);
+ return -ENXIO;
}
- dest_vmid = VMID_HLOS;
- dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
- ret = hyp_assign_table(buffer->priv_virt, &source_vm, 1,
- &dest_vmid, &dest_perms, 1);
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ ClearPagePrivate(sg_page(sg));
+ return 0;
+}
+
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid)
+{
+ u32 source_vmid = VMID_HLOS;
+ u32 dest_perms = PERM_READ | PERM_WRITE;
+ struct scatterlist *sg;
+ int ret, i;
+
+ ret = hyp_assign_table(sgt, &source_vmid, 1,
+ &dest_vmid, &dest_perms, 1);
if (ret) {
- pr_err("%s: Not freeing memory since assign call failed\n",
- __func__);
- return;
+ pr_err("%s: Assign call failed. VMID %d\n",
+ __func__, dest_vmid);
+ return -EINVAL;
}
- sgt = buffer->priv_virt;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
- ClearPagePrivate(sg_page(sg));
+ SetPagePrivate(sg_page(sg));
+ return 0;
+}
+static void ion_system_secure_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_secure_heap *secure_heap = container_of(heap,
+ struct ion_system_secure_heap,
+ heap);
buffer->heap = secure_heap->sys_heap;
secure_heap->sys_heap->ops->free(buffer);
}
@@ -77,12 +89,6 @@ static int ion_system_secure_heap_allocate(struct ion_heap *heap,
unsigned long flags)
{
int ret = 0;
- int i;
- u32 source_vm;
- int dest_vmid;
- int dest_perms;
- struct sg_table *sgt;
- struct scatterlist *sg;
struct ion_system_secure_heap *secure_heap = container_of(heap,
struct ion_system_secure_heap,
heap);
@@ -101,38 +107,6 @@ static int ion_system_secure_heap_allocate(struct ion_heap *heap,
__func__, heap->name, ret);
return ret;
}
-
- source_vm = VMID_HLOS;
- dest_vmid = get_secure_vmid(flags);
- if (dest_vmid < 0) {
- pr_info("%s: Unable to get secure VMID\n", __func__);
- ret = -EINVAL;
- goto err;
- }
- dest_perms = PERM_READ | PERM_WRITE;
-
- ret = hyp_assign_table(buffer->priv_virt, &source_vm, 1,
- &dest_vmid, &dest_perms, 1);
- if (ret) {
- pr_err("%s: Assign call failed\n", __func__);
- goto err;
- }
-
- sgt = buffer->priv_virt;
- for_each_sg(sgt->sgl, sg, sgt->nents, i)
- SetPagePrivate(sg_page(sg));
-
- return ret;
-
-err:
- /*
- * the buffer->size field is populated in the caller of this function
- * and hence uninitialized when ops->free is called. Populating the
- * field here to handle the error condition correctly.
- */
- buffer->size = size;
- buffer->heap = secure_heap->sys_heap;
- secure_heap->sys_heap->ops->free(buffer);
return ret;
}
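
The two exported helpers are meant to be paired: assign after the sg_table has been built and zeroed, unassign before the pages go back to the normal HLOS pools. A minimal sketch of that call order follows; example_secure_buffer() is a hypothetical stand-in for a real caller such as the system heap's allocate/free paths.

/* Sketch of the intended assign/unassign pairing (not part of the patch). */
static int example_secure_buffer(struct sg_table *sgt, unsigned long ion_flags)
{
	int vmid = get_secure_vmid(ion_flags);	/* maps ION_FLAG_CP_* to a VMID */
	int ret;

	if (vmid < 0)
		return -EINVAL;			/* not a secure request */

	/* HLOS -> secure VM; sets PagePrivate on each page on success */
	ret = ion_system_secure_heap_assign_sg(sgt, vmid);
	if (ret)
		return ret;

	/* ... buffer is owned by the secure VM while in use ... */

	/* secure VM -> HLOS; on failure the pages must not be reused */
	return ion_system_secure_heap_unassign_sg(sgt, vmid);
}
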
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 952edb7c9163..9835145734f6 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -620,6 +620,17 @@ int ion_heap_allow_heap_secure(enum ion_heap_type type)
return false;
}
+bool is_secure_vmid_valid(int vmid)
+{
+ return (vmid == VMID_CP_TOUCH ||
+ vmid == VMID_CP_BITSTREAM ||
+ vmid == VMID_CP_PIXEL ||
+ vmid == VMID_CP_NON_PIXEL ||
+ vmid == VMID_CP_CAMERA ||
+ vmid == VMID_CP_SEC_DISPLAY ||
+ vmid == VMID_CP_APP);
+}
+
int get_secure_vmid(unsigned long flags)
{
if (flags & ION_FLAG_CP_TOUCH)
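
A caller would typically combine the two lookups: convert the ION_FLAG_CP_* request into a VMID with get_secure_vmid(), then check it against the secure set with the new is_secure_vmid_valid(). The sketch below is hypothetical and not part of the patch.

/* Sketch: does this flags word describe a valid secure allocation? */
static bool example_flags_are_secure(unsigned long ion_flags)
{
	int vmid = get_secure_vmid(ion_flags);

	return vmid > 0 && is_secure_vmid_valid(vmid);
}
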
diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h
index f7ede43336b0..97a40148cd18 100644
--- a/drivers/staging/android/ion/msm_ion_priv.h
+++ b/drivers/staging/android/ion/msm_ion_priv.h
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/msm_ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -122,6 +122,15 @@ int ion_heap_allow_handle_secure(enum ion_heap_type type);
int get_secure_vmid(unsigned long);
+bool is_secure_vmid_valid(int vmid);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
/**
* ion_create_chunked_sg_table - helper function to create sg table
* with specified chunk size
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 24dd2b0069f3..ffa65569ce18 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -17,24 +17,29 @@
#include <linux/scatterlist.h>
-#define VMID_HLOS 0x3
-#define VMID_CP_TOUCH 0x8
-#define VMID_CP_BITSTREAM 0x9
-#define VMID_CP_PIXEL 0xA
-#define VMID_CP_NON_PIXEL 0xB
-#define VMID_CP_CAMERA 0xD
-#define VMID_HLOS_FREE 0xE
-#define VMID_MSS_MSA 0xF
-#define VMID_MSS_NONMSA 0x10
-#define VMID_CP_SEC_DISPLAY 0x11
-#define VMID_CP_APP 0x12
-#define VMID_WLAN 0x18
-#define VMID_WLAN_CE 0x19
-#define VMID_INVAL -1
/*
* if you add a secure VMID here make sure you update
- * msm_secure_vmid_to_string
+ * msm_secure_vmid_to_string.
+ * Keep VMID_LAST as the last entry in the enum; ION relies on it to
+ * size its per-VMID lists.
*/
+enum vmid {
+ VMID_HLOS = 0x3,
+ VMID_CP_TOUCH = 0x8,
+ VMID_CP_BITSTREAM = 0x9,
+ VMID_CP_PIXEL = 0xA,
+ VMID_CP_NON_PIXEL = 0xB,
+ VMID_CP_CAMERA = 0xD,
+ VMID_HLOS_FREE = 0xE,
+ VMID_MSS_MSA = 0xF,
+ VMID_MSS_NONMSA = 0x10,
+ VMID_CP_SEC_DISPLAY = 0x11,
+ VMID_CP_APP = 0x12,
+ VMID_WLAN = 0x18,
+ VMID_WLAN_CE = 0x19,
+ VMID_LAST,
+ VMID_INVAL = -1
+};
#define PERM_READ 0x4
#define PERM_WRITE 0x2
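
VMID_LAST exists so that callers can build dense per-VMID tables, as the ion_system_heap hunks above do with secure_pools[VMID_LAST]. A minimal sketch of that pattern is shown below; example_state and example_init_secure_state() are hypothetical names, and is_secure_vmid_valid() comes from msm_ion_priv.h.

/* Sketch: size per-VMID state with VMID_LAST, touch only secure VMIDs. */
static void *example_state[VMID_LAST];

static void example_init_secure_state(void)
{
	int i;

	for (i = 0; i < VMID_LAST; i++) {
		if (!is_secure_vmid_valid(i))
			continue;
		/* allocate or reset per-VMID state for secure VMIDs only */
		example_state[i] = NULL;
	}
}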