author     Susheel Khiani <skhiani@codeaurora.org>   2015-12-04 17:49:59 +0530
committer  David Keitel <dkeitel@codeaurora.org>     2016-03-23 21:25:36 -0700
commit     84072b90d254c9597c06b360fdfc3a51e03eba95 (patch)
tree       362727ee84b67635b489398f8c84a8a6836512e5 /drivers/iommu/arm-smmu.c
parent     b3d402012c6ee397d6db1e571a4364c096b4e659 (diff)
iommu/arm-smmu: Add support for slave side protection

For targets where we have no hypervisor, a slave side protection
mechanism is used to provide buffer protection. Add functionality to
make calls into TZ for mapping/unmapping of buffers.

CRs-Fixed: 959535
Change-Id: I3106a98370a70611f4670aaf1c0f95c9e758a87c
Signed-off-by: Susheel Khiani <skhiani@codeaurora.org>
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--  drivers/iommu/arm-smmu.c | 130
1 file changed, 110 insertions(+), 20 deletions(-)
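
The central change in this patch is the page-table locking split. Slave-side
secure domains build their page tables through SCM calls into TZ, which can
sleep, so they cannot hold a spinlock across the operation; every other domain
keeps the original spinlock so mapping still works in atomic context. A minimal
standalone sketch of that wrapper pattern follows, where struct demo_domain and
its slave_side_secure flag are illustrative stand-ins for the real
struct arm_smmu_domain and arm_smmu_is_slave_side_secure():

/*
 * Sketch of the dual page-table lock introduced by this patch: a mutex
 * for slave-side secure domains (TZ/SCM calls may sleep) and a spinlock
 * for everything else.  demo_domain stands in for struct arm_smmu_domain.
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_domain {
	bool slave_side_secure;		/* stand-in for arm_smmu_is_slave_side_secure() */
	spinlock_t pgtbl_spin_lock;	/* non-secure: usable in atomic context */
	struct mutex pgtbl_mutex_lock;	/* slave-side secure: may sleep in TZ */
};

static unsigned long demo_pgtbl_lock(struct demo_domain *d)
{
	unsigned long flags = 0;

	if (d->slave_side_secure)
		mutex_lock(&d->pgtbl_mutex_lock);
	else
		spin_lock_irqsave(&d->pgtbl_spin_lock, flags);

	return flags;	/* only meaningful on the spinlock path */
}

static void demo_pgtbl_unlock(struct demo_domain *d, unsigned long flags)
{
	if (d->slave_side_secure)
		mutex_unlock(&d->pgtbl_mutex_lock);
	else
		spin_unlock_irqrestore(&d->pgtbl_spin_lock, flags);
}

Note that the patch's arm_smmu_pgtbl_lock() returns flags uninitialized on the
mutex path; callers only hand the value back to the matching unlock, which
ignores it there, but the sketch zero-initializes it to keep the compiler quiet.
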
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index bb36aba19a9d..b71fe57d62bf 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -46,6 +46,7 @@
#include <linux/amba/bus.h>
#include <soc/qcom/msm_tz_smmu.h>
+#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <asm/cacheflush.h>
#include <linux/msm-bus.h>
@@ -406,6 +407,8 @@ struct arm_smmu_device {
struct msm_bus_client_handle *bus_client;
char *bus_client_name;
+
+ enum tz_smmu_device_id sec_id;
};
struct arm_smmu_cfg {
@@ -445,7 +448,8 @@ struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
- spinlock_t pgtbl_lock;
+ spinlock_t pgtbl_spin_lock;
+ struct mutex pgtbl_mutex_lock;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
@@ -507,6 +511,8 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain);
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
@@ -1460,11 +1466,23 @@ static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}
-static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
+static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
return smmu_domain->secure_vmid != VMID_INVAL;
}
+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+ return arm_smmu_has_secure_vmid(smmu_domain) &&
+ arm_smmu_is_static_cb(smmu_domain->smmu);
+}
+
+static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+ return arm_smmu_has_secure_vmid(smmu_domain)
+ && !arm_smmu_is_static_cb(smmu_domain->smmu);
+}
+
static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
if (arm_smmu_is_master_side_secure(smmu_domain))
@@ -1477,6 +1495,27 @@ static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
mutex_unlock(&smmu_domain->assign_lock);
}
+static unsigned long arm_smmu_pgtbl_lock(struct arm_smmu_domain *smmu_domain)
+{
+ unsigned long flags;
+
+ if (arm_smmu_is_slave_side_secure(smmu_domain))
+ mutex_lock(&smmu_domain->pgtbl_mutex_lock);
+ else
+ spin_lock_irqsave(&smmu_domain->pgtbl_spin_lock, flags);
+
+ return flags;
+}
+
+static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain,
+ unsigned long flags)
+{
+ if (arm_smmu_is_slave_side_secure(smmu_domain))
+ mutex_unlock(&smmu_domain->pgtbl_mutex_lock);
+ else
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_spin_lock, flags);
+}
+
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_device *smmu)
{
@@ -1562,17 +1601,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = cfg->cbndx;
}
- smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
- .ias = ias,
- .oas = oas,
- .tlb = &arm_smmu_gather_ops,
- .iommu_dev = smmu->dev,
- };
+ smmu_domain->smmu = smmu;
+
+ if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+ smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .arm_msm_secure_cfg = {
+ .sec_id = smmu->sec_id,
+ .cbndx = cfg->cbndx,
+ },
+ .iommu_dev = smmu->dev,
+ };
+ fmt = ARM_MSM_SECURE;
+ } else {
+ smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .tlb = &arm_smmu_gather_ops,
+ .iommu_dev = smmu->dev,
+ };
+ }
cfg->asid = cfg->cbndx + 1;
cfg->vmid = cfg->cbndx + 2;
- smmu_domain->smmu = smmu;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
smmu_domain);
if (!pgtbl_ops) {
@@ -1677,8 +1729,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
smmu_domain->cfg.vmid = INVALID_VMID;
mutex_init(&smmu_domain->init_mutex);
- spin_lock_init(&smmu_domain->pgtbl_lock);
+ spin_lock_init(&smmu_domain->pgtbl_spin_lock);
mutex_init(&smmu_domain->assign_lock);
+ mutex_init(&smmu_domain->pgtbl_mutex_lock);
return &smmu_domain->domain;
}
@@ -2030,7 +2083,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
}
-
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
if (IS_ERR_VALUE(ret))
@@ -2259,9 +2311,9 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
arm_smmu_secure_domain_lock(smmu_domain);
- spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ flags = arm_smmu_pgtbl_lock(smmu_domain);
ret = ops->map(ops, iova, paddr, size, prot);
- spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ arm_smmu_pgtbl_unlock(smmu_domain, flags);
if (!ret)
ret = arm_smmu_assign_table(smmu_domain);
@@ -2279,15 +2331,31 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
unsigned long flags;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int atomic_ctx = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
if (!ops)
return -ENODEV;
+ if (arm_smmu_is_slave_side_secure(smmu_domain) && atomic_ctx) {
+ dev_err(smmu->dev, "Slave side atomic context not supported\n");
+ return 0;
+ }
+
+ if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+ mutex_lock(&smmu_domain->init_mutex);
+
+ if (arm_smmu_enable_clocks(smmu)) {
+ mutex_unlock(&smmu_domain->init_mutex);
+ return 0;
+ }
+ }
+
arm_smmu_secure_domain_lock(smmu_domain);
- spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ flags = arm_smmu_pgtbl_lock(smmu_domain);
ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
- spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ arm_smmu_pgtbl_unlock(smmu_domain, flags);
if (ret) {
if (arm_smmu_assign_table(smmu_domain)) {
@@ -2301,6 +2369,10 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
out:
arm_smmu_secure_domain_unlock(smmu_domain);
+ if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+ arm_smmu_disable_clocks(smmu_domain->smmu);
+ mutex_unlock(&smmu_domain->init_mutex);
+ }
return ret;
}
@@ -2316,6 +2388,12 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
if (!ops)
return 0;
+ if (arm_smmu_is_slave_side_secure(smmu_domain) && atomic_ctx) {
+ dev_err(smmu_domain->smmu->dev,
+ "Slave side atomic context not supported\n");
+ return 0;
+ }
+
/*
* The contract here is that if you set DOMAIN_ATTR_ATOMIC your
* domain *must* be attached to an SMMU during unmap. This
@@ -2343,9 +2421,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
}
}
- spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ flags = arm_smmu_pgtbl_lock(smmu_domain);
ret = ops->unmap(ops, iova, size);
- spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ arm_smmu_pgtbl_unlock(smmu_domain, flags);
/*
* While splitting up block mappings, we might allocate page table
@@ -2385,9 +2463,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
if (!ops)
return 0;
- spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ flags = arm_smmu_pgtbl_lock(smmu_domain);
ret = ops->iova_to_phys(ops, iova);
- spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ arm_smmu_pgtbl_unlock(smmu_domain, flags);
return ret;
}
@@ -2433,6 +2511,16 @@ static void arm_smmu_resume(struct arm_smmu_device *smmu)
void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
u32 reg;
+ if (arm_smmu_is_static_cb(smmu)) {
+ int ret, scm_ret;
+
+ ret = scm_restore_sec_cfg(smmu->sec_id, 0x0, &scm_ret);
+ if (ret || scm_ret) {
+ pr_err("scm call IOMMU_SECURE_CFG failed\n");
+ return;
+ }
+ }
+
reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
@@ -3478,6 +3566,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
err = arm_smmu_enable_clocks(smmu);
if (err)
goto out_put_masters;
+
+ smmu->sec_id = msm_dev_to_device_id(dev);
err = arm_smmu_device_cfg_probe(smmu);
arm_smmu_disable_clocks(smmu);
if (err)
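
In the arm_smmu_resume() hunk, the static-CB (slave-side protected) path shows
the usual SCM calling convention on these targets: the secure configuration is
restored through TZ before any SMMU register is touched, and the call reports
failure in two places, its own return value and the value the secure world
writes back, both of which must be checked. A reduced sketch of that convention
follows; demo_restore_sec_cfg() is illustrative, and the scm_restore_sec_cfg()
signature is inferred from the patch's usage:

#include <linux/errno.h>
#include <soc/qcom/msm_tz_smmu.h>
#include <soc/qcom/scm.h>

/* Illustrative wrapper around the check arm_smmu_resume() now performs. */
static int demo_restore_sec_cfg(enum tz_smmu_device_id sec_id)
{
	int ret, scm_ret = 0;

	ret = scm_restore_sec_cfg(sec_id, 0x0, &scm_ret);
	if (ret)
		return ret;		/* the SCM call itself failed */
	if (scm_ret)
		return -EINVAL;		/* the secure world rejected the request */

	return 0;
}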