author    Liam Mark <lmark@codeaurora.org>    2016-12-01 13:05:31 -0800
committer Liam Mark <lmark@codeaurora.org>    2016-12-05 13:42:00 -0800
commit    3991501f313d8ad40a2ca013bf0f56505af42650 (patch)
tree      52d6db7bb742bb49e9e2eefd174f1115f83570fb /drivers
parent    b979f780bfe70f299455bf9e10f206cb58951b6c (diff)
iommu: dma-mapping-fast: add support for DMA sync single APIs
Implement the fast map DMA sync single APIs.

Change-Id: I1b785c7c441e53fa0b2e0fa784ff8afed8afceb9
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/iommu/dma-mapping-fast.c | 40
1 file changed, 40 insertions(+), 0 deletions(-)
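
The handlers added below back the kernel's generic dma_sync_single_for_cpu() / dma_sync_single_for_device() entry points for devices attached to a fast SMMU mapping. A minimal sketch of how a driver might reach them, assuming a hypothetical device and receive buffer (my_dev, my_buf, and my_rx_example are illustrative names, not from this tree):

#include <linux/dma-mapping.h>

static void my_rx_example(struct device *my_dev, void *my_buf, size_t len)
{
	dma_addr_t iova;

	/* On this path, iova comes from the fast SMMU IOVA allocator. */
	iova = dma_map_single(my_dev, my_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(my_dev, iova))
		return;

	/* ... device DMAs into the buffer through the IOMMU ... */

	/* Dispatches to fast_smmu_sync_single_for_cpu() via dma_map_ops:
	 * cache maintenance so the CPU sees the device's writes.
	 */
	dma_sync_single_for_cpu(my_dev, iova, len, DMA_FROM_DEVICE);

	/* ... CPU reads my_buf ... */

	/* Dispatches to fast_smmu_sync_single_for_device(): hands the
	 * buffer back to the device for the next transfer.
	 */
	dma_sync_single_for_device(my_dev, iova, len, DMA_FROM_DEVICE);

	dma_unmap_single(my_dev, iova, len, DMA_FROM_DEVICE);
}
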
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 266f7065fca4..411f52c5ae81 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -339,6 +339,30 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
spin_unlock_irqrestore(&mapping->lock, flags);
}
+static void fast_smmu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t iova, size_t size, enum dma_data_direction dir)
+{
+ struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+ av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+ unsigned long offset = iova & ~FAST_PAGE_MASK;
+ struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+
+ if (!is_device_dma_coherent(dev))
+ __fast_dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void fast_smmu_sync_single_for_device(struct device *dev,
+ dma_addr_t iova, size_t size, enum dma_data_direction dir)
+{
+ struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+ av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+ unsigned long offset = iova & ~FAST_PAGE_MASK;
+ struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+
+ if (!is_device_dma_coherent(dev))
+ __fast_dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
@@ -354,6 +378,18 @@ static void fast_smmu_unmap_sg(struct device *dev,
WARN_ON_ONCE(1);
}
+static void fast_smmu_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+ WARN_ON_ONCE(1);
+}
+
+static void fast_smmu_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+ WARN_ON_ONCE(1);
+}
+
static void __fast_smmu_free_pages(struct page **pages, int count)
{
while (count--)
@@ -590,8 +626,12 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
.mmap = fast_smmu_mmap_attrs,
.map_page = fast_smmu_map_page,
.unmap_page = fast_smmu_unmap_page,
+ .sync_single_for_cpu = fast_smmu_sync_single_for_cpu,
+ .sync_single_for_device = fast_smmu_sync_single_for_device,
.map_sg = fast_smmu_map_sg,
.unmap_sg = fast_smmu_unmap_sg,
+ .sync_sg_for_cpu = fast_smmu_sync_sg_for_cpu,
+ .sync_sg_for_device = fast_smmu_sync_sg_for_device,
.dma_supported = fast_smmu_dma_supported,
.mapping_error = fast_smmu_mapping_error,
};
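
Both single-buffer handlers recover the backing struct page from the IOVA by indexing the fast mapping's flat last-level page table, then apply cache maintenance only when the device is not DMA-coherent. An illustrative sketch of that lookup, relying on the FAST_PAGE_MASK and FAST_PTE_ADDR_MASK definitions this driver already uses; the helper name fast_iova_to_page_sketch is an assumption for illustration, while iopte_pmd_offset(), phys_to_page(), and the field names come from the patch above:

/* Illustrative only: mirrors the IOVA -> struct page lookup above. */
static struct page *fast_iova_to_page_sketch(struct dma_fast_smmu_mapping *mapping,
					     dma_addr_t iova)
{
	/* Index the flat array of last-level PTEs by IOVA. */
	av8l_fast_iopte *ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova);

	/* The PTE's output-address bits name the physical page; the
	 * in-page offset is iova & ~FAST_PAGE_MASK, as in the handlers.
	 */
	return phys_to_page(*ptep & FAST_PTE_ADDR_MASK);
}

The scatter-gather sync callbacks, by contrast, are wired up as WARN_ON_ONCE(1) stubs, matching the existing fast_smmu_unmap_sg() placeholder: callers that hit the sg sync paths on a fast mapping get a one-time warning rather than cache maintenance.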