author     Max Filippov <jcmvbkbc@gmail.com>                    2020-11-16 01:38:59 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>     2020-11-24 12:48:12 +0100
commit     db2c4daca0e073b0110ed7dc56f53bd404d07e88 (patch)
tree       fcb149eda8eeef47d608bfa64697960cd42cc4d5 /arch
parent     803fe1fe48e4e88965ab1df8a6c6f2df098c96f4 (diff)
xtensa: disable preemption around cache alias management calls
commit 3a860d165eb5f4d7cf0bf81ef6a5b5c5e1754422 upstream.

Although cache alias management calls set up and tear down TLB entries, and fast_second_level_miss is able to restore a TLB entry should it be evicted, they absolutely cannot preempt each other, because they use the same TLBTEMP area for different purposes.

Disable preemption around all cache alias management calls to enforce that.

Cc: stable@vger.kernel.org
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
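[Editor's note] For readers unfamiliar with the xtensa TLBTEMP scheme, below is a minimal sketch of the pattern every hunk in this patch applies: pick the TLBTEMP slot matching the alias color of the address, then keep the set-up/flush/tear-down sequence non-preemptible so no other task on this CPU can remap the same slot mid-operation. The wrapper name tlbtemp_flush_alias() is hypothetical; TLBTEMP_BASE_1, DCACHE_ALIAS_MASK and __flush_invalidate_dcache_page_alias() are the real symbols used in arch/xtensa/mm/cache.c.

	#include <linux/preempt.h>
	#include <asm/cacheflush.h>	/* __flush_invalidate_dcache_page_alias() */
	#include <asm/page.h>		/* DCACHE_ALIAS_MASK */
	#include <asm/pgtable.h>	/* TLBTEMP_BASE_1 */

	/* Hypothetical wrapper illustrating the pattern; not a literal excerpt. */
	static void tlbtemp_flush_alias(unsigned long vaddr, unsigned long phys)
	{
		/* Map the page at the TLBTEMP slot with the same alias color. */
		unsigned long virt = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		/*
		 * The temporary TLB entry set up for this slot must not be
		 * reused by another task before the flush completes, so the
		 * whole sequence runs with preemption off.
		 */
		preempt_disable();
		__flush_invalidate_dcache_page_alias(virt, phys);
		preempt_enable();
	}

Disabling preemption, rather than taking a lock, is presumably sufficient here because TLB entries are per-CPU: a task on another CPU that maps the same TLBTEMP virtual address installs its own entry in its own TLB and does not disturb this one.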
Diffstat (limited to 'arch')
-rw-r--r--  arch/xtensa/mm/cache.c | 14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 1a804a2f9a5b..2fe5cabfc41e 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -74,8 +74,10 @@ static inline void kmap_invalidate_coherent(struct page *page,
kvaddr = TLBTEMP_BASE_1 +
(page_to_phys(page) & DCACHE_ALIAS_MASK);
+ preempt_disable();
__invalidate_dcache_page_alias(kvaddr,
page_to_phys(page));
+ preempt_enable();
}
}
}
@@ -162,6 +164,7 @@ void flush_dcache_page(struct page *page)
if (!alias && !mapping)
return;
+ preempt_disable();
virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(virt, phys);
@@ -172,6 +175,7 @@ void flush_dcache_page(struct page *page)
if (mapping)
__invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
}
/* There shouldn't be an entry in the cache for this page anymore. */
@@ -204,8 +208,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
unsigned long phys = page_to_phys(pfn_to_page(pfn));
unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(virt, phys);
__invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
}
#endif
@@ -231,11 +237,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
unsigned long phys = page_to_phys(page);
unsigned long tmp;
+ preempt_disable();
tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(tmp, phys);
tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(tmp, phys);
__invalidate_icache_page_alias(tmp, phys);
+ preempt_enable();
clear_bit(PG_arch_1, &page->flags);
}
@@ -269,7 +277,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
}
/* Copy data */
@@ -284,9 +294,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_range((unsigned long) dst, len);
if ((vma->vm_flags & VM_EXEC) != 0)
__invalidate_icache_page_alias(t, phys);
+ preempt_enable();
} else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst,len);
@@ -308,7 +320,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
}
memcpy(dst, src, len);