author	Graf Yang <graf.yang@analog.com>	2008-11-18 17:48:22 +0800
committer	Bryan Wu <cooloney@kernel.org>	2008-11-18 17:48:22 +0800
commit	b8a989893cbdeb6c97a7b5af5f38fb0e480235f9 (patch)
tree	658cf6df93dac687f0d6b94111d0f53b3dd0177c /arch/blackfin/include
parent	6b3087c64a92a36ae20d33479b4df6d7afc910d4 (diff)
Blackfin arch: SMP supporting patchset: Blackfin CPLB related code
The Blackfin dual-core BF561 processor can support SMP-like features.
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

In this patch, we extend the Blackfin CPLB related code to support SMP.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Diffstat (limited to 'arch/blackfin/include')
-rw-r--r--	arch/blackfin/include/asm/cplb-mpu.h	15
-rw-r--r--	arch/blackfin/include/asm/cplb.h	21
-rw-r--r--	arch/blackfin/include/asm/cplbinit.h	57
-rw-r--r--	arch/blackfin/include/asm/mmu_context.h	27
4 files changed, 86 insertions(+), 34 deletions(-)
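The common thread in the hunks below is that formerly global CPLB state (the CPLB tables, the miss/flush counters, and current_rwx_mask) becomes a per-CPU array indexed by the running core, and the CPLB helpers gain an explicit CPU argument. The following is a minimal sketch of the resulting calling convention on the MPU side, mirroring the update_protections() hunk further down; it is illustrative only and not part of the commit, and the wrapper name example_update_protections is made up:

#include <linux/smp.h>		/* smp_processor_id() */
#include <linux/mm_types.h>	/* struct mm_struct */
#include <asm/cplbinit.h>	/* per-CPU CPLB declarations added by this patch */

/* Illustrative sketch: callers now select their own CPU's CPLB state. */
static void example_update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/* current_rwx_mask is now an array of NR_CPUS pointers ... */
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		/* ... and both helpers take the CPU whose CPLBs they touch. */
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}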
diff --git a/arch/blackfin/include/asm/cplb-mpu.h b/arch/blackfin/include/asm/cplb-mpu.h
index 75c67b99d607..80680ad7a378 100644
--- a/arch/blackfin/include/asm/cplb-mpu.h
+++ b/arch/blackfin/include/asm/cplb-mpu.h
@@ -28,6 +28,7 @@
*/
#ifndef __ASM_BFIN_CPLB_MPU_H
#define __ASM_BFIN_CPLB_MPU_H
+#include <linux/threads.h>
struct cplb_entry {
unsigned long data, addr;
@@ -39,22 +40,22 @@ struct mem_region {
unsigned long icplb_data;
};
-extern struct cplb_entry dcplb_tbl[MAX_CPLBS];
-extern struct cplb_entry icplb_tbl[MAX_CPLBS];
+extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
+extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
extern int first_switched_icplb;
extern int first_mask_dcplb;
extern int first_switched_dcplb;
-extern int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot;
-extern int nr_cplb_flush;
+extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[];
+extern int nr_dcplb_prot[], nr_cplb_flush[];
extern int page_mask_order;
extern int page_mask_nelts;
-extern unsigned long *current_rwx_mask;
+extern unsigned long *current_rwx_mask[NR_CPUS];
-extern void flush_switched_cplbs(void);
-extern void set_mask_dcplbs(unsigned long *);
+extern void flush_switched_cplbs(unsigned int);
+extern void set_mask_dcplbs(unsigned long *, unsigned int);
extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index 9e8b4035fcec..5f7545d06200 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -30,7 +30,6 @@
#ifndef _CPLB_H
#define _CPLB_H
-#include <asm/blackfin.h>
#include <mach/anomaly.h>
#define SDRAM_IGENERIC (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO)
@@ -55,13 +54,24 @@
#endif
#define L1_DMEMORY (CPLB_LOCK | CPLB_COMMON)
+
+#ifdef CONFIG_SMP
+#define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB)
+#define L2_IMEMORY (CPLB_COMMON | CPLB_LOCK)
+#define L2_DMEMORY (CPLB_COMMON | CPLB_LOCK)
+
+#else
#ifdef CONFIG_BFIN_L2_CACHEABLE
#define L2_IMEMORY (SDRAM_IGENERIC)
#define L2_DMEMORY (SDRAM_DGENERIC)
#else
#define L2_IMEMORY (CPLB_COMMON)
#define L2_DMEMORY (CPLB_COMMON)
-#endif
+#endif /* CONFIG_BFIN_L2_CACHEABLE */
+
+#define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB)
+#endif /* CONFIG_SMP */
+
#define SDRAM_DNON_CHBL (CPLB_COMMON)
#define SDRAM_EBIU (CPLB_COMMON)
#define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY)
@@ -71,14 +81,7 @@
#define SIZE_1M 0x00100000 /* 1M */
#define SIZE_4M 0x00400000 /* 4M */
-#ifdef CONFIG_MPU
#define MAX_CPLBS 16
-#else
-#define MAX_CPLBS (16 * 2)
-#endif
-
-#define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
- ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M)
#define CPLB_ENABLE_ICACHE_P 0
#define CPLB_ENABLE_DCACHE_P 1
diff --git a/arch/blackfin/include/asm/cplbinit.h b/arch/blackfin/include/asm/cplbinit.h
index f845b41147ba..6bfc25788161 100644
--- a/arch/blackfin/include/asm/cplbinit.h
+++ b/arch/blackfin/include/asm/cplbinit.h
@@ -36,6 +36,8 @@
#ifdef CONFIG_MPU
#include <asm/cplb-mpu.h>
+extern void bfin_icache_init(struct cplb_entry *icplb_tbl);
+extern void bfin_dcache_init(struct cplb_entry *dcplb_tbl);
#else
@@ -46,8 +48,40 @@
#define IN_KERNEL 1
-enum
-{ZERO_P, L1I_MEM, L1D_MEM, SDRAM_KERN , SDRAM_RAM_MTD, SDRAM_DMAZ, RES_MEM, ASYNC_MEM, L2_MEM};
+#define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M)
+
+#define CPLB_MEM CONFIG_MAX_MEM_SIZE
+
+/*
+* Number of required data CPLB switchtable entries
+* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
+* approx 16 for smaller 1MB page size CPLBs for alignment purposes
+* 1 for L1 Data Memory
+* possibly 1 for L2 Data Memory
+* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
+* 1 for ASYNC Memory
+*/
+#define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \
+ + ASYNC_MEMORY_CPLB_COVERAGE) * 2)
+
+/*
+* Number of required instruction CPLB switchtable entries
+* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
+* approx 12 for smaller 1MB page size CPLBs for alignment purposes
+* 1 for L1 Instruction Memory
+* possibly 1 for L2 Instruction Memory
+* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
+*/
+#define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2)
+
+/* Number of CPLB table entries, used for cplb-nompu. */
+#define CPLB_TBL_ENTRIES (16 * 4)
+
+enum {
+ ZERO_P, L1I_MEM, L1D_MEM, L2_MEM, SDRAM_KERN, SDRAM_RAM_MTD, SDRAM_DMAZ,
+ RES_MEM, ASYNC_MEM, OCB_ROM
+};
struct cplb_desc {
u32 start; /* start address */
@@ -66,8 +100,8 @@ struct cplb_tab {
u16 size;
};
-extern u_long icplb_table[];
-extern u_long dcplb_table[];
+extern u_long icplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];
+extern u_long dcplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];
/* Till here we are discussing about the static memory management model.
* However, the operating environments commonly define more CPLB
@@ -78,15 +112,18 @@ extern u_long dcplb_table[];
* This is how Page descriptor Table is implemented in uClinux/Blackfin.
*/
-extern u_long ipdt_table[];
-extern u_long dpdt_table[];
+extern u_long ipdt_tables[NR_CPUS][MAX_SWITCH_I_CPLBS+1];
+extern u_long dpdt_tables[NR_CPUS][MAX_SWITCH_D_CPLBS+1];
#ifdef CONFIG_CPLB_INFO
-extern u_long ipdt_swapcount_table[];
-extern u_long dpdt_swapcount_table[];
+extern u_long ipdt_swapcount_tables[NR_CPUS][MAX_SWITCH_I_CPLBS];
+extern u_long dpdt_swapcount_tables[NR_CPUS][MAX_SWITCH_D_CPLBS];
#endif
+extern void bfin_icache_init(u_long icplbs[]);
+extern void bfin_dcache_init(u_long dcplbs[]);
#endif /* CONFIG_MPU */
-extern void generate_cplb_tables(void);
-
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+extern void generate_cplb_tables_cpu(unsigned int cpu);
+#endif
#endif
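
To make the MAX_SWITCH_*_CPLBS sizing in the hunk above concrete, here is a worked example with hypothetical numbers (CONFIG_MAX_MEM_SIZE = 128 and an ASYNC_MEMORY_CPLB_COVERAGE of 16 are assumptions chosen for illustration, not values from the patch):

/*
 * Worked example (hypothetical values, not taken from the patch):
 * with CONFIG_MAX_MEM_SIZE = 128 (MB) and ASYNC_MEMORY_CPLB_COVERAGE = 16,
 *
 *   MAX_SWITCH_D_CPLBS = ((128 / 4) + 16 + 1 + 1 + 1 + 16) * 2 = 134
 *   MAX_SWITCH_I_CPLBS = ((128 / 4) + 12 + 1 + 1 + 1) * 2      =  94
 *
 * The trailing "* 2" is there because the nompu page-descriptor tables
 * store an address word and a data word per descriptor, so every
 * potential CPLB costs two u_long slots.
 */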
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index 35593dda2a4d..944e29faae48 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -37,6 +37,10 @@
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
+/* Note: L1 stacks are CPU-private things, so we bluntly disable this
+ feature in SMP mode, and use the per-CPU scratch SRAM bank only to
+ store the PDA instead. */
+
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
@@ -88,12 +92,15 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
struct task_struct *tsk)
{
+#ifdef CONFIG_MPU
+ unsigned int cpu = smp_processor_id();
+#endif
if (prev_mm == next_mm)
return;
#ifdef CONFIG_MPU
- if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
- flush_switched_cplbs();
- set_mask_dcplbs(next_mm->context.page_rwx_mask);
+ if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+ flush_switched_cplbs(cpu);
+ set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
}
#endif
@@ -138,9 +145,10 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr,
static inline void update_protections(struct mm_struct *mm)
{
- if (mm->context.page_rwx_mask == current_rwx_mask) {
- flush_switched_cplbs();
- set_mask_dcplbs(mm->context.page_rwx_mask);
+ unsigned int cpu = smp_processor_id();
+ if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+ flush_switched_cplbs(cpu);
+ set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
}
}
#endif
@@ -165,6 +173,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline void destroy_context(struct mm_struct *mm)
{
struct sram_list_struct *tmp;
+#ifdef CONFIG_MPU
+ unsigned int cpu = smp_processor_id();
+#endif
#ifdef CONFIG_APP_STACK_L1
if (current_l1_stack_save == mm->context.l1_stack_save)
@@ -179,8 +190,8 @@ static inline void destroy_context(struct mm_struct *mm)
kfree(tmp);
}
#ifdef CONFIG_MPU
- if (current_rwx_mask == mm->context.page_rwx_mask)
- current_rwx_mask = NULL;
+ if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
+ current_rwx_mask[cpu] = NULL;
free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}