Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/cputable.h           17
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h       2
-rw-r--r--  arch/powerpc/include/asm/hvcall.h              1
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h            16
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h            11
-rw-r--r--  arch/powerpc/include/asm/processor.h          13
-rw-r--r--  arch/powerpc/include/asm/reg.h                11
-rw-r--r--  arch/powerpc/include/asm/signal.h              3
-rw-r--r--  arch/powerpc/include/asm/tm.h                  2
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild           1
-rw-r--r--  arch/powerpc/include/uapi/asm/tm.h            18
-rw-r--r--  arch/powerpc/kernel/cputable.c                 6
-rw-r--r--  arch/powerpc/kernel/entry_32.S                 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S                35
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S          92
-rw-r--r--  arch/powerpc/kernel/irq.c                      2
-rw-r--r--  arch/powerpc/kernel/pci-common.c              35
-rw-r--r--  arch/powerpc/kernel/process.c                  7
-rw-r--r--  arch/powerpc/kernel/signal.c                  40
-rw-r--r--  arch/powerpc/kernel/signal.h                   2
-rw-r--r--  arch/powerpc/kernel/signal_32.c               10
-rw-r--r--  arch/powerpc/kernel/signal_64.c               23
-rw-r--r--  arch/powerpc/kernel/traps.c                   39
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c                     5
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                   2
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c              2
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c                29
-rw-r--r--  arch/powerpc/kvm/booke.c                      21
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c                    5
-rw-r--r--  arch/powerpc/kvm/e500mc.c                      2
-rw-r--r--  arch/powerpc/lib/copypage_power7.S            19
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S            12
-rw-r--r--  arch/powerpc/mm/hash_native_64.c              30
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c                  8
-rw-r--r--  arch/powerpc/perf/core-book3s.c               69
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig         2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c     4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pe.c        3
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c  12
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c                 24
-rw-r--r--  arch/powerpc/sysdev/mpic.c                     4
41 files changed, 371 insertions(+), 270 deletions(-)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 26807e5aff51..6f3887d884d2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#ifndef __ASSEMBLY__
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
- CPU_FTR_HVMODE)
+ CPU_FTR_HVMODE | CPU_FTR_DABRX)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+ CPU_FTR_DABRX)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
- CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
- CPU_FTR_UNALIGNED_LD_STD)
+ CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_PURR | CPU_FTR_REAL_LE)
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
- CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+ CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_ICSWX | CPU_FTR_DABRX)
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8e5fae8beaf6..46793b58a761 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -513,7 +513,7 @@ label##_common: \
*/
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
- FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
+ FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index cf4df8e2139a..0c7f2bfcf134 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -264,6 +264,7 @@
#define H_GET_MPP 0x2D4
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
#define H_BEST_ENERGY 0x2F4
+#define H_XIRR_X 0x2FC
#define H_RANDOM 0x300
#define H_COP 0x304
#define H_GET_MPP_X 0x314
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index b9dd382cb349..851bac7afa4b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -54,8 +54,16 @@
#define BOOKE_INTERRUPT_DEBUG 15
/* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use the same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+ BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
#define BOOKE_INTERRUPT_DOORBELL 36
@@ -67,10 +75,6 @@
#define BOOKE_INTERRUPT_HV_SYSCALL 40
#define BOOKE_INTERRUPT_HV_PRIV 41
-/* altivec */
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
-
/* book3s */
#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index cea8496091ff..2f1b6c5f8174 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
#define PPC440EP_ERR42
#endif
+/* The following stops all load and store data streams associated with stream
+ * ID (i.e. streams created explicitly). The embedded and server mnemonics for
+ * dcbt are different so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch) \
+.machine push ; \
+.machine "power4" ; \
+ lis scratch,0x60000000@h; \
+ dcbt r0,scratch,0b01010; \
+.machine pop
+
/*
* toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
* keep the address intact to be compatible with code shared with
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 594db6bc093c..14a658363698 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -409,21 +409,16 @@ static inline void prefetchw(const void *x)
#endif
#ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- unsigned long sp;
-
if (is_32)
- sp = regs->gpr[1] & 0x0ffffffffUL;
- else
- sp = regs->gpr[1];
-
+ return sp & 0x0ffffffffUL;
return sp;
}
#else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- return regs->gpr[1];
+ return sp;
}
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6136515c7f2..4a9e408644fe 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -111,17 +111,6 @@
#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
-/* Reason codes describing kernel causes for transaction aborts. By
- convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
- the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED 0xfe
-#define TM_CAUSE_TLBI 0xfc
-#define TM_CAUSE_FAC_UNAV 0xfa
-#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */
-#define TM_CAUSE_MISC 0xf6
-#define TM_CAUSE_SIGNAL 0xf4
-
#if defined(CONFIG_PPC_BOOK3S_64)
#define MSR_64BIT MSR_SF
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index fbe66c463891..9322c28aebd2 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -3,5 +3,8 @@
#define __ARCH_HAS_SA_RESTORER
#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 4b4449abf3f8..9dfbc34bdbf5 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -5,6 +5,8 @@
* Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
*/
+#include <uapi/asm/tm.h>
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
extern void do_load_up_transact_fpu(struct thread_struct *thread);
extern void do_load_up_transact_altivec(struct thread_struct *thread);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f7bca6370745..5182c8622b54 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -40,6 +40,7 @@ header-y += statfs.h
header-y += swab.h
header-y += termbits.h
header-y += termios.h
+header-y += tm.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644
index 000000000000..85059a00f560
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts. By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent. PAPR reserves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT 0x01
+#define TM_CAUSE_RESCHED 0xde
+#define TM_CAUSE_TLBI 0xdc
+#define TM_CAUSE_FAC_UNAV 0xda
+#define TM_CAUSE_SYSCALL 0xd8 /* future use */
+#define TM_CAUSE_MISC 0xd6 /* future use */
+#define TM_CAUSE_SIGNAL 0xd4
+#define TM_CAUSE_ALIGNMENT 0xd2
+#define TM_CAUSE_EMULATE 0xd0
+
+#endif
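
These cause codes land in the top byte of TEXASR, where a transaction's
failure handler can read them. A minimal userspace sketch, assuming GCC's
Power HTM builtins (compile with -mhtm; not part of this patch):

    #include <htmintrin.h>

    unsigned char last_tm_cause(void)
    {
            if (__builtin_tbegin(0)) {
                    /* transactional path; a kernel-forced abort resumes
                     * at the else branch with the cause in TEXASR */
                    __builtin_tend(0);
                    return 0;
            }
            return __builtin_get_texasr() >> 56;  /* e.g. TM_CAUSE_ALIGNMENT
                                                     | TM_CAUSE_PERSISTENT */
    }
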
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c60bbec25c1f..2a45d0f04385 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -452,7 +452,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7+ (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .cpu_user_features = COMMON_USER2_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -507,7 +507,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.platform = "power8",
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d22e73e4618b..22b45a4955cd 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -849,7 +849,7 @@ resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_FLAGS(r9)
- andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
beq+ 1f
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0e9095e47b5b..8741c854e03d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -465,20 +465,6 @@ BEGIN_FTR_SECTION
std r0, THREAD_EBBHR(r3)
mfspr r0, SPRN_EBBRR
std r0, THREAD_EBBRR(r3)
-
- /* PMU registers made user read/(write) by EBB */
- mfspr r0, SPRN_SIAR
- std r0, THREAD_SIAR(r3)
- mfspr r0, SPRN_SDAR
- std r0, THREAD_SDAR(r3)
- mfspr r0, SPRN_SIER
- std r0, THREAD_SIER(r3)
- mfspr r0, SPRN_MMCR0
- std r0, THREAD_MMCR0(r3)
- mfspr r0, SPRN_MMCR2
- std r0, THREAD_MMCR2(r3)
- mfspr r0, SPRN_MMCRA
- std r0, THREAD_MMCRA(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif
@@ -501,6 +487,13 @@ BEGIN_FTR_SECTION
ldarx r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after the
+ * context switch and will stop the HW from creating streams itself
+ */
+ DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
@@ -574,20 +567,6 @@ BEGIN_FTR_SECTION
ld r0, THREAD_EBBRR(r4)
mtspr SPRN_EBBRR, r0
- /* PMU registers made user read/(write) by EBB */
- ld r0, THREAD_SIAR(r4)
- mtspr SPRN_SIAR, r0
- ld r0, THREAD_SDAR(r4)
- mtspr SPRN_SDAR, r0
- ld r0, THREAD_SIER(r4)
- mtspr SPRN_SIER, r0
- ld r0, THREAD_MMCR0(r4)
- mtspr SPRN_MMCR0, r0
- ld r0, THREAD_MMCR2(r4)
- mtspr SPRN_MMCR2, r0
- ld r0, THREAD_MMCRA(r4)
- mtspr SPRN_MMCRA, r0
-
ld r0,THREAD_TAR(r4)
mtspr SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6eba1bf61ad..40e4a17c8ba0 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
xori r10,r10,(MSR_FE0|MSR_FE1)
mtmsrd r10
sync
- fmr 0,0
- fmr 1,1
- fmr 2,2
- fmr 3,3
- fmr 4,4
- fmr 5,5
- fmr 6,6
- fmr 7,7
- fmr 8,8
- fmr 9,9
- fmr 10,10
- fmr 11,11
- fmr 12,12
- fmr 13,13
- fmr 14,14
- fmr 15,15
- fmr 16,16
- fmr 17,17
- fmr 18,18
- fmr 19,19
- fmr 20,20
- fmr 21,21
- fmr 22,22
- fmr 23,23
- fmr 24,24
- fmr 25,25
- fmr 26,26
- fmr 27,27
- fmr 28,28
- fmr 29,29
- fmr 30,30
- fmr 31,31
+
+#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n) FMR2(n) ; FMR2(n+2)
+#define FMR8(n) FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+ FMR32(0)
+
FTR_SECTION_ELSE
/*
* To denormalise we need to move a copy of the register to itself.
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE
oris r10,r10,MSR_VSX@h
mtmsrd r10
sync
- XVCPSGNDP(0,0,0)
- XVCPSGNDP(1,1,1)
- XVCPSGNDP(2,2,2)
- XVCPSGNDP(3,3,3)
- XVCPSGNDP(4,4,4)
- XVCPSGNDP(5,5,5)
- XVCPSGNDP(6,6,6)
- XVCPSGNDP(7,7,7)
- XVCPSGNDP(8,8,8)
- XVCPSGNDP(9,9,9)
- XVCPSGNDP(10,10,10)
- XVCPSGNDP(11,11,11)
- XVCPSGNDP(12,12,12)
- XVCPSGNDP(13,13,13)
- XVCPSGNDP(14,14,14)
- XVCPSGNDP(15,15,15)
- XVCPSGNDP(16,16,16)
- XVCPSGNDP(17,17,17)
- XVCPSGNDP(18,18,18)
- XVCPSGNDP(19,19,19)
- XVCPSGNDP(20,20,20)
- XVCPSGNDP(21,21,21)
- XVCPSGNDP(22,22,22)
- XVCPSGNDP(23,23,23)
- XVCPSGNDP(24,24,24)
- XVCPSGNDP(25,25,25)
- XVCPSGNDP(26,26,26)
- XVCPSGNDP(27,27,27)
- XVCPSGNDP(28,28,28)
- XVCPSGNDP(29,29,29)
- XVCPSGNDP(30,30,30)
- XVCPSGNDP(31,31,31)
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+ XVCPSGNDP32(0)
+
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+ b denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+ XVCPSGNDP32(32)
+denorm_done:
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
@@ -721,7 +683,7 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
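
The FMR*/XVCPSGNDP* helpers unroll recursively back into the straight-line
sequence the hand-written lists used to spell out; the assembler folds the
n+1 arithmetic. For example:

    /* FMR4(0) expands, via FMR2, to: */
    fmr 0,0 ; fmr 1,1 ; fmr 2,2 ; fmr 3,3

FMR32(0) covers FPRs 0-31 the same way, and on the POWER8 path
XVCPSGNDP32(32) covers VSRs 32-63.
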
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5cbcf4d5a808..ea185e0b3cae 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
* in case we also had a rollover while hard disabled
*/
local_paca->irq_happened &= ~PACA_IRQ_DEC;
- if (decrementer_check_overflow())
+ if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
return 0x900;
/* Finally check if an external interrupt happened */
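
The extra (happened & PACA_IRQ_DEC) test covers a lost-timer case; a sketch
of the order of events it guards against (reconstructed, since no commit
text is carried here):

    /* 1. decrementer fires while soft-disabled: PACA_IRQ_DEC is latched
     * 2. the decrementer is re-armed before the replay runs
     * 3. at replay time decrementer_check_overflow() is now false
     * Without the latched-bit test, 0x900 would never be replayed and
     * the timer event would be lost. */
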
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9acf50dd5b2..f46914a0f33e 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -657,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
* ranges. However, some machines (thanks Apple !) tend to split their
* space into lots of small contiguous ranges. So we have to coalesce.
*
- * - We can only cope with all memory ranges having the same offset
- * between CPU addresses and PCI addresses. Unfortunately, some bridges
- * are setup for a large 1:1 mapping along with a small "window" which
- * maps PCI address 0 to some arbitrary high address of the CPU space in
- * order to give access to the ISA memory hole.
- * The way out of here that I've chosen for now is to always set the
- * offset based on the first resource found, then override it if we
- * have a different offset and the previous was set by an ISA hole.
- *
* - Some busses have IO space not starting at 0, which causes trouble with
* the way we do our IO resource renumbering. The code somewhat deals with
* it for 64 bits but I would expect problems on 32 bits.
@@ -680,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
int rlen;
int pna = of_n_addr_cells(dev);
int np = pna + 5;
- int memno = 0, isa_hole = -1;
+ int memno = 0;
u32 pci_space;
unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
- unsigned long long isa_mb = 0;
struct resource *res;
printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -777,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
}
/* Handles ISA memory hole space here */
if (pci_addr == 0) {
- isa_mb = cpu_addr;
- isa_hole = memno;
if (primary || isa_mem_base == 0)
isa_mem_base = cpu_addr;
hose->isa_mem_phys = cpu_addr;
@@ -839,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
+ struct pci_bus_region reg;
if (!res->flags)
continue;
@@ -847,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
* at 0 as unset as well, except if PCI_PROBE_ONLY is also set
* since in that case, we don't want to re-assign anything
*/
+ pcibios_resource_to_bus(dev, &reg, res);
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
- (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+ (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
/* Only print message if not re-assigning */
if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
@@ -1004,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pci_dma_bus_setup(bus);
}
-void pcibios_setup_device(struct pci_dev *dev)
+static void pcibios_setup_device(struct pci_dev *dev)
{
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
@@ -1025,6 +1015,17 @@ void pcibios_setup_device(struct pci_dev *dev)
ppc_md.pci_irq_fixup(dev);
}
+int pcibios_add_device(struct pci_dev *dev)
+{
+ /*
+ * We can only call pcibios_setup_device() after bus setup is complete,
+ * since some of the platform specific DMA setup code depends on it.
+ */
+ if (dev->bus->is_added)
+ pcibios_setup_device(dev);
+ return 0;
+}
+
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1479,10 +1480,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
if (ppc_md.pcibios_enable_device_hook(dev))
return -EINVAL;
- /* avoid pcie irq fix up impact on cardbus */
- if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
- pcibios_setup_device(dev);
-
return pci_enable_resources(dev, mask);
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a902723fdc69..076d1242507a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
mtspr(SPRN_DABR, dabr);
- mtspr(SPRN_DABRX, dabrx);
+ if (cpu_has_feature(CPU_FTR_DABRX))
+ mtspr(SPRN_DABRX, dabrx);
return 0;
}
#else
@@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
-void __ppc64_runlatch_on(void)
+void notrace __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
@@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void)
}
/* Called with hard IRQs off */
-void __ppc64_runlatch_off(void)
+void notrace __ppc64_runlatch_off(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 577a8aa69c6e..457e97aa2945 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -18,6 +18,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/debug.h>
+#include <asm/tm.h>
#include "signal.h"
@@ -30,13 +31,13 @@ int show_unhandled_signals = 1;
/*
* Allocate space for the signal frame
*/
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32)
{
unsigned long oldsp, newsp;
/* Default to using normal stack */
- oldsp = get_clean_sp(regs, is_32);
+ oldsp = get_clean_sp(sp, is_32);
/* Check for alt stack */
if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -175,3 +176,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
user_enter();
}
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+ /* When in an active transaction that takes a signal, we need to be
+ * careful with the stack. It's possible that the stack has moved back
+ * up after the tbegin. The obvious case here is when the tbegin is
+ * called inside a function that returns before a tend. In this case,
+ * the stack is part of the checkpointed transactional memory state.
+ * If we write over this non-transactionally or in suspend, we are in
+ * trouble because if we get a tm abort, the program counter and stack
+ * pointer will be back at the tbegin but our in memory stack won't be
+ * valid anymore.
+ *
+ * To avoid this, when taking a signal in an active transaction, we
+ * need to use the stack pointer from the checkpointed state, rather
+ * than the speculated state. This ensures that the signal context
+ * (written while TM suspended) will be written below the stack required
+ * for the rollback. The transaction is aborted because of the treclaim,
+ * so any memory written between the tbegin and the signal will be
+ * rolled back anyway.
+ *
+ * For signals taken in non-TM or suspended mode, we use the
+ * normal/non-checkpointed stack pointer.
+ */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ tm_enable();
+ tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
+ return current->thread.ckpt_regs.gpr[1];
+ }
+#endif
+ return regs->gpr[1];
+}
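
Callers pick the signal-frame location through the new helper; a condensed
sketch of the pattern the signal_32.c/signal_64.c hunks below switch to:

    unsigned long sp = get_tm_stackpointer(regs);  /* ckpt_regs.gpr[1] if
                                                      transactional, else
                                                      regs->gpr[1] */
    frame = get_sigframe(ka, sp, sizeof(*frame), is_32);
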
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index ec84c901ceab..c69b9aeb9f23 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32);
extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 95068bf569ad..201385c3a1ae 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
{
unsigned long msr = regs->msr;
- /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
- * thread.transact_fpr[], thread.transact_vr[], etc.
- */
- tm_enable();
- tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
/* Set up Signal Frame */
/* Put a Real Time Context onto stack */
- rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+ rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
addr = rt_sf;
if (unlikely(rt_sf == NULL))
goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
unsigned long tramp;
/* Set up Signal Frame */
- frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
if (unlikely(frame == NULL))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c1794286098c..345947367ec0 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
* As above, but Transactional Memory is in use, so deliver sigcontexts
* containing checkpointed and transactional register states.
*
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state. If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
*/
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
- /* tm_reclaim rolls back all reg states, saving checkpointed (older)
- * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
- * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
- * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
- * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the
- * stack, in *regs.
- */
- tm_enable();
- tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
unsigned long newsp = 0;
long err = 0;
- frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
if (unlikely(frame == NULL))
goto badframe;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a7a648f6b750..c0e5caf8ccc7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,6 +53,7 @@
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
+#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
@@ -932,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
return 0;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+ /* If we're emulating a load/store in an active transaction, we cannot
+ * emulate it as the kernel operates in transaction suspended context.
+ * We need to abort the transaction. This creates a persistent TM
+ * abort, so we tell userspace what caused it with a new cause code.
+ */
+ if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+ tm_enable();
+ tm_abort(cause);
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+ return false;
+}
+#endif
+
static int emulate_instruction(struct pt_regs *regs)
{
u32 instword;
@@ -971,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs)
/* Emulate load/store string insn. */
if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+ if (tm_abort_check(regs,
+ TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+ return -EINVAL;
PPC_WARN_EMULATED(string, regs);
return emulate_string_inst(regs, instword);
}
@@ -1139,6 +1165,16 @@ bail:
exception_exit(prev_state);
}
+/*
+ * This occurs when running in hypervisor mode on POWER6 or later
+ * and an illegal instruction is encountered.
+ */
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+{
+ regs->msr |= REASON_ILLEGAL;
+ program_check_exception(regs);
+}
+
void alignment_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
@@ -1148,6 +1184,9 @@ void alignment_exception(struct pt_regs *regs)
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
+ if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+ goto bail;
+
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5dd3ab469976..ed0385448148 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *tlbe;
unsigned int gtlb_index;
+ int idx;
gtlb_index = kvmppc_get_gpr(vcpu, ra);
if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
return EMULATE_FAIL;
}
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if (tlbe_is_host_safe(vcpu, tlbe)) {
gva_t eaddr;
gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
tlbe->word2);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9de24f8e03c7..550f5928b394 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_CPPR:
case H_EOI:
case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu)) {
ret = kvmppc_xics_hcall(vcpu, req);
break;
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index b24309c6c2d5..da0e0bc268bd 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -257,6 +257,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_CPPR:
case H_EOI:
case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu))
return kvmppc_h_pr_xics_hcall(vcpu, cmd);
break;
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index f7a103756618..94c1dd46b83d 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -650,6 +650,23 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
return H_SUCCESS;
}
+static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ union kvmppc_icp_state state;
+ struct kvmppc_icp *icp;
+
+ icp = vcpu->arch.icp;
+ if (icp->server_num != server) {
+ icp = kvmppc_xics_find_server(vcpu->kvm, server);
+ if (!icp)
+ return H_PARAMETER;
+ }
+ state = ACCESS_ONCE(icp->state);
+ kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+ kvmppc_set_gpr(vcpu, 5, state.mfrr);
+ return H_SUCCESS;
+}
+
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
union kvmppc_icp_state old_state, new_state;
@@ -787,6 +804,18 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
if (!xics || !vcpu->arch.icp)
return H_HARDWARE;
+ /* These requests don't have real-mode implementations at present */
+ switch (req) {
+ case H_XIRR_X:
+ res = kvmppc_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 4, res);
+ kvmppc_set_gpr(vcpu, 5, get_tb());
+ return rc;
+ case H_IPOLL:
+ rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+ return rc;
+ }
+
/* Check for real mode returning too hard */
if (xics->real_mode)
return kvmppc_xics_rm_complete(vcpu, req);
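
On H_IPOLL the guest gets the poll result back in r4/r5; a sketch of
unpacking it, mirroring the packing in kvmppc_h_ipoll() above (variable
names hypothetical):

    u32 xirr = retbuf[0];          /* r4 from the hcall */
    u8  cppr = xirr >> 24;         /* current processor priority */
    u32 xisr = xirr & 0x00ffffff;  /* pending source, 0 if none */
    u8  mfrr = retbuf[1];          /* r5: current MFRR */
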
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1020119226db..1a1b51189773 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
ret = s;
goto out;
}
- kvmppc_lazy_ee_enable();
kvm_guest_enter();
@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
#endif
+ kvmppc_lazy_ee_enable();
+
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -832,6 +833,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
int r = RESUME_HOST;
int s;
+ int idx;
+
+#ifdef CONFIG_PPC64
+ WARN_ON(local_paca->irq_happened != 0);
+#endif
+
+ /*
+ * We enter with interrupts disabled in hardware, but
+ * we need to call hard_irq_disable anyway to ensure that
+ * the software state is kept in sync.
+ */
+ hard_irq_disable();
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1066,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
gfn = gpaddr >> PAGE_SHIFT;
@@ -1075,6 +1090,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
@@ -1098,6 +1114,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
gfn = gpaddr >> PAGE_SHIFT;
@@ -1114,6 +1132,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index c41a5a96b558..6d6f153b6c1d 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
struct kvm_book3e_206_tlb_entry *gtlbe;
int tlbsel, esel;
int recal = 0;
+ int idx;
tlbsel = get_tlb_tlbsel(vcpu);
esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
kvmppc_set_tlb1map_range(vcpu, gtlbe);
}
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
return EMULATE_DONE;
}
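
The same bracket is added in 44x_tlb.c and booke.c above, presumably
because the memslot lookups inside kvmppc_mmu_map() are protected by
kvm->srcu; the common shape is:

    idx = srcu_read_lock(&vcpu->kvm->srcu);
    kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
    srcu_read_unlock(&vcpu->kvm->srcu, idx);
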
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 753cc99eff2b..19c8379575f7 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
r = 0;
- else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
- r = 0;
else
r = -ENOTSUPP;
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 0ef75bf0695c..395c594722a2 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7)
* aligned we don't need to clear the bottom 7 bits of either
* address.
*/
- ori r9,r3,1 /* stream=1 */
+ ori r9,r3,1 /* stream=1 => to */
#ifdef CONFIG_PPC_64K_PAGES
- lis r7,0x0E01 /* depth=7, units=512 */
+ lis r7,0x0E01 /* depth=7
+ * units/cachelines=512 */
#else
lis r7,0x0E00 /* depth=7 */
- ori r7,r7,0x1000 /* units=32 */
+ ori r7,r7,0x1000 /* units/cachelines=32 */
#endif
ori r10,r7,1 /* stream=1 */
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7)
.machine push
.machine "power4"
- dcbt r0,r4,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ /* setup read stream 0 */
+ dcbt r0,r4,0b01000 /* addr from */
+ dcbt r0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+ dcbtst r0,r9,0b01000 /* addr to */
+ dcbtst r0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt r0,r8,0b01010 /* all streams GO */
.machine pop
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 0d24ff15f5f6..d1f11795a7ad 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -318,12 +318,14 @@ err1; stb r0,0(r3)
.machine push
.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ /* setup read stream 0 */
+ dcbt r0,r6,0b01000 /* addr from */
+ dcbt r0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+ dcbtst r0,r9,0b01000 /* addr to */
+ dcbtst r0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt r0,r8,0b01010 /* all streams GO */
.machine pop
beq cr1,.Lunwind_stack_nonvmx_copy
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6a2aead5b0e5..4c122c3f1623 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
hpte_v = hptep->v;
actual_psize = hpte_actual_psize(hptep, psize);
+ /*
+ * We always need to invalidate the TLB because hpte_remove doesn't do
+ * a TLB invalidate. If a hash bucket gets full, we "evict" a more or
+ * less random entry from it. When we do that we don't invalidate the TLB
+ * (hpte_remove) because we assume the old translation is still
+ * technically "valid".
+ */
if (actual_psize < 0) {
- native_unlock_hpte(hptep);
- return -1;
+ actual_psize = psize;
+ ret = -1;
+ goto err_out;
}
- /* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v)) {
DBG_LOW(" -> miss\n");
ret = -1;
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
}
+err_out:
native_unlock_hpte(hptep);
/* Ensure it is out of the tlb too. */
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
hptep = htab_address + slot;
actual_psize = hpte_actual_psize(hptep, psize);
if (actual_psize < 0)
- return;
+ actual_psize = psize;
/* Update the HPTE */
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
hpte_v = hptep->v;
actual_psize = hpte_actual_psize(hptep, psize);
+ /*
+ * We always need to invalidate the TLB because hpte_remove doesn't do
+ * a TLB invalidate. If a hash bucket gets full, we "evict" a more or
+ * less random entry from it. When we do that we don't invalidate the TLB
+ * (hpte_remove) because we assume the old translation is still
+ * technically "valid".
+ */
if (actual_psize < 0) {
+ actual_psize = psize;
native_unlock_hpte(hptep);
- local_irq_restore(flags);
- return;
+ goto err_out;
}
- /* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v))
native_unlock_hpte(hptep);
else
/* Invalidate the hpte. NOTE: this also unlocks it */
hptep->v = 0;
+err_out:
/* Invalidate the TLB */
tlbie(vpn, psize, actual_psize, ssize, local);
-
local_irq_restore(flags);
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 237c8e5f2640..77fdd2cef33b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
do {
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
- if (pmd_none_or_clear_bad(pmd))
+ if (!is_hugepd(pmd)) {
+ /*
+ * If it is not a hugepd pointer, the entry should
+ * already have been cleared.
+ */
+ WARN_ON(!pmd_none_or_clear_bad(pmd));
continue;
+ }
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* Increment next by the size of the huge mapping since
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 426180b84978..29c6482890c8 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -110,7 +110,7 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static bool regs_use_siar(struct pt_regs *regs)
{
- return !!(regs->result & 1);
+ return !!regs->result;
}
/*
@@ -136,22 +136,30 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
*/
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
- unsigned long sdsync;
+ bool sdar_valid;
- if (ppmu->flags & PPMU_SIAR_VALID)
- sdsync = POWER7P_MMCRA_SDAR_VALID;
- else if (ppmu->flags & PPMU_ALT_SIPR)
- sdsync = POWER6_MMCRA_SDSYNC;
- else
- sdsync = MMCRA_SDSYNC;
+ if (ppmu->flags & PPMU_HAS_SIER)
+ sdar_valid = regs->dar & SIER_SDAR_VALID;
+ else {
+ unsigned long sdsync;
+
+ if (ppmu->flags & PPMU_SIAR_VALID)
+ sdsync = POWER7P_MMCRA_SDAR_VALID;
+ else if (ppmu->flags & PPMU_ALT_SIPR)
+ sdsync = POWER6_MMCRA_SDSYNC;
+ else
+ sdsync = MMCRA_SDSYNC;
+
+ sdar_valid = mmcra & sdsync;
+ }
- if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
*addrp = mfspr(SPRN_SDAR);
}
@@ -181,11 +189,6 @@ static bool regs_sipr(struct pt_regs *regs)
return !!(regs->dsisr & sipr);
}
-static bool regs_no_sipr(struct pt_regs *regs)
-{
- return !!(regs->result & 2);
-}
-
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
if (regs->msr & MSR_PR)
@@ -208,7 +211,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
* SIAR which should give slightly more reliable
* results
*/
- if (regs_no_sipr(regs)) {
+ if (ppmu->flags & PPMU_NO_SIPR) {
unsigned long siar = mfspr(SPRN_SIAR);
if (siar >= PAGE_OFFSET)
return PERF_RECORD_MISC_KERNEL;
@@ -239,22 +242,9 @@ static inline void perf_read_regs(struct pt_regs *regs)
int use_siar;
regs->dsisr = mmcra;
- regs->result = 0;
-
- if (ppmu->flags & PPMU_NO_SIPR)
- regs->result |= 2;
-
- /*
- * On power8 if we're in random sampling mode, the SIER is updated.
- * If we're in continuous sampling mode, we don't have SIPR.
- */
- if (ppmu->flags & PPMU_HAS_SIER) {
- if (marked)
- regs->dar = mfspr(SPRN_SIER);
- else
- regs->result |= 2;
- }
+ if (ppmu->flags & PPMU_HAS_SIER)
+ regs->dar = mfspr(SPRN_SIER);
/*
* If this isn't a PMU exception (eg a software event) the SIAR is
@@ -279,12 +269,12 @@ static inline void perf_read_regs(struct pt_regs *regs)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
use_siar = 0;
- else if (!regs_no_sipr(regs) && regs_sipr(regs))
+ else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
use_siar = 0;
else
use_siar = 1;
- regs->result |= use_siar;
+ regs->result = use_siar;
}
/*
@@ -308,8 +298,13 @@ static inline int siar_valid(struct pt_regs *regs)
unsigned long mmcra = regs->dsisr;
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
- if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
- return mmcra & POWER7P_MMCRA_SIAR_VALID;
+ if (marked) {
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return regs->dar & SIER_SIAR_VALID;
+
+ if (ppmu->flags & PPMU_SIAR_VALID)
+ return mmcra & POWER7P_MMCRA_SIAR_VALID;
+ }
return 1;
}
@@ -1763,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
}
}
}
- if ((!found) && printk_ratelimit())
+ if (!found && !nmi && printk_ratelimit())
printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
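
A summary sketch of the pt_regs fields perf_read_regs() above now borrows,
as left for the later sampling code:

    /* a condensed view of perf_read_regs() after this patch: */
    regs->dsisr = mmcra;                      /* MMCRA (MMCRA_SAMPLE_ENABLE etc.) */
    if (ppmu->flags & PPMU_HAS_SIER)
            regs->dar = mfspr(SPRN_SIER);     /* SIER: SIAR_VALID / SDAR_VALID */
    regs->result = use_siar;                  /* now a plain boolean */
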
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 023b288f895b..4459eff7a75a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,6 +19,8 @@ config PPC_PSERIES
select ZLIB_DEFLATE
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
+ select HOTPLUG if SMP
+ select HOTPLUG_CPU if SMP
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 5a4c87903057..5ce3ba7ad137 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -294,8 +294,6 @@ void __init eeh_addr_cache_build(void)
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
for_each_pci_dev(dev) {
- eeh_addr_cache_insert_dev(dev);
-
dn = pci_device_to_OF_node(dev);
if (!dn)
continue;
@@ -308,6 +306,8 @@ void __init eeh_addr_cache_build(void)
dev->dev.archdata.edev = edev;
edev->pdev = dev;
+ eeh_addr_cache_insert_dev(dev);
+
eeh_sysfs_add_device(dev);
}
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
index fe43d1aa2cf1..9d4a9e8562b2 100644
--- a/arch/powerpc/platforms/pseries/eeh_pe.c
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -639,7 +639,8 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
if (pe->type & EEH_PE_PHB) {
bus = pe->phb->bus;
- } else if (pe->type & EEH_PE_BUS) {
+ } else if (pe->type & EEH_PE_BUS ||
+ pe->type & EEH_PE_DEVICE) {
edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f935737..b456b157d33d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
ibm_configure_pe = rtas_token("ibm,configure-pe");
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
- /* necessary sanity check */
+ /*
+ * Necessary sanity check. We needn't check "get-config-addr-info"
+ * and its variant since even old firmware probably supports the
+ * domain/bus/slot/function address form for EEH RTAS operations.
+ */
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
__func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
__func__);
return -EINVAL;
- } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
- ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
- pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
- "<ibm,get-config-addr-info> invalid\n",
- __func__);
- return -EINVAL;
} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,configure-pe> and "
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 028ac1f71b51..46ac1ddea683 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
return indirect_read_config(bus, devfn, offset, len, val);
}
-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+static struct pci_ops fsl_indirect_pcie_ops =
{
.read = fsl_indirect_read_config,
.write = indirect_write_config,
};
-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
- resource_size_t cfg_addr,
- resource_size_t cfg_data, u32 flags)
-{
- setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
- hose->ops = &fsl_indirect_pci_ops;
-}
-
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
#define MAX_PHYS_ADDR_BITS 40
static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
if (!hose->private_data)
goto no_bridge;
- fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
- PPC_INDIRECT_TYPE_BIG_ENDIAN);
+ setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+ PPC_INDIRECT_TYPE_BIG_ENDIAN);
if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+ /* use fsl_indirect_read_config for PCIe */
+ hose->ops = &fsl_indirect_pcie_ops;
/* For PCIe, read HEADER_TYPE to identify controller mode */
early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
if (ret)
goto err0;
} else {
- fsl_setup_indirect_pci(hose, rsrc_cfg.start,
- rsrc_cfg.start + 4, 0);
+ setup_indirect_pci(hose, rsrc_cfg.start,
+ rsrc_cfg.start + 4, 0);
}
printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0a13ecb270c7..3cc2f9159ab1 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock);
#ifdef CONFIG_PPC32 /* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
-#define distribute_irqs (!(mpic->flags & MPIC_SINGLE_DEST_CPU))
+#define distribute_irqs (1)
#else
#define distribute_irqs (0)
#endif
@@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void)
* it differently, then we should make sure we also change the default
* values of irq_desc[].affinity in irq.c.
*/
- if (distribute_irqs) {
+ if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);