From cfe0d8ba14a1d98245b371e486c68f37eba1ca52 Mon Sep 17 00:00:00 2001 From: Runzhen Wang Date: Fri, 28 Jun 2013 16:14:57 +0800 Subject: perf tools: Make Power7 events available for perf Power7 supports over 530 different perf events but only a small subset of these can be specified by name, for the remaining events, we must specify them by their raw code: perf stat -e r2003c This patch makes all the POWER7 events available in sysfs. So we can instead specify these as: perf stat -e 'cpu/PM_CMPLU_STALL_DFU/' where PM_CMPLU_STALL_DFU is the r2003c in previous example. Before this patch is applied, the size of power7-pmu.o is: $ size arch/powerpc/perf/power7-pmu.o text data bss dec hex filename 3073 2720 0 5793 16a1 arch/powerpc/perf/power7-pmu.o and after the patch is applied, it is: $ size arch/powerpc/perf/power7-pmu.o text data bss dec hex filename 15950 31112 0 47062 b7d6 arch/powerpc/perf/power7-pmu.o For the run time overhead, I use two scripts, one is "event_name.sh", which contains 50 event names, it looks like: # ./perf record -e 'cpu/PM_CMPLU_STALL_DFU/' -e ..... /bin/sleep 1 the other one is named "event_code.sh" which use corresponding events raw code instead of events names, it looks like: # ./perf record -e r2003c -e ...... /bin/sleep 1 below is the result. Using events name: [root@localhost perf]# time ./event_name.sh [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.002 MB perf.data (~102 samples) ] real 0m1.192s user 0m0.028s sys 0m0.106s Using events raw code: [root@localhost perf]# time ./event_code.sh [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.003 MB perf.data (~112 samples) ] real 0m1.198s user 0m0.028s sys 0m0.105s Signed-off-by: Runzhen Wang Acked-by: Michael Ellerman Cc: icycoder@gmail.com Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Paul Mackerras Cc: Runzhen Wang Cc: Sukadev Bhattiprolu Cc: Xiao Guangrong Link: http://lkml.kernel.org/r/1372407297-6996-3-git-send-email-runzhen@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- arch/powerpc/include/asm/perf_event_server.h | 4 +- arch/powerpc/perf/power7-events-list.h | 548 +++++++++++++++++++++++++++ arch/powerpc/perf/power7-pmu.c | 148 ++------ 3 files changed, 582 insertions(+), 118 deletions(-) create mode 100644 arch/powerpc/perf/power7-events-list.h (limited to 'arch') diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index f265049dd7d6..d9270d8eb087 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -136,11 +136,11 @@ extern ssize_t power_events_sysfs_show(struct device *dev, #define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr #define EVENT_ATTR(_name, _id, _suffix) \ - PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \ + PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id, \ power_events_sysfs_show) #define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) #define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) -#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p) +#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p) #define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) diff --git a/arch/powerpc/perf/power7-events-list.h b/arch/powerpc/perf/power7-events-list.h new file mode 100644 index 000000000000..687790a2c0b8 --- /dev/null +++ b/arch/powerpc/perf/power7-events-list.h @@ -0,0 +1,548 @@ +/* + * Performance counter support for POWER7 
processors. + * + * Copyright 2013 Runzhen Wang, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +EVENT(PM_IC_DEMAND_L2_BR_ALL, 0x04898) +EVENT(PM_GCT_UTIL_7_TO_10_SLOTS, 0x020a0) +EVENT(PM_PMC2_SAVED, 0x10022) +EVENT(PM_CMPLU_STALL_DFU, 0x2003c) +EVENT(PM_VSU0_16FLOP, 0x0a0a4) +EVENT(PM_MRK_LSU_DERAT_MISS, 0x3d05a) +EVENT(PM_MRK_ST_CMPL, 0x10034) +EVENT(PM_NEST_PAIR3_ADD, 0x40881) +EVENT(PM_L2_ST_DISP, 0x46180) +EVENT(PM_L2_CASTOUT_MOD, 0x16180) +EVENT(PM_ISEG, 0x020a4) +EVENT(PM_MRK_INST_TIMEO, 0x40034) +EVENT(PM_L2_RCST_DISP_FAIL_ADDR, 0x36282) +EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM, 0x0d0b6) +EVENT(PM_IERAT_WR_64K, 0x040be) +EVENT(PM_MRK_DTLB_MISS_16M, 0x4d05e) +EVENT(PM_IERAT_MISS, 0x100f6) +EVENT(PM_MRK_PTEG_FROM_LMEM, 0x4d052) +EVENT(PM_FLOP, 0x100f4) +EVENT(PM_THRD_PRIO_4_5_CYC, 0x040b4) +EVENT(PM_BR_PRED_TA, 0x040aa) +EVENT(PM_CMPLU_STALL_FXU, 0x20014) +EVENT(PM_EXT_INT, 0x200f8) +EVENT(PM_VSU_FSQRT_FDIV, 0x0a888) +EVENT(PM_MRK_LD_MISS_EXPOSED_CYC, 0x1003e) +EVENT(PM_LSU1_LDF, 0x0c086) +EVENT(PM_IC_WRITE_ALL, 0x0488c) +EVENT(PM_LSU0_SRQ_STFWD, 0x0c0a0) +EVENT(PM_PTEG_FROM_RL2L3_MOD, 0x1c052) +EVENT(PM_MRK_DATA_FROM_L31_SHR, 0x1d04e) +EVENT(PM_DATA_FROM_L21_MOD, 0x3c046) +EVENT(PM_VSU1_SCAL_DOUBLE_ISSUED, 0x0b08a) +EVENT(PM_VSU0_8FLOP, 0x0a0a0) +EVENT(PM_POWER_EVENT1, 0x1006e) +EVENT(PM_DISP_CLB_HELD_BAL, 0x02092) +EVENT(PM_VSU1_2FLOP, 0x0a09a) +EVENT(PM_LWSYNC_HELD, 0x0209a) +EVENT(PM_PTEG_FROM_DL2L3_SHR, 0x3c054) +EVENT(PM_INST_FROM_L21_MOD, 0x34046) +EVENT(PM_IERAT_XLATE_WR_16MPLUS, 0x040bc) +EVENT(PM_IC_REQ_ALL, 0x04888) +EVENT(PM_DSLB_MISS, 0x0d090) +EVENT(PM_L3_MISS, 0x1f082) +EVENT(PM_LSU0_L1_PREF, 0x0d0b8) +EVENT(PM_VSU_SCALAR_SINGLE_ISSUED, 0x0b884) +EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0be) +EVENT(PM_L2_INST, 0x36080) +EVENT(PM_VSU0_FRSP, 0x0a0b4) +EVENT(PM_FLUSH_DISP, 0x02082) +EVENT(PM_PTEG_FROM_L2MISS, 0x4c058) +EVENT(PM_VSU1_DQ_ISSUED, 0x0b09a) +EVENT(PM_CMPLU_STALL_LSU, 0x20012) +EVENT(PM_MRK_DATA_FROM_DMEM, 0x1d04a) +EVENT(PM_LSU_FLUSH_ULD, 0x0c8b0) +EVENT(PM_PTEG_FROM_LMEM, 0x4c052) +EVENT(PM_MRK_DERAT_MISS_16M, 0x3d05c) +EVENT(PM_THRD_ALL_RUN_CYC, 0x2000c) +EVENT(PM_MEM0_PREFETCH_DISP, 0x20083) +EVENT(PM_MRK_STALL_CMPLU_CYC_COUNT, 0x3003f) +EVENT(PM_DATA_FROM_DL2L3_MOD, 0x3c04c) +EVENT(PM_VSU_FRSP, 0x0a8b4) +EVENT(PM_MRK_DATA_FROM_L21_MOD, 0x3d046) +EVENT(PM_PMC1_OVERFLOW, 0x20010) +EVENT(PM_VSU0_SINGLE, 0x0a0a8) +EVENT(PM_MRK_PTEG_FROM_L3MISS, 0x2d058) +EVENT(PM_MRK_PTEG_FROM_L31_SHR, 0x2d056) +EVENT(PM_VSU0_VECTOR_SP_ISSUED, 0x0b090) +EVENT(PM_VSU1_FEST, 0x0a0ba) +EVENT(PM_MRK_INST_DISP, 0x20030) +EVENT(PM_VSU0_COMPLEX_ISSUED, 0x0b096) +EVENT(PM_LSU1_FLUSH_UST, 0x0c0b6) +EVENT(PM_INST_CMPL, 0x00002) +EVENT(PM_FXU_IDLE, 0x1000e) +EVENT(PM_LSU0_FLUSH_ULD, 0x0c0b0) +EVENT(PM_MRK_DATA_FROM_DL2L3_MOD, 0x3d04c) +EVENT(PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC, 0x3001c) +EVENT(PM_LSU1_REJECT_LMQ_FULL, 0x0c0a6) +EVENT(PM_INST_PTEG_FROM_L21_MOD, 0x3e056) +EVENT(PM_INST_FROM_RL2L3_MOD, 0x14042) +EVENT(PM_SHL_CREATED, 0x05082) +EVENT(PM_L2_ST_HIT, 0x46182) +EVENT(PM_DATA_FROM_DMEM, 0x1c04a) +EVENT(PM_L3_LD_MISS, 0x2f082) +EVENT(PM_FXU1_BUSY_FXU0_IDLE, 0x4000e) +EVENT(PM_DISP_CLB_HELD_RES, 0x02094) +EVENT(PM_L2_SN_SX_I_DONE, 0x36382) +EVENT(PM_GRP_CMPL, 0x30004) +EVENT(PM_STCX_CMPL, 0x0c098) +EVENT(PM_VSU0_2FLOP, 0x0a098) 
+EVENT(PM_L3_PREF_MISS, 0x3f082) +EVENT(PM_LSU_SRQ_SYNC_CYC, 0x0d096) +EVENT(PM_LSU_REJECT_ERAT_MISS, 0x20064) +EVENT(PM_L1_ICACHE_MISS, 0x200fc) +EVENT(PM_LSU1_FLUSH_SRQ, 0x0c0be) +EVENT(PM_LD_REF_L1_LSU0, 0x0c080) +EVENT(PM_VSU0_FEST, 0x0a0b8) +EVENT(PM_VSU_VECTOR_SINGLE_ISSUED, 0x0b890) +EVENT(PM_FREQ_UP, 0x4000c) +EVENT(PM_DATA_FROM_LMEM, 0x3c04a) +EVENT(PM_LSU1_LDX, 0x0c08a) +EVENT(PM_PMC3_OVERFLOW, 0x40010) +EVENT(PM_MRK_BR_MPRED, 0x30036) +EVENT(PM_SHL_MATCH, 0x05086) +EVENT(PM_MRK_BR_TAKEN, 0x10036) +EVENT(PM_CMPLU_STALL_BRU, 0x4004e) +EVENT(PM_ISLB_MISS, 0x0d092) +EVENT(PM_CYC, 0x0001e) +EVENT(PM_DISP_HELD_THERMAL, 0x30006) +EVENT(PM_INST_PTEG_FROM_RL2L3_SHR, 0x2e054) +EVENT(PM_LSU1_SRQ_STFWD, 0x0c0a2) +EVENT(PM_GCT_NOSLOT_BR_MPRED, 0x4001a) +EVENT(PM_1PLUS_PPC_CMPL, 0x100f2) +EVENT(PM_PTEG_FROM_DMEM, 0x2c052) +EVENT(PM_VSU_2FLOP, 0x0a898) +EVENT(PM_GCT_FULL_CYC, 0x04086) +EVENT(PM_MRK_DATA_FROM_L3_CYC, 0x40020) +EVENT(PM_LSU_SRQ_S0_ALLOC, 0x0d09d) +EVENT(PM_MRK_DERAT_MISS_4K, 0x1d05c) +EVENT(PM_BR_MPRED_TA, 0x040ae) +EVENT(PM_INST_PTEG_FROM_L2MISS, 0x4e058) +EVENT(PM_DPU_HELD_POWER, 0x20006) +EVENT(PM_RUN_INST_CMPL, 0x400fa) +EVENT(PM_MRK_VSU_FIN, 0x30032) +EVENT(PM_LSU_SRQ_S0_VALID, 0x0d09c) +EVENT(PM_GCT_EMPTY_CYC, 0x20008) +EVENT(PM_IOPS_DISP, 0x30014) +EVENT(PM_RUN_SPURR, 0x10008) +EVENT(PM_PTEG_FROM_L21_MOD, 0x3c056) +EVENT(PM_VSU0_1FLOP, 0x0a080) +EVENT(PM_SNOOP_TLBIE, 0x0d0b2) +EVENT(PM_DATA_FROM_L3MISS, 0x2c048) +EVENT(PM_VSU_SINGLE, 0x0a8a8) +EVENT(PM_DTLB_MISS_16G, 0x1c05e) +EVENT(PM_CMPLU_STALL_VECTOR, 0x2001c) +EVENT(PM_FLUSH, 0x400f8) +EVENT(PM_L2_LD_HIT, 0x36182) +EVENT(PM_NEST_PAIR2_AND, 0x30883) +EVENT(PM_VSU1_1FLOP, 0x0a082) +EVENT(PM_IC_PREF_REQ, 0x0408a) +EVENT(PM_L3_LD_HIT, 0x2f080) +EVENT(PM_GCT_NOSLOT_IC_MISS, 0x2001a) +EVENT(PM_DISP_HELD, 0x10006) +EVENT(PM_L2_LD, 0x16080) +EVENT(PM_LSU_FLUSH_SRQ, 0x0c8bc) +EVENT(PM_BC_PLUS_8_CONV, 0x040b8) +EVENT(PM_MRK_DATA_FROM_L31_MOD_CYC, 0x40026) +EVENT(PM_CMPLU_STALL_VECTOR_LONG, 0x4004a) +EVENT(PM_L2_RCST_BUSY_RC_FULL, 0x26282) +EVENT(PM_TB_BIT_TRANS, 0x300f8) +EVENT(PM_THERMAL_MAX, 0x40006) +EVENT(PM_LSU1_FLUSH_ULD, 0x0c0b2) +EVENT(PM_LSU1_REJECT_LHS, 0x0c0ae) +EVENT(PM_LSU_LRQ_S0_ALLOC, 0x0d09f) +EVENT(PM_L3_CO_L31, 0x4f080) +EVENT(PM_POWER_EVENT4, 0x4006e) +EVENT(PM_DATA_FROM_L31_SHR, 0x1c04e) +EVENT(PM_BR_UNCOND, 0x0409e) +EVENT(PM_LSU1_DC_PREF_STREAM_ALLOC, 0x0d0aa) +EVENT(PM_PMC4_REWIND, 0x10020) +EVENT(PM_L2_RCLD_DISP, 0x16280) +EVENT(PM_THRD_PRIO_2_3_CYC, 0x040b2) +EVENT(PM_MRK_PTEG_FROM_L2MISS, 0x4d058) +EVENT(PM_IC_DEMAND_L2_BHT_REDIRECT, 0x04098) +EVENT(PM_LSU_DERAT_MISS, 0x200f6) +EVENT(PM_IC_PREF_CANCEL_L2, 0x04094) +EVENT(PM_MRK_FIN_STALL_CYC_COUNT, 0x1003d) +EVENT(PM_BR_PRED_CCACHE, 0x040a0) +EVENT(PM_GCT_UTIL_1_TO_2_SLOTS, 0x0209c) +EVENT(PM_MRK_ST_CMPL_INT, 0x30034) +EVENT(PM_LSU_TWO_TABLEWALK_CYC, 0x0d0a6) +EVENT(PM_MRK_DATA_FROM_L3MISS, 0x2d048) +EVENT(PM_GCT_NOSLOT_CYC, 0x100f8) +EVENT(PM_LSU_SET_MPRED, 0x0c0a8) +EVENT(PM_FLUSH_DISP_TLBIE, 0x0208a) +EVENT(PM_VSU1_FCONV, 0x0a0b2) +EVENT(PM_DERAT_MISS_16G, 0x4c05c) +EVENT(PM_INST_FROM_LMEM, 0x3404a) +EVENT(PM_IC_DEMAND_L2_BR_REDIRECT, 0x0409a) +EVENT(PM_CMPLU_STALL_SCALAR_LONG, 0x20018) +EVENT(PM_INST_PTEG_FROM_L2, 0x1e050) +EVENT(PM_PTEG_FROM_L2, 0x1c050) +EVENT(PM_MRK_DATA_FROM_L21_SHR_CYC, 0x20024) +EVENT(PM_MRK_DTLB_MISS_4K, 0x2d05a) +EVENT(PM_VSU0_FPSCR, 0x0b09c) +EVENT(PM_VSU1_VECT_DOUBLE_ISSUED, 0x0b082) +EVENT(PM_MRK_PTEG_FROM_RL2L3_MOD, 0x1d052) +EVENT(PM_MEM0_RQ_DISP, 0x10083) +EVENT(PM_L2_LD_MISS, 0x26080) +EVENT(PM_VMX_RESULT_SAT_1, 
0x0b0a0) +EVENT(PM_L1_PREF, 0x0d8b8) +EVENT(PM_MRK_DATA_FROM_LMEM_CYC, 0x2002c) +EVENT(PM_GRP_IC_MISS_NONSPEC, 0x1000c) +EVENT(PM_PB_NODE_PUMP, 0x10081) +EVENT(PM_SHL_MERGED, 0x05084) +EVENT(PM_NEST_PAIR1_ADD, 0x20881) +EVENT(PM_DATA_FROM_L3, 0x1c048) +EVENT(PM_LSU_FLUSH, 0x0208e) +EVENT(PM_LSU_SRQ_SYNC_COUNT, 0x0d097) +EVENT(PM_PMC2_OVERFLOW, 0x30010) +EVENT(PM_LSU_LDF, 0x0c884) +EVENT(PM_POWER_EVENT3, 0x3006e) +EVENT(PM_DISP_WT, 0x30008) +EVENT(PM_CMPLU_STALL_REJECT, 0x40016) +EVENT(PM_IC_BANK_CONFLICT, 0x04082) +EVENT(PM_BR_MPRED_CR_TA, 0x048ae) +EVENT(PM_L2_INST_MISS, 0x36082) +EVENT(PM_CMPLU_STALL_ERAT_MISS, 0x40018) +EVENT(PM_NEST_PAIR2_ADD, 0x30881) +EVENT(PM_MRK_LSU_FLUSH, 0x0d08c) +EVENT(PM_L2_LDST, 0x16880) +EVENT(PM_INST_FROM_L31_SHR, 0x1404e) +EVENT(PM_VSU0_FIN, 0x0a0bc) +EVENT(PM_LARX_LSU, 0x0c894) +EVENT(PM_INST_FROM_RMEM, 0x34042) +EVENT(PM_DISP_CLB_HELD_TLBIE, 0x02096) +EVENT(PM_MRK_DATA_FROM_DMEM_CYC, 0x2002e) +EVENT(PM_BR_PRED_CR, 0x040a8) +EVENT(PM_LSU_REJECT, 0x10064) +EVENT(PM_GCT_UTIL_3_TO_6_SLOTS, 0x0209e) +EVENT(PM_CMPLU_STALL_END_GCT_NOSLOT, 0x10028) +EVENT(PM_LSU0_REJECT_LMQ_FULL, 0x0c0a4) +EVENT(PM_VSU_FEST, 0x0a8b8) +EVENT(PM_NEST_PAIR0_AND, 0x10883) +EVENT(PM_PTEG_FROM_L3, 0x2c050) +EVENT(PM_POWER_EVENT2, 0x2006e) +EVENT(PM_IC_PREF_CANCEL_PAGE, 0x04090) +EVENT(PM_VSU0_FSQRT_FDIV, 0x0a088) +EVENT(PM_MRK_GRP_CMPL, 0x40030) +EVENT(PM_VSU0_SCAL_DOUBLE_ISSUED, 0x0b088) +EVENT(PM_GRP_DISP, 0x3000a) +EVENT(PM_LSU0_LDX, 0x0c088) +EVENT(PM_DATA_FROM_L2, 0x1c040) +EVENT(PM_MRK_DATA_FROM_RL2L3_MOD, 0x1d042) +EVENT(PM_LD_REF_L1, 0x0c880) +EVENT(PM_VSU0_VECT_DOUBLE_ISSUED, 0x0b080) +EVENT(PM_VSU1_2FLOP_DOUBLE, 0x0a08e) +EVENT(PM_THRD_PRIO_6_7_CYC, 0x040b6) +EVENT(PM_BC_PLUS_8_RSLV_TAKEN, 0x040ba) +EVENT(PM_BR_MPRED_CR, 0x040ac) +EVENT(PM_L3_CO_MEM, 0x4f082) +EVENT(PM_LD_MISS_L1, 0x400f0) +EVENT(PM_DATA_FROM_RL2L3_MOD, 0x1c042) +EVENT(PM_LSU_SRQ_FULL_CYC, 0x1001a) +EVENT(PM_TABLEWALK_CYC, 0x10026) +EVENT(PM_MRK_PTEG_FROM_RMEM, 0x3d052) +EVENT(PM_LSU_SRQ_STFWD, 0x0c8a0) +EVENT(PM_INST_PTEG_FROM_RMEM, 0x3e052) +EVENT(PM_FXU0_FIN, 0x10004) +EVENT(PM_LSU1_L1_SW_PREF, 0x0c09e) +EVENT(PM_PTEG_FROM_L31_MOD, 0x1c054) +EVENT(PM_PMC5_OVERFLOW, 0x10024) +EVENT(PM_LD_REF_L1_LSU1, 0x0c082) +EVENT(PM_INST_PTEG_FROM_L21_SHR, 0x4e056) +EVENT(PM_CMPLU_STALL_THRD, 0x1001c) +EVENT(PM_DATA_FROM_RMEM, 0x3c042) +EVENT(PM_VSU0_SCAL_SINGLE_ISSUED, 0x0b084) +EVENT(PM_BR_MPRED_LSTACK, 0x040a6) +EVENT(PM_MRK_DATA_FROM_RL2L3_MOD_CYC, 0x40028) +EVENT(PM_LSU0_FLUSH_UST, 0x0c0b4) +EVENT(PM_LSU_NCST, 0x0c090) +EVENT(PM_BR_TAKEN, 0x20004) +EVENT(PM_INST_PTEG_FROM_LMEM, 0x4e052) +EVENT(PM_GCT_NOSLOT_BR_MPRED_IC_MISS, 0x4001c) +EVENT(PM_DTLB_MISS_4K, 0x2c05a) +EVENT(PM_PMC4_SAVED, 0x30022) +EVENT(PM_VSU1_PERMUTE_ISSUED, 0x0b092) +EVENT(PM_SLB_MISS, 0x0d890) +EVENT(PM_LSU1_FLUSH_LRQ, 0x0c0ba) +EVENT(PM_DTLB_MISS, 0x300fc) +EVENT(PM_VSU1_FRSP, 0x0a0b6) +EVENT(PM_VSU_VECTOR_DOUBLE_ISSUED, 0x0b880) +EVENT(PM_L2_CASTOUT_SHR, 0x16182) +EVENT(PM_DATA_FROM_DL2L3_SHR, 0x3c044) +EVENT(PM_VSU1_STF, 0x0b08e) +EVENT(PM_ST_FIN, 0x200f0) +EVENT(PM_PTEG_FROM_L21_SHR, 0x4c056) +EVENT(PM_L2_LOC_GUESS_WRONG, 0x26480) +EVENT(PM_MRK_STCX_FAIL, 0x0d08e) +EVENT(PM_LSU0_REJECT_LHS, 0x0c0ac) +EVENT(PM_IC_PREF_CANCEL_HIT, 0x04092) +EVENT(PM_L3_PREF_BUSY, 0x4f080) +EVENT(PM_MRK_BRU_FIN, 0x2003a) +EVENT(PM_LSU1_NCLD, 0x0c08e) +EVENT(PM_INST_PTEG_FROM_L31_MOD, 0x1e054) +EVENT(PM_LSU_NCLD, 0x0c88c) +EVENT(PM_LSU_LDX, 0x0c888) +EVENT(PM_L2_LOC_GUESS_CORRECT, 0x16480) +EVENT(PM_THRESH_TIMEO, 0x10038) +EVENT(PM_L3_PREF_ST, 0x0d0ae) 
+EVENT(PM_DISP_CLB_HELD_SYNC, 0x02098) +EVENT(PM_VSU_SIMPLE_ISSUED, 0x0b894) +EVENT(PM_VSU1_SINGLE, 0x0a0aa) +EVENT(PM_DATA_TABLEWALK_CYC, 0x3001a) +EVENT(PM_L2_RC_ST_DONE, 0x36380) +EVENT(PM_MRK_PTEG_FROM_L21_MOD, 0x3d056) +EVENT(PM_LARX_LSU1, 0x0c096) +EVENT(PM_MRK_DATA_FROM_RMEM, 0x3d042) +EVENT(PM_DISP_CLB_HELD, 0x02090) +EVENT(PM_DERAT_MISS_4K, 0x1c05c) +EVENT(PM_L2_RCLD_DISP_FAIL_ADDR, 0x16282) +EVENT(PM_SEG_EXCEPTION, 0x028a4) +EVENT(PM_FLUSH_DISP_SB, 0x0208c) +EVENT(PM_L2_DC_INV, 0x26182) +EVENT(PM_PTEG_FROM_DL2L3_MOD, 0x4c054) +EVENT(PM_DSEG, 0x020a6) +EVENT(PM_BR_PRED_LSTACK, 0x040a2) +EVENT(PM_VSU0_STF, 0x0b08c) +EVENT(PM_LSU_FX_FIN, 0x10066) +EVENT(PM_DERAT_MISS_16M, 0x3c05c) +EVENT(PM_MRK_PTEG_FROM_DL2L3_MOD, 0x4d054) +EVENT(PM_GCT_UTIL_11_PLUS_SLOTS, 0x020a2) +EVENT(PM_INST_FROM_L3, 0x14048) +EVENT(PM_MRK_IFU_FIN, 0x3003a) +EVENT(PM_ITLB_MISS, 0x400fc) +EVENT(PM_VSU_STF, 0x0b88c) +EVENT(PM_LSU_FLUSH_UST, 0x0c8b4) +EVENT(PM_L2_LDST_MISS, 0x26880) +EVENT(PM_FXU1_FIN, 0x40004) +EVENT(PM_SHL_DEALLOCATED, 0x05080) +EVENT(PM_L2_SN_M_WR_DONE, 0x46382) +EVENT(PM_LSU_REJECT_SET_MPRED, 0x0c8a8) +EVENT(PM_L3_PREF_LD, 0x0d0ac) +EVENT(PM_L2_SN_M_RD_DONE, 0x46380) +EVENT(PM_MRK_DERAT_MISS_16G, 0x4d05c) +EVENT(PM_VSU_FCONV, 0x0a8b0) +EVENT(PM_ANY_THRD_RUN_CYC, 0x100fa) +EVENT(PM_LSU_LMQ_FULL_CYC, 0x0d0a4) +EVENT(PM_MRK_LSU_REJECT_LHS, 0x0d082) +EVENT(PM_MRK_LD_MISS_L1_CYC, 0x4003e) +EVENT(PM_MRK_DATA_FROM_L2_CYC, 0x20020) +EVENT(PM_INST_IMC_MATCH_DISP, 0x30016) +EVENT(PM_MRK_DATA_FROM_RMEM_CYC, 0x4002c) +EVENT(PM_VSU0_SIMPLE_ISSUED, 0x0b094) +EVENT(PM_CMPLU_STALL_DIV, 0x40014) +EVENT(PM_MRK_PTEG_FROM_RL2L3_SHR, 0x2d054) +EVENT(PM_VSU_FMA_DOUBLE, 0x0a890) +EVENT(PM_VSU_4FLOP, 0x0a89c) +EVENT(PM_VSU1_FIN, 0x0a0be) +EVENT(PM_NEST_PAIR1_AND, 0x20883) +EVENT(PM_INST_PTEG_FROM_RL2L3_MOD, 0x1e052) +EVENT(PM_RUN_CYC, 0x200f4) +EVENT(PM_PTEG_FROM_RMEM, 0x3c052) +EVENT(PM_LSU_LRQ_S0_VALID, 0x0d09e) +EVENT(PM_LSU0_LDF, 0x0c084) +EVENT(PM_FLUSH_COMPLETION, 0x30012) +EVENT(PM_ST_MISS_L1, 0x300f0) +EVENT(PM_L2_NODE_PUMP, 0x36480) +EVENT(PM_INST_FROM_DL2L3_SHR, 0x34044) +EVENT(PM_MRK_STALL_CMPLU_CYC, 0x3003e) +EVENT(PM_VSU1_DENORM, 0x0a0ae) +EVENT(PM_MRK_DATA_FROM_L31_SHR_CYC, 0x20026) +EVENT(PM_NEST_PAIR0_ADD, 0x10881) +EVENT(PM_INST_FROM_L3MISS, 0x24048) +EVENT(PM_EE_OFF_EXT_INT, 0x02080) +EVENT(PM_INST_PTEG_FROM_DMEM, 0x2e052) +EVENT(PM_INST_FROM_DL2L3_MOD, 0x3404c) +EVENT(PM_PMC6_OVERFLOW, 0x30024) +EVENT(PM_VSU_2FLOP_DOUBLE, 0x0a88c) +EVENT(PM_TLB_MISS, 0x20066) +EVENT(PM_FXU_BUSY, 0x2000e) +EVENT(PM_L2_RCLD_DISP_FAIL_OTHER, 0x26280) +EVENT(PM_LSU_REJECT_LMQ_FULL, 0x0c8a4) +EVENT(PM_IC_RELOAD_SHR, 0x04096) +EVENT(PM_GRP_MRK, 0x10031) +EVENT(PM_MRK_ST_NEST, 0x20034) +EVENT(PM_VSU1_FSQRT_FDIV, 0x0a08a) +EVENT(PM_LSU0_FLUSH_LRQ, 0x0c0b8) +EVENT(PM_LARX_LSU0, 0x0c094) +EVENT(PM_IBUF_FULL_CYC, 0x04084) +EVENT(PM_MRK_DATA_FROM_DL2L3_SHR_CYC, 0x2002a) +EVENT(PM_LSU_DC_PREF_STREAM_ALLOC, 0x0d8a8) +EVENT(PM_GRP_MRK_CYC, 0x10030) +EVENT(PM_MRK_DATA_FROM_RL2L3_SHR_CYC, 0x20028) +EVENT(PM_L2_GLOB_GUESS_CORRECT, 0x16482) +EVENT(PM_LSU_REJECT_LHS, 0x0c8ac) +EVENT(PM_MRK_DATA_FROM_LMEM, 0x3d04a) +EVENT(PM_INST_PTEG_FROM_L3, 0x2e050) +EVENT(PM_FREQ_DOWN, 0x3000c) +EVENT(PM_PB_RETRY_NODE_PUMP, 0x30081) +EVENT(PM_INST_FROM_RL2L3_SHR, 0x1404c) +EVENT(PM_MRK_INST_ISSUED, 0x10032) +EVENT(PM_PTEG_FROM_L3MISS, 0x2c058) +EVENT(PM_RUN_PURR, 0x400f4) +EVENT(PM_MRK_GRP_IC_MISS, 0x40038) +EVENT(PM_MRK_DATA_FROM_L3, 0x1d048) +EVENT(PM_CMPLU_STALL_DCACHE_MISS, 0x20016) +EVENT(PM_PTEG_FROM_RL2L3_SHR, 0x2c054) 
+EVENT(PM_LSU_FLUSH_LRQ, 0x0c8b8) +EVENT(PM_MRK_DERAT_MISS_64K, 0x2d05c) +EVENT(PM_INST_PTEG_FROM_DL2L3_MOD, 0x4e054) +EVENT(PM_L2_ST_MISS, 0x26082) +EVENT(PM_MRK_PTEG_FROM_L21_SHR, 0x4d056) +EVENT(PM_LWSYNC, 0x0d094) +EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0bc) +EVENT(PM_MRK_LSU_FLUSH_LRQ, 0x0d088) +EVENT(PM_INST_IMC_MATCH_CMPL, 0x100f0) +EVENT(PM_NEST_PAIR3_AND, 0x40883) +EVENT(PM_PB_RETRY_SYS_PUMP, 0x40081) +EVENT(PM_MRK_INST_FIN, 0x30030) +EVENT(PM_MRK_PTEG_FROM_DL2L3_SHR, 0x3d054) +EVENT(PM_INST_FROM_L31_MOD, 0x14044) +EVENT(PM_MRK_DTLB_MISS_64K, 0x3d05e) +EVENT(PM_LSU_FIN, 0x30066) +EVENT(PM_MRK_LSU_REJECT, 0x40064) +EVENT(PM_L2_CO_FAIL_BUSY, 0x16382) +EVENT(PM_MEM0_WQ_DISP, 0x40083) +EVENT(PM_DATA_FROM_L31_MOD, 0x1c044) +EVENT(PM_THERMAL_WARN, 0x10016) +EVENT(PM_VSU0_4FLOP, 0x0a09c) +EVENT(PM_BR_MPRED_CCACHE, 0x040a4) +EVENT(PM_CMPLU_STALL_IFU, 0x4004c) +EVENT(PM_L1_DEMAND_WRITE, 0x0408c) +EVENT(PM_FLUSH_BR_MPRED, 0x02084) +EVENT(PM_MRK_DTLB_MISS_16G, 0x1d05e) +EVENT(PM_MRK_PTEG_FROM_DMEM, 0x2d052) +EVENT(PM_L2_RCST_DISP, 0x36280) +EVENT(PM_CMPLU_STALL, 0x4000a) +EVENT(PM_LSU_PARTIAL_CDF, 0x0c0aa) +EVENT(PM_DISP_CLB_HELD_SB, 0x020a8) +EVENT(PM_VSU0_FMA_DOUBLE, 0x0a090) +EVENT(PM_FXU0_BUSY_FXU1_IDLE, 0x3000e) +EVENT(PM_IC_DEMAND_CYC, 0x10018) +EVENT(PM_MRK_DATA_FROM_L21_SHR, 0x3d04e) +EVENT(PM_MRK_LSU_FLUSH_UST, 0x0d086) +EVENT(PM_INST_PTEG_FROM_L3MISS, 0x2e058) +EVENT(PM_VSU_DENORM, 0x0a8ac) +EVENT(PM_MRK_LSU_PARTIAL_CDF, 0x0d080) +EVENT(PM_INST_FROM_L21_SHR, 0x3404e) +EVENT(PM_IC_PREF_WRITE, 0x0408e) +EVENT(PM_BR_PRED, 0x0409c) +EVENT(PM_INST_FROM_DMEM, 0x1404a) +EVENT(PM_IC_PREF_CANCEL_ALL, 0x04890) +EVENT(PM_LSU_DC_PREF_STREAM_CONFIRM, 0x0d8b4) +EVENT(PM_MRK_LSU_FLUSH_SRQ, 0x0d08a) +EVENT(PM_MRK_FIN_STALL_CYC, 0x1003c) +EVENT(PM_L2_RCST_DISP_FAIL_OTHER, 0x46280) +EVENT(PM_VSU1_DD_ISSUED, 0x0b098) +EVENT(PM_PTEG_FROM_L31_SHR, 0x2c056) +EVENT(PM_DATA_FROM_L21_SHR, 0x3c04e) +EVENT(PM_LSU0_NCLD, 0x0c08c) +EVENT(PM_VSU1_4FLOP, 0x0a09e) +EVENT(PM_VSU1_8FLOP, 0x0a0a2) +EVENT(PM_VSU_8FLOP, 0x0a8a0) +EVENT(PM_LSU_LMQ_SRQ_EMPTY_CYC, 0x2003e) +EVENT(PM_DTLB_MISS_64K, 0x3c05e) +EVENT(PM_THRD_CONC_RUN_INST, 0x300f4) +EVENT(PM_MRK_PTEG_FROM_L2, 0x1d050) +EVENT(PM_PB_SYS_PUMP, 0x20081) +EVENT(PM_VSU_FIN, 0x0a8bc) +EVENT(PM_MRK_DATA_FROM_L31_MOD, 0x1d044) +EVENT(PM_THRD_PRIO_0_1_CYC, 0x040b0) +EVENT(PM_DERAT_MISS_64K, 0x2c05c) +EVENT(PM_PMC2_REWIND, 0x30020) +EVENT(PM_INST_FROM_L2, 0x14040) +EVENT(PM_GRP_BR_MPRED_NONSPEC, 0x1000a) +EVENT(PM_INST_DISP, 0x200f2) +EVENT(PM_MEM0_RD_CANCEL_TOTAL, 0x30083) +EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM, 0x0d0b4) +EVENT(PM_L1_DCACHE_RELOAD_VALID, 0x300f6) +EVENT(PM_VSU_SCALAR_DOUBLE_ISSUED, 0x0b888) +EVENT(PM_L3_PREF_HIT, 0x3f080) +EVENT(PM_MRK_PTEG_FROM_L31_MOD, 0x1d054) +EVENT(PM_CMPLU_STALL_STORE, 0x2004a) +EVENT(PM_MRK_FXU_FIN, 0x20038) +EVENT(PM_PMC4_OVERFLOW, 0x10010) +EVENT(PM_MRK_PTEG_FROM_L3, 0x2d050) +EVENT(PM_LSU0_LMQ_LHR_MERGE, 0x0d098) +EVENT(PM_BTAC_HIT, 0x0508a) +EVENT(PM_L3_RD_BUSY, 0x4f082) +EVENT(PM_LSU0_L1_SW_PREF, 0x0c09c) +EVENT(PM_INST_FROM_L2MISS, 0x44048) +EVENT(PM_LSU0_DC_PREF_STREAM_ALLOC, 0x0d0a8) +EVENT(PM_L2_ST, 0x16082) +EVENT(PM_VSU0_DENORM, 0x0a0ac) +EVENT(PM_MRK_DATA_FROM_DL2L3_SHR, 0x3d044) +EVENT(PM_BR_PRED_CR_TA, 0x048aa) +EVENT(PM_VSU0_FCONV, 0x0a0b0) +EVENT(PM_MRK_LSU_FLUSH_ULD, 0x0d084) +EVENT(PM_BTAC_MISS, 0x05088) +EVENT(PM_MRK_LD_MISS_EXPOSED_CYC_COUNT, 0x1003f) +EVENT(PM_MRK_DATA_FROM_L2, 0x1d040) +EVENT(PM_LSU_DCACHE_RELOAD_VALID, 0x0d0a2) +EVENT(PM_VSU_FMA, 0x0a884) +EVENT(PM_LSU0_FLUSH_SRQ, 0x0c0bc) 
+EVENT(PM_LSU1_L1_PREF, 0x0d0ba) +EVENT(PM_IOPS_CMPL, 0x10014) +EVENT(PM_L2_SYS_PUMP, 0x36482) +EVENT(PM_L2_RCLD_BUSY_RC_FULL, 0x46282) +EVENT(PM_LSU_LMQ_S0_ALLOC, 0x0d0a1) +EVENT(PM_FLUSH_DISP_SYNC, 0x02088) +EVENT(PM_MRK_DATA_FROM_DL2L3_MOD_CYC, 0x4002a) +EVENT(PM_L2_IC_INV, 0x26180) +EVENT(PM_MRK_DATA_FROM_L21_MOD_CYC, 0x40024) +EVENT(PM_L3_PREF_LDST, 0x0d8ac) +EVENT(PM_LSU_SRQ_EMPTY_CYC, 0x40008) +EVENT(PM_LSU_LMQ_S0_VALID, 0x0d0a0) +EVENT(PM_FLUSH_PARTIAL, 0x02086) +EVENT(PM_VSU1_FMA_DOUBLE, 0x0a092) +EVENT(PM_1PLUS_PPC_DISP, 0x400f2) +EVENT(PM_DATA_FROM_L2MISS, 0x200fe) +EVENT(PM_SUSPENDED, 0x00000) +EVENT(PM_VSU0_FMA, 0x0a084) +EVENT(PM_CMPLU_STALL_SCALAR, 0x40012) +EVENT(PM_STCX_FAIL, 0x0c09a) +EVENT(PM_VSU0_FSQRT_FDIV_DOUBLE, 0x0a094) +EVENT(PM_DC_PREF_DST, 0x0d0b0) +EVENT(PM_VSU1_SCAL_SINGLE_ISSUED, 0x0b086) +EVENT(PM_L3_HIT, 0x1f080) +EVENT(PM_L2_GLOB_GUESS_WRONG, 0x26482) +EVENT(PM_MRK_DFU_FIN, 0x20032) +EVENT(PM_INST_FROM_L1, 0x04080) +EVENT(PM_BRU_FIN, 0x10068) +EVENT(PM_IC_DEMAND_REQ, 0x04088) +EVENT(PM_VSU1_FSQRT_FDIV_DOUBLE, 0x0a096) +EVENT(PM_VSU1_FMA, 0x0a086) +EVENT(PM_MRK_LD_MISS_L1, 0x20036) +EVENT(PM_VSU0_2FLOP_DOUBLE, 0x0a08c) +EVENT(PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM, 0x0d8bc) +EVENT(PM_INST_PTEG_FROM_L31_SHR, 0x2e056) +EVENT(PM_MRK_LSU_REJECT_ERAT_MISS, 0x30064) +EVENT(PM_MRK_DATA_FROM_L2MISS, 0x4d048) +EVENT(PM_DATA_FROM_RL2L3_SHR, 0x1c04c) +EVENT(PM_INST_FROM_PREF, 0x14046) +EVENT(PM_VSU1_SQ, 0x0b09e) +EVENT(PM_L2_LD_DISP, 0x36180) +EVENT(PM_L2_DISP_ALL, 0x46080) +EVENT(PM_THRD_GRP_CMPL_BOTH_CYC, 0x10012) +EVENT(PM_VSU_FSQRT_FDIV_DOUBLE, 0x0a894) +EVENT(PM_BR_MPRED, 0x400f6) +EVENT(PM_INST_PTEG_FROM_DL2L3_SHR, 0x3e054) +EVENT(PM_VSU_1FLOP, 0x0a880) +EVENT(PM_HV_CYC, 0x2000a) +EVENT(PM_MRK_LSU_FIN, 0x40032) +EVENT(PM_MRK_DATA_FROM_RL2L3_SHR, 0x1d04c) +EVENT(PM_DTLB_MISS_16M, 0x4c05e) +EVENT(PM_LSU1_LMQ_LHR_MERGE, 0x0d09a) +EVENT(PM_IFU_FIN, 0x40066) diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index d1821b8bbc4c..56c67bca2f75 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -53,37 +53,13 @@ /* * Power7 event codes. 
*/ -#define PME_PM_CYC 0x1e -#define PME_PM_GCT_NOSLOT_CYC 0x100f8 -#define PME_PM_CMPLU_STALL 0x4000a -#define PME_PM_INST_CMPL 0x2 -#define PME_PM_LD_REF_L1 0xc880 -#define PME_PM_LD_MISS_L1 0x400f0 -#define PME_PM_BRU_FIN 0x10068 -#define PME_PM_BR_MPRED 0x400f6 - -#define PME_PM_CMPLU_STALL_FXU 0x20014 -#define PME_PM_CMPLU_STALL_DIV 0x40014 -#define PME_PM_CMPLU_STALL_SCALAR 0x40012 -#define PME_PM_CMPLU_STALL_SCALAR_LONG 0x20018 -#define PME_PM_CMPLU_STALL_VECTOR 0x2001c -#define PME_PM_CMPLU_STALL_VECTOR_LONG 0x4004a -#define PME_PM_CMPLU_STALL_LSU 0x20012 -#define PME_PM_CMPLU_STALL_REJECT 0x40016 -#define PME_PM_CMPLU_STALL_ERAT_MISS 0x40018 -#define PME_PM_CMPLU_STALL_DCACHE_MISS 0x20016 -#define PME_PM_CMPLU_STALL_STORE 0x2004a -#define PME_PM_CMPLU_STALL_THRD 0x1001c -#define PME_PM_CMPLU_STALL_IFU 0x4004c -#define PME_PM_CMPLU_STALL_BRU 0x4004e -#define PME_PM_GCT_NOSLOT_IC_MISS 0x2001a -#define PME_PM_GCT_NOSLOT_BR_MPRED 0x4001a -#define PME_PM_GCT_NOSLOT_BR_MPRED_IC_MISS 0x4001c -#define PME_PM_GRP_CMPL 0x30004 -#define PME_PM_1PLUS_PPC_CMPL 0x100f2 -#define PME_PM_CMPLU_STALL_DFU 0x2003c -#define PME_PM_RUN_CYC 0x200f4 -#define PME_PM_RUN_INST_CMPL 0x400fa +#define EVENT(_name, _code) \ + PME_##_name = _code, + +enum { +#include "power7-events-list.h" +}; +#undef EVENT /* * Layout of constraint bits: @@ -398,96 +374,36 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { }; -GENERIC_EVENT_ATTR(cpu-cycles, CYC); -GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC); -GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL); -GENERIC_EVENT_ATTR(instructions, INST_CMPL); -GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); -GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); -GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); -GENERIC_EVENT_ATTR(branch-misses, BR_MPRED); - -POWER_EVENT_ATTR(CYC, CYC); -POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); -POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL); -POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); -POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); -POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); -POWER_EVENT_ATTR(BRU_FIN, BRU_FIN) -POWER_EVENT_ATTR(BR_MPRED, BR_MPRED); - -POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU); -POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV); -POWER_EVENT_ATTR(CMPLU_STALL_SCALAR, CMPLU_STALL_SCALAR); -POWER_EVENT_ATTR(CMPLU_STALL_SCALAR_LONG, CMPLU_STALL_SCALAR_LONG); -POWER_EVENT_ATTR(CMPLU_STALL_VECTOR, CMPLU_STALL_VECTOR); -POWER_EVENT_ATTR(CMPLU_STALL_VECTOR_LONG, CMPLU_STALL_VECTOR_LONG); -POWER_EVENT_ATTR(CMPLU_STALL_LSU, CMPLU_STALL_LSU); -POWER_EVENT_ATTR(CMPLU_STALL_REJECT, CMPLU_STALL_REJECT); - -POWER_EVENT_ATTR(CMPLU_STALL_ERAT_MISS, CMPLU_STALL_ERAT_MISS); -POWER_EVENT_ATTR(CMPLU_STALL_DCACHE_MISS, CMPLU_STALL_DCACHE_MISS); -POWER_EVENT_ATTR(CMPLU_STALL_STORE, CMPLU_STALL_STORE); -POWER_EVENT_ATTR(CMPLU_STALL_THRD, CMPLU_STALL_THRD); -POWER_EVENT_ATTR(CMPLU_STALL_IFU, CMPLU_STALL_IFU); -POWER_EVENT_ATTR(CMPLU_STALL_BRU, CMPLU_STALL_BRU); -POWER_EVENT_ATTR(GCT_NOSLOT_IC_MISS, GCT_NOSLOT_IC_MISS); - -POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED, GCT_NOSLOT_BR_MPRED); -POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED_IC_MISS, GCT_NOSLOT_BR_MPRED_IC_MISS); -POWER_EVENT_ATTR(GRP_CMPL, GRP_CMPL); -POWER_EVENT_ATTR(1PLUS_PPC_CMPL, 1PLUS_PPC_CMPL); -POWER_EVENT_ATTR(CMPLU_STALL_DFU, CMPLU_STALL_DFU); -POWER_EVENT_ATTR(RUN_CYC, RUN_CYC); -POWER_EVENT_ATTR(RUN_INST_CMPL, RUN_INST_CMPL); +GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); +GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC); 
+GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); +GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL); +GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1); +GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1); +GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN); +GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED); + +#define EVENT(_name, _code) POWER_EVENT_ATTR(_name, _name); +#include "power7-events-list.h" +#undef EVENT + +#define EVENT(_name, _code) POWER_EVENT_PTR(_name), static struct attribute *power7_events_attr[] = { - GENERIC_EVENT_PTR(CYC), - GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), - GENERIC_EVENT_PTR(CMPLU_STALL), - GENERIC_EVENT_PTR(INST_CMPL), - GENERIC_EVENT_PTR(LD_REF_L1), - GENERIC_EVENT_PTR(LD_MISS_L1), - GENERIC_EVENT_PTR(BRU_FIN), - GENERIC_EVENT_PTR(BR_MPRED), - - POWER_EVENT_PTR(CYC), - POWER_EVENT_PTR(GCT_NOSLOT_CYC), - POWER_EVENT_PTR(CMPLU_STALL), - POWER_EVENT_PTR(INST_CMPL), - POWER_EVENT_PTR(LD_REF_L1), - POWER_EVENT_PTR(LD_MISS_L1), - POWER_EVENT_PTR(BRU_FIN), - POWER_EVENT_PTR(BR_MPRED), - - POWER_EVENT_PTR(CMPLU_STALL_FXU), - POWER_EVENT_PTR(CMPLU_STALL_DIV), - POWER_EVENT_PTR(CMPLU_STALL_SCALAR), - POWER_EVENT_PTR(CMPLU_STALL_SCALAR_LONG), - POWER_EVENT_PTR(CMPLU_STALL_VECTOR), - POWER_EVENT_PTR(CMPLU_STALL_VECTOR_LONG), - POWER_EVENT_PTR(CMPLU_STALL_LSU), - POWER_EVENT_PTR(CMPLU_STALL_REJECT), - - POWER_EVENT_PTR(CMPLU_STALL_ERAT_MISS), - POWER_EVENT_PTR(CMPLU_STALL_DCACHE_MISS), - POWER_EVENT_PTR(CMPLU_STALL_STORE), - POWER_EVENT_PTR(CMPLU_STALL_THRD), - POWER_EVENT_PTR(CMPLU_STALL_IFU), - POWER_EVENT_PTR(CMPLU_STALL_BRU), - POWER_EVENT_PTR(GCT_NOSLOT_IC_MISS), - POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED), - - POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED_IC_MISS), - POWER_EVENT_PTR(GRP_CMPL), - POWER_EVENT_PTR(1PLUS_PPC_CMPL), - POWER_EVENT_PTR(CMPLU_STALL_DFU), - POWER_EVENT_PTR(RUN_CYC), - POWER_EVENT_PTR(RUN_INST_CMPL), + GENERIC_EVENT_PTR(PM_CYC), + GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC), + GENERIC_EVENT_PTR(PM_CMPLU_STALL), + GENERIC_EVENT_PTR(PM_INST_CMPL), + GENERIC_EVENT_PTR(PM_LD_REF_L1), + GENERIC_EVENT_PTR(PM_LD_MISS_L1), + GENERIC_EVENT_PTR(PM_BRU_FIN), + GENERIC_EVENT_PTR(PM_BR_MPRED), + + #include "power7-events-list.h" + #undef EVENT NULL }; - static struct attribute_group power7_pmu_events_group = { .name = "events", .attrs = power7_events_attr, -- cgit v1.2.3 From fd4363fff3d96795d3feb1b3fb48ce590f186bdd Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 12 Jul 2013 11:21:48 +0200 Subject: x86: Introduce int3 (breakpoint)-based instruction patching Introduce a method for run-time instruction patching on a live SMP kernel based on int3 breakpoint, completely avoiding the need for stop_machine(). The way this is achieved: - add a int3 trap to the address that will be patched - sync cores - update all but the first byte of the patched range - sync cores - replace the first byte (int3) by the first byte of replacing opcode - sync cores According to http://lkml.indiana.edu/hypermail/linux/kernel/1001.1/01530.html synchronization after replacing "all but first" instructions should not be necessary (on Intel hardware), as the syncing after the subsequent patching of the first byte provides enough safety. But there's not only Intel HW out there, and we'd rather be on a safe side. 
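In C, the core of that sequence looks roughly like the sketch below. This is a simplified illustration of the text_poke_bp() implementation added by this patch, not the patch itself: barriers, the breakpoint notifier and error handling are omitted, and text_poke(), do_sync_core() and on_each_cpu() are the same helpers used in the diff further down.

static void int3_patch_sketch(void *addr, const void *opcode, size_t len)
{
	unsigned char int3 = 0xcc;			/* breakpoint opcode */

	text_poke(addr, &int3, 1);			/* 1) arm the trap */
	on_each_cpu(do_sync_core, NULL, 1);		/* 2) sync cores */

	if (len > 1) {
		/* 3) patch everything but the first byte */
		text_poke((char *)addr + 1,
			  (const char *)opcode + 1, len - 1);
		on_each_cpu(do_sync_core, NULL, 1);	/* 4) sync cores */
	}

	text_poke(addr, opcode, 1);			/* 5) drop the int3 */
	on_each_cpu(do_sync_core, NULL, 1);		/* 6) sync cores */
}
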
If any CPU instruction execution would collide with the patching, it'd be trapped by the int3 breakpoint and redirected to the provided "handler" (which would typically mean just skipping over the patched region, acting as "nop" has been there, in case we are doing nop -> jump and jump -> nop transitions). Ftrace has been using this very technique since 08d636b ("ftrace/x86: Have arch x86_64 use breakpoints instead of stop machine") for ages already, and jump labels are another obvious potential user of this. Based on activities of Masami Hiramatsu a few years ago. Reviewed-by: Steven Rostedt Reviewed-by: Masami Hiramatsu Signed-off-by: Jiri Kosina Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1307121102440.29788@pobox.suse.cz Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/alternative.h | 1 + arch/x86/kernel/alternative.c | 106 +++++++++++++++++++++++++++++++++++++ kernel/kprobes.c | 2 +- 3 files changed, 108 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 58ed6d96a6ac..3abf8dd6f44f 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -233,6 +233,7 @@ struct text_poke_param { }; extern void *text_poke(void *addr, const void *opcode, size_t len); +extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); extern void text_poke_smp_batch(struct text_poke_param *params, int n); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c15cf9a25e27..0ab49366a7a6 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -596,6 +597,111 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) return addr; } +static void do_sync_core(void *info) +{ + sync_core(); +} + +static bool bp_patching_in_progress; +static void *bp_int3_handler, *bp_int3_addr; + +static int int3_notify(struct notifier_block *self, unsigned long val, void *data) +{ + struct die_args *args = data; + + /* bp_patching_in_progress */ + smp_rmb(); + + if (likely(!bp_patching_in_progress)) + return NOTIFY_DONE; + + /* we are not interested in non-int3 faults and ring > 0 faults */ + if (val != DIE_INT3 || !args->regs || user_mode_vm(args->regs) + || args->regs->ip != (unsigned long)bp_int3_addr) + return NOTIFY_DONE; + + /* set up the specified breakpoint handler */ + args->regs->ip = (unsigned long) bp_int3_handler; + + return NOTIFY_STOP; +} +/** + * text_poke_bp() -- update instructions on live kernel on SMP + * @addr: address to patch + * @opcode: opcode of new instruction + * @len: length to copy + * @handler: address to jump to when the temporary breakpoint is hit + * + * Modify multi-byte instruction by using int3 breakpoint on SMP. + * In contrary to text_poke_smp(), we completely avoid stop_machine() here, + * and achieve the synchronization using int3 breakpoint. + * + * The way it is done: + * - add a int3 trap to the address that will be patched + * - sync cores + * - update all but the first byte of the patched range + * - sync cores + * - replace the first byte (int3) by the first byte of + * replacing opcode + * - sync cores + * + * Note: must be called under text_mutex. 
+ */ +void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) +{ + unsigned char int3 = 0xcc; + + bp_int3_handler = handler; + bp_int3_addr = (u8 *)addr + sizeof(int3); + bp_patching_in_progress = true; + /* + * Corresponding read barrier in int3 notifier for + * making sure the in_progress flags is correctly ordered wrt. + * patching + */ + smp_wmb(); + + text_poke(addr, &int3, sizeof(int3)); + + on_each_cpu(do_sync_core, NULL, 1); + + if (len - sizeof(int3) > 0) { + /* patch all but the first byte */ + text_poke((char *)addr + sizeof(int3), + (const char *) opcode + sizeof(int3), + len - sizeof(int3)); + /* + * According to Intel, this core syncing is very likely + * not necessary and we'd be safe even without it. But + * better safe than sorry (plus there's not only Intel). + */ + on_each_cpu(do_sync_core, NULL, 1); + } + + /* patch the first byte */ + text_poke(addr, opcode, sizeof(int3)); + + on_each_cpu(do_sync_core, NULL, 1); + + bp_patching_in_progress = false; + smp_wmb(); + + return addr; +} + +/* this one needs to run before anything else handles it as a + * regular exception */ +static struct notifier_block int3_nb = { + .priority = 0x7fffffff, + .notifier_call = int3_notify +}; + +static int __init int3_init(void) +{ + return register_die_notifier(&int3_nb); +} + +arch_initcall(int3_init); /* * Cross-modifying kernel text with stop_machine(). * This code originally comes from immediate value. diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 6e33498d665c..b58b490fa439 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1709,7 +1709,7 @@ EXPORT_SYMBOL_GPL(unregister_kprobes); static struct notifier_block kprobe_exceptions_nb = { .notifier_call = kprobe_exceptions_notify, - .priority = 0x7fffffff /* we need to be notified first */ + .priority = 0x7ffffff0 /* High priority, but not first. */ }; unsigned long __weak arch_deref_entry_point(void *entry) -- cgit v1.2.3 From 51b2c07b22261f19188d9a9071943d60a067481c Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 12 Jul 2013 11:22:09 +0200 Subject: x86: Make jump_label use int3-based patching Make jump labels use text_poke_bp() for text patching instead of text_poke_smp(), avoiding the need for stop_machine(). Reviewed-by: Steven Rostedt Reviewed-by: Masami Hiramatsu Signed-off-by: Jiri Kosina Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1307121120250.29788@pobox.suse.cz Signed-off-by: H. Peter Anvin --- arch/x86/kernel/jump_label.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 2889b3d43882..460f5d9ceebb 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -37,7 +37,19 @@ static void __jump_label_transform(struct jump_entry *entry, } else memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); - (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); + /* + * Make text_poke_bp() a default fallback poker. 
+ * + * At the time the change is being done, just ignore whether we + * are doing nop -> jump or jump -> nop transition, and assume + * always nop being the 'currently valid' instruction + * + */ + if (poker) + (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); + else + text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE, + (void *)entry->code + JUMP_LABEL_NOP_SIZE); } void arch_jump_label_transform(struct jump_entry *entry, @@ -45,7 +57,7 @@ void arch_jump_label_transform(struct jump_entry *entry, { get_online_cpus(); mutex_lock(&text_mutex); - __jump_label_transform(entry, type, text_poke_smp); + __jump_label_transform(entry, type, NULL); mutex_unlock(&text_mutex); put_online_cpus(); } -- cgit v1.2.3 From c7e85c42be68fca743df58a306edd29aa295e155 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 18 Jul 2013 20:47:47 +0900 Subject: kprobes/x86: Remove an incorrect comment about int3 in NMI/MCE Remove a comment about an int3 issue in NMI/MCE, since commit: 3f3c8b8c4b2a ("x86: Add workaround to NMI iret woes") already fixed that. Keeping this incorrect comment can mislead developers. Signed-off-by: Masami Hiramatsu Reviewed-by: Jiri Kosina Cc: H. Peter Anvin Cc: Steven Rostedt Cc: Jason Baron Cc: yrl.pp-manager.tt@hitachi.com Cc: Borislav Petkov Link: http://lkml.kernel.org/r/20130718114747.26675.84110.stgit@mhiramat-M0-7522 Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/opt.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 76dc6f095724..d7d8a8c10635 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -415,11 +415,6 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist) break; } - /* - * text_poke_smp doesn't support NMI/MCE code modifying. - * However, since kprobes itself also doesn't support NMI/MCE - * code probing, it's not a problem. - */ text_poke_smp_batch(jump_poke_params, c); } @@ -455,11 +450,6 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist, break; } - /* - * text_poke_smp doesn't support NMI/MCE code modifying. - * However, since kprobes itself also doesn't support NMI/MCE - * code probing, it's not a problem. - */ text_poke_smp_batch(jump_poke_params, c); } -- cgit v1.2.3 From a7b0133ea94e4421a81702d5c0e6dcdbbbab8f6b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 18 Jul 2013 20:47:50 +0900 Subject: kprobes/x86: Use text_poke_bp() instead of text_poke_smp*() Use text_poke_bp() for optimizing kprobes instead of text_poke_smp*(). Since the number of kprobes is usually not so large (<100) and text_poke_bp() is much lighter than text_poke_smp() [which uses stop_machine()], this just stops using batch processing. Signed-off-by: Masami Hiramatsu Reviewed-by: Jiri Kosina Cc: H. 
Peter Anvin Cc: Steven Rostedt Cc: Jason Baron Cc: yrl.pp-manager.tt@hitachi.com Cc: Borislav Petkov Link: http://lkml.kernel.org/r/20130718114750.26675.9174.stgit@mhiramat-M0-7522 Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/common.h | 5 -- arch/x86/kernel/kprobes/core.c | 2 +- arch/x86/kernel/kprobes/opt.c | 100 +++++++++------------------------------ 3 files changed, 23 insertions(+), 84 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index 2e9d4b5af036..c6ee63f927ab 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -82,14 +82,9 @@ extern void synthesize_reljump(void *from, void *to); extern void synthesize_relcall(void *from, void *to); #ifdef CONFIG_OPTPROBES -extern int arch_init_optprobes(void); extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); #else /* !CONFIG_OPTPROBES */ -static inline int arch_init_optprobes(void) -{ - return 0; -} static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) { return 0; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 211bce445522..cd49b2c96d32 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1068,7 +1068,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) int __init arch_init_kprobes(void) { - return arch_init_optprobes(); + return 0; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index d7d8a8c10635..d71e99439376 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -371,31 +371,6 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) return 0; } -#define MAX_OPTIMIZE_PROBES 256 -static struct text_poke_param *jump_poke_params; -static struct jump_poke_buffer { - u8 buf[RELATIVEJUMP_SIZE]; -} *jump_poke_bufs; - -static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, - u8 *insn_buf, - struct optimized_kprobe *op) -{ - s32 rel = (s32)((long)op->optinsn.insn - - ((long)op->kp.addr + RELATIVEJUMP_SIZE)); - - /* Backup instructions which will be replaced by jump address */ - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, - RELATIVE_ADDR_SIZE); - - insn_buf[0] = RELATIVEJUMP_OPCODE; - *(s32 *)(&insn_buf[1]) = rel; - - tprm->addr = op->kp.addr; - tprm->opcode = insn_buf; - tprm->len = RELATIVEJUMP_SIZE; -} - /* * Replace breakpoints (int3) with relative jumps. * Caller must call with locking kprobe_mutex and text_mutex. 
@@ -403,32 +378,38 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, void __kprobes arch_optimize_kprobes(struct list_head *oplist) { struct optimized_kprobe *op, *tmp; - int c = 0; + u8 insn_buf[RELATIVEJUMP_SIZE]; list_for_each_entry_safe(op, tmp, oplist, list) { + s32 rel = (s32)((long)op->optinsn.insn - + ((long)op->kp.addr + RELATIVEJUMP_SIZE)); + WARN_ON(kprobe_disabled(&op->kp)); - /* Setup param */ - setup_optimize_kprobe(&jump_poke_params[c], - jump_poke_bufs[c].buf, op); + + /* Backup instructions which will be replaced by jump address */ + memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, + RELATIVE_ADDR_SIZE); + + insn_buf[0] = RELATIVEJUMP_OPCODE; + *(s32 *)(&insn_buf[1]) = rel; + + text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, + op->optinsn.insn); + list_del_init(&op->list); - if (++c >= MAX_OPTIMIZE_PROBES) - break; } - - text_poke_smp_batch(jump_poke_params, c); } -static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, - u8 *insn_buf, - struct optimized_kprobe *op) +/* Replace a relative jump with a breakpoint (int3). */ +void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) { + u8 insn_buf[RELATIVEJUMP_SIZE]; + /* Set int3 to first byte for kprobes */ insn_buf[0] = BREAKPOINT_INSTRUCTION; memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - - tprm->addr = op->kp.addr; - tprm->opcode = insn_buf; - tprm->len = RELATIVEJUMP_SIZE; + text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, + op->optinsn.insn); } /* @@ -439,29 +420,11 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list) { struct optimized_kprobe *op, *tmp; - int c = 0; list_for_each_entry_safe(op, tmp, oplist, list) { - /* Setup param */ - setup_unoptimize_kprobe(&jump_poke_params[c], - jump_poke_bufs[c].buf, op); + arch_unoptimize_kprobe(op); list_move(&op->list, done_list); - if (++c >= MAX_OPTIMIZE_PROBES) - break; } - - text_poke_smp_batch(jump_poke_params, c); -} - -/* Replace a relative jump with a breakpoint (int3). */ -void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) -{ - u8 buf[RELATIVEJUMP_SIZE]; - - /* Set int3 to first byte for kprobes */ - buf[0] = BREAKPOINT_INSTRUCTION; - memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); } int __kprobes @@ -481,22 +444,3 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) } return 0; } - -int __kprobes arch_init_optprobes(void) -{ - /* Allocate code buffer and parameter array */ - jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * - MAX_OPTIMIZE_PROBES, GFP_KERNEL); - if (!jump_poke_bufs) - return -ENOMEM; - - jump_poke_params = kmalloc(sizeof(struct text_poke_param) * - MAX_OPTIMIZE_PROBES, GFP_KERNEL); - if (!jump_poke_params) { - kfree(jump_poke_bufs); - jump_poke_bufs = NULL; - return -ENOMEM; - } - - return 0; -} -- cgit v1.2.3 From ea8596bb2d8d37957f3e92db9511c50801689180 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 18 Jul 2013 20:47:53 +0900 Subject: kprobes/x86: Remove unused text_poke_smp() and text_poke_smp_batch() functions Since introducing the text_poke_bp() for all text_poke_smp*() callers, text_poke_smp*() are now unused. This patch basically reverts: 3d55cc8a058e ("x86: Add text_poke_smp for SMP cross modifying code") 7deb18dcf047 ("x86: Introduce text_poke_smp_batch() for batch-code modifying") and related commits. 
This patch also fixes a Kconfig dependency issue on STOP_MACHINE in the case of CONFIG_SMP && !CONFIG_MODULE_UNLOAD. Signed-off-by: Masami Hiramatsu Reviewed-by: Jiri Kosina Cc: H. Peter Anvin Cc: Steven Rostedt Cc: Jason Baron Cc: yrl.pp-manager.tt@hitachi.com Cc: Borislav Petkov Link: http://lkml.kernel.org/r/20130718114753.26675.18714.stgit@mhiramat-M0-7522 Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 5 -- arch/x86/include/asm/alternative.h | 11 ----- arch/x86/kernel/alternative.c | 98 +------------------------------------- 3 files changed, 2 insertions(+), 112 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b32ebf92b0ce..3106e0e2647a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -81,7 +81,6 @@ config X86 select HAVE_USER_RETURN_NOTIFIER select ARCH_BINFMT_ELF_RANDOMIZE_PIE select HAVE_ARCH_JUMP_LABEL - select HAVE_TEXT_POKE_SMP select HAVE_GENERIC_HARDIRQS select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select SPARSE_IRQ @@ -2332,10 +2331,6 @@ config HAVE_ATOMIC_IOMAP def_bool y depends on X86_32 -config HAVE_TEXT_POKE_SMP - bool - select STOP_MACHINE if SMP - config X86_DEV_DMA_OPS bool depends on X86_64 || STA2X11 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 3abf8dd6f44f..4daf8c501239 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -220,21 +220,10 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len); * no thread can be preempted in the instructions being modified (no iret to an * invalid instruction possible) or if the instructions are changed from a * consistent state to another consistent state atomically. - * More care must be taken when modifying code in the SMP case because of - * Intel's errata. text_poke_smp() takes care that errata, but still - * doesn't support NMI/MCE handler code modifying. * On the local CPU you need to be protected again NMI or MCE handlers seeing an * inconsistent instruction while you patch. */ -struct text_poke_param { - void *addr; - const void *opcode; - size_t len; -}; - extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); -extern void *text_poke_smp(void *addr, const void *opcode, size_t len); -extern void text_poke_smp_batch(struct text_poke_param *params, int n); #endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 0ab49366a7a6..5d8782ee3dd7 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -633,8 +633,8 @@ static int int3_notify(struct notifier_block *self, unsigned long val, void *dat * @handler: address to jump to when the temporary breakpoint is hit * * Modify multi-byte instruction by using int3 breakpoint on SMP. - * In contrary to text_poke_smp(), we completely avoid stop_machine() here, - * and achieve the synchronization using int3 breakpoint. + * We completely avoid stop_machine() here, and achieve the + * synchronization using int3 breakpoint. * * The way it is done: * - add a int3 trap to the address that will be patched @@ -702,97 +702,3 @@ static int __init int3_init(void) } arch_initcall(int3_init); -/* - * Cross-modifying kernel text with stop_machine(). - * This code originally comes from immediate value. 
- */ -static atomic_t stop_machine_first; -static int wrote_text; - -struct text_poke_params { - struct text_poke_param *params; - int nparams; -}; - -static int __kprobes stop_machine_text_poke(void *data) -{ - struct text_poke_params *tpp = data; - struct text_poke_param *p; - int i; - - if (atomic_xchg(&stop_machine_first, 0)) { - for (i = 0; i < tpp->nparams; i++) { - p = &tpp->params[i]; - text_poke(p->addr, p->opcode, p->len); - } - smp_wmb(); /* Make sure other cpus see that this has run */ - wrote_text = 1; - } else { - while (!wrote_text) - cpu_relax(); - smp_mb(); /* Load wrote_text before following execution */ - } - - for (i = 0; i < tpp->nparams; i++) { - p = &tpp->params[i]; - flush_icache_range((unsigned long)p->addr, - (unsigned long)p->addr + p->len); - } - /* - * Intel Archiecture Software Developer's Manual section 7.1.3 specifies - * that a core serializing instruction such as "cpuid" should be - * executed on _each_ core before the new instruction is made visible. - */ - sync_core(); - return 0; -} - -/** - * text_poke_smp - Update instructions on a live kernel on SMP - * @addr: address to modify - * @opcode: source of the copy - * @len: length to copy - * - * Modify multi-byte instruction by using stop_machine() on SMP. This allows - * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying - * should be allowed, since stop_machine() does _not_ protect code against - * NMI and MCE. - * - * Note: Must be called under get_online_cpus() and text_mutex. - */ -void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len) -{ - struct text_poke_params tpp; - struct text_poke_param p; - - p.addr = addr; - p.opcode = opcode; - p.len = len; - tpp.params = &p; - tpp.nparams = 1; - atomic_set(&stop_machine_first, 1); - wrote_text = 0; - /* Use __stop_machine() because the caller already got online_cpus. */ - __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask); - return addr; -} - -/** - * text_poke_smp_batch - Update instructions on a live kernel on SMP - * @params: an array of text_poke parameters - * @n: the number of elements in params. - * - * Modify multi-byte instruction by using stop_machine() on SMP. Since the - * stop_machine() is heavy task, it is better to aggregate text_poke requests - * and do it once if possible. - * - * Note: Must be called under get_online_cpus() and text_mutex. - */ -void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) -{ - struct text_poke_params tpp = {.params = params, .nparams = n}; - - atomic_set(&stop_machine_first, 1); - wrote_text = 0; - __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask); -} -- cgit v1.2.3 From 17f41571bb2c4a398785452ac2718a6c5d77180e Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 23 Jul 2013 10:09:28 +0200 Subject: kprobes/x86: Call out into INT3 handler directly instead of using notifier In fd4363fff3d96 ("x86: Introduce int3 (breakpoint)-based instruction patching"), the mechanism that was introduced for notifying alternatives code from int3 exception handler that and exception occured was die_notifier. This is however problematic, as early code might be using jump labels even before the notifier registration has been performed, which will then lead to an oops due to unhandled exception. 
One of such occurences has been encountered by Fengguang: int3: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC Modules linked in: CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.11.0-rc1-01429-g04bf576 #8 task: ffff88000da1b040 ti: ffff88000da1c000 task.ti: ffff88000da1c000 RIP: 0010:[] [] ttwu_do_wakeup+0x28/0x225 RSP: 0000:ffff88000dd03f10 EFLAGS: 00000006 RAX: 0000000000000000 RBX: ffff88000dd12940 RCX: ffffffff81769c40 RDX: 0000000000000002 RSI: 0000000000000000 RDI: 0000000000000001 RBP: ffff88000dd03f28 R08: ffffffff8176a8c0 R09: 0000000000000002 R10: ffffffff810ff484 R11: ffff88000dd129e8 R12: ffff88000dbc90c0 R13: ffff88000dbc90c0 R14: ffff88000da1dfd8 R15: ffff88000da1dfd8 FS: 0000000000000000(0000) GS:ffff88000dd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000ffffffff CR3: 0000000001c88000 CR4: 00000000000006e0 Stack: ffff88000dd12940 ffff88000dbc90c0 ffff88000da1dfd8 ffff88000dd03f48 ffffffff81109e2b ffff88000dd12940 0000000000000000 ffff88000dd03f68 ffffffff81109e9e 0000000000000000 0000000000012940 ffff88000dd03f98 Call Trace: [] ttwu_do_activate.constprop.56+0x6d/0x79 [] sched_ttwu_pending+0x67/0x84 [] scheduler_ipi+0x15a/0x2b0 [] smp_reschedule_interrupt+0x38/0x41 [] reschedule_interrupt+0x6d/0x80 [] ? __atomic_notifier_call_chain+0x5/0xc1 [] ? native_safe_halt+0xd/0x16 [] default_idle+0x147/0x282 [] arch_cpu_idle+0x3d/0x5d [] cpu_idle_loop+0x46d/0x5db [] cpu_startup_entry+0x84/0x84 [] start_secondary+0x3c8/0x3d5 [...] Fix this by directly calling poke_int3_handler() from the int3 exception handler (analogically to what ftrace has been doing already), instead of relying on notifier, registration of which might not have yet been finalized by the time of the first trap. Reported-and-tested-by: Fengguang Wu Signed-off-by: Jiri Kosina Acked-by: Masami Hiramatsu Cc: H. Peter Anvin Cc: Fengguang Wu Cc: Steven Rostedt Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1307231007490.14024@pobox.suse.cz Signed-off-by: Ingo Molnar --- arch/x86/include/asm/alternative.h | 2 ++ arch/x86/kernel/alternative.c | 31 ++++++++----------------------- arch/x86/kernel/traps.c | 4 ++++ kernel/kprobes.c | 2 +- 4 files changed, 15 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 4daf8c501239..0a3f9c9f98d5 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -5,6 +5,7 @@ #include #include #include +#include /* * Alternative inline assembly for SMP. @@ -224,6 +225,7 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len); * inconsistent instruction while you patch. 
*/ extern void *text_poke(void *addr, const void *opcode, size_t len); +extern int poke_int3_handler(struct pt_regs *regs); extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); #endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 5d8782ee3dd7..15e8563e5c24 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -605,26 +605,24 @@ static void do_sync_core(void *info) static bool bp_patching_in_progress; static void *bp_int3_handler, *bp_int3_addr; -static int int3_notify(struct notifier_block *self, unsigned long val, void *data) +int poke_int3_handler(struct pt_regs *regs) { - struct die_args *args = data; - /* bp_patching_in_progress */ smp_rmb(); if (likely(!bp_patching_in_progress)) - return NOTIFY_DONE; + return 0; - /* we are not interested in non-int3 faults and ring > 0 faults */ - if (val != DIE_INT3 || !args->regs || user_mode_vm(args->regs) - || args->regs->ip != (unsigned long)bp_int3_addr) - return NOTIFY_DONE; + if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr) + return 0; /* set up the specified breakpoint handler */ - args->regs->ip = (unsigned long) bp_int3_handler; + regs->ip = (unsigned long) bp_int3_handler; + + return 1; - return NOTIFY_STOP; } + /** * text_poke_bp() -- update instructions on live kernel on SMP * @addr: address to patch @@ -689,16 +687,3 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) return addr; } -/* this one needs to run before anything else handles it as a - * regular exception */ -static struct notifier_block int3_nb = { - .priority = 0x7fffffff, - .notifier_call = int3_notify -}; - -static int __init int3_init(void) -{ - return register_die_notifier(&int3_nb); -} - -arch_initcall(int3_init); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1b23a1c92746..8c8093b146ca 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -58,6 +58,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 #include @@ -327,6 +328,9 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co ftrace_int3_handler(regs)) return; #endif + if (poke_int3_handler(regs)) + return; + prev_state = exception_enter(); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, diff --git a/kernel/kprobes.c b/kernel/kprobes.c index b58b490fa439..6e33498d665c 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1709,7 +1709,7 @@ EXPORT_SYMBOL_GPL(unregister_kprobes); static struct notifier_block kprobe_exceptions_nb = { .notifier_call = kprobe_exceptions_notify, - .priority = 0x7ffffff0 /* High priority, but not first. */ + .priority = 0x7fffffff /* we need to be notified first */ }; unsigned long __weak arch_deref_entry_point(void *entry) -- cgit v1.2.3 From c73deb6aecda2955716f31572516f09d930ef450 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 28 Jun 2013 16:22:18 +0300 Subject: perf/x86: Add ability to calculate TSC from perf sample timestamps For modern CPUs, perf clock is directly related to TSC. TSC can be calculated from perf clock and vice versa using a simple calculation. Two of the three componenets of that calculation are already exported in struct perf_event_mmap_page. This patch exports the third. Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra Cc: "H. 
Peter Anvin" Link: http://lkml.kernel.org/r/1372425741-1676-3-git-send-email-adrian.hunter@intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/tsc.h | 1 + arch/x86/kernel/cpu/perf_event.c | 6 ++++++ arch/x86/kernel/tsc.c | 6 ++++++ include/uapi/linux/perf_event.h | 22 ++++++++++++++++++++-- 4 files changed, 33 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index c91e8b9d588b..235be70d5bb4 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -49,6 +49,7 @@ extern void tsc_init(void); extern void mark_tsc_unstable(char *reason); extern int unsynchronized_tsc(void); extern int check_tsc_unstable(void); +extern int check_tsc_disabled(void); extern unsigned long native_calibrate_tsc(void); extern int tsc_clocksource_reliable; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index a7c7305030cc..8355c84b9729 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1884,6 +1884,7 @@ static struct pmu pmu = { void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) { userpg->cap_usr_time = 0; + userpg->cap_usr_time_zero = 0; userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; userpg->pmc_width = x86_pmu.cntval_bits; @@ -1897,6 +1898,11 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) userpg->time_mult = this_cpu_read(cyc2ns); userpg->time_shift = CYC2NS_SCALE_FACTOR; userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; + + if (sched_clock_stable && !check_tsc_disabled()) { + userpg->cap_usr_time_zero = 1; + userpg->time_zero = this_cpu_read(cyc2ns_offset); + } } /* diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 6ff49247edf8..930e5d48f560 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -89,6 +89,12 @@ int check_tsc_unstable(void) } EXPORT_SYMBOL_GPL(check_tsc_unstable); +int check_tsc_disabled(void) +{ + return tsc_disabled; +} +EXPORT_SYMBOL_GPL(check_tsc_disabled); + #ifdef CONFIG_X86_TSC int __init notsc_setup(char *str) { diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 0041aedf2297..efef1d37a371 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -378,7 +378,8 @@ struct perf_event_mmap_page { struct { __u64 cap_usr_time : 1, cap_usr_rdpmc : 1, - cap_____res : 62; + cap_usr_time_zero : 1, + cap_____res : 61; }; }; @@ -420,12 +421,29 @@ struct perf_event_mmap_page { __u16 time_shift; __u32 time_mult; __u64 time_offset; + /* + * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated + * from sample timestamps. + * + * time = timestamp - time_zero; + * quot = time / time_mult; + * rem = time % time_mult; + * cyc = (quot << time_shift) + (rem << time_shift) / time_mult; + * + * And vice versa: + * + * quot = cyc >> time_shift; + * rem = cyc & ((1 << time_shift) - 1); + * timestamp = time_zero + quot * time_mult + + * ((rem * time_mult) >> time_shift); + */ + __u64 time_zero; /* * Hole for extension of the self monitor capabilities */ - __u64 __reserved[120]; /* align to 1k */ + __u64 __reserved[119]; /* align to 1k */ /* * Control data for the mmap() data buffer. 
-- cgit v1.2.3 From 899396cf7b4b31e08be358411ad5c0c066069ebc Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 7 Aug 2013 14:17:23 +0800 Subject: perf/x86/intel/uncore: Add auxiliary pci device support The QPI uncore boxes have two pairs of MATCH/MASK registers that are used to filter packet traffic serviced by the QPI link layer. These registers are in auxiliary PCI devices. This patch changes the meaning of (struct pci_device_id)->driver_data: the low 8 bits are the device index within the same uncore type, and the next 8 bits are the uncore type index. An auxiliary PCI device's type is defined as UNCORE_EXTRA_PCI_DEV (0xff). Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1375856245-10717-1-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 111 ++++++++++++++------------ arch/x86/kernel/cpu/perf_event_intel_uncore.h | 9 +++ 2 files changed, 68 insertions(+), 52 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index cad791dbde95..7ce9b35851dc 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -6,6 +6,8 @@ static struct intel_uncore_type **pci_uncores = empty_uncore; /* pci bus to socket mapping */ static int pcibus_to_physid[256] = { [0 ... 255] = -1, }; +static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; + static DEFINE_RAW_SPINLOCK(uncore_box_lock); /* mask of cpus that collect uncore events */ @@ -807,43 +809,43 @@ static struct intel_uncore_type *snbep_pci_uncores[] = { static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { { /* Home Agent */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), - .driver_data = SNBEP_PCI_UNCORE_HA, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0), }, { /* MC Channel 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), - .driver_data = SNBEP_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0), }, { /* MC Channel 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), - .driver_data = SNBEP_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1), }, { /* MC Channel 2 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), - .driver_data = SNBEP_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2), }, { /* MC Channel 3 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), - .driver_data = SNBEP_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3), }, { /* QPI Port 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), - .driver_data = SNBEP_PCI_UNCORE_QPI, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0), }, { /* QPI Port 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), - .driver_data = SNBEP_PCI_UNCORE_QPI, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1), }, { /* R2PCIe */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), - .driver_data = SNBEP_PCI_UNCORE_R2PCIE, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0), }, { /* R3QPI Link 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), - .driver_data = SNBEP_PCI_UNCORE_R3QPI, + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0), }, { /* R3QPI Link 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), - .driver_data = SNBEP_PCI_UNCORE_R3QPI, +
.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), }, { /* end: all zeroes */ } }; @@ -1256,71 +1258,71 @@ static struct intel_uncore_type *ivt_pci_uncores[] = { static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = { { /* Home Agent 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), - .driver_data = IVT_PCI_UNCORE_HA, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0), }, { /* Home Agent 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), - .driver_data = IVT_PCI_UNCORE_HA, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1), }, { /* MC0 Channel 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0), }, { /* MC0 Channel 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1), }, { /* MC0 Channel 3 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2), }, { /* MC0 Channel 4 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3), }, { /* MC1 Channel 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4), }, { /* MC1 Channel 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5), }, { /* MC1 Channel 3 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6), }, { /* MC1 Channel 4 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), - .driver_data = IVT_PCI_UNCORE_IMC, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7), }, { /* QPI0 Port 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), - .driver_data = IVT_PCI_UNCORE_QPI, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0), }, { /* QPI0 Port 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), - .driver_data = IVT_PCI_UNCORE_QPI, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1), }, { /* QPI1 Port 2 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), - .driver_data = IVT_PCI_UNCORE_QPI, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2), }, { /* R2PCIe */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), - .driver_data = IVT_PCI_UNCORE_R2PCIE, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0), }, { /* R3QPI0 Link 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), - .driver_data = IVT_PCI_UNCORE_R3QPI, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0), }, { /* R3QPI0 Link 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), - .driver_data = IVT_PCI_UNCORE_R3QPI, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1), }, { /* R3QPI1 Link 2 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), - .driver_data = IVT_PCI_UNCORE_R3QPI, + .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2), }, { /* end: all zeroes */ } }; @@ -3167,16 +3169,24 @@ static bool pcidrv_registered; /* * add a pci uncore device */ -static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) +static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_uncore_pmu *pmu; struct intel_uncore_box *box; - int i, phys_id; + struct intel_uncore_type *type; + int phys_id; phys_id = pcibus_to_physid[pdev->bus->number]; if (phys_id < 0) return -ENODEV; + if (UNCORE_PCI_DEV_TYPE(id->driver_data) == 
UNCORE_EXTRA_PCI_DEV) { + extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev; + pci_set_drvdata(pdev, NULL); + return 0; + } + + type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; box = uncore_alloc_box(type, 0); if (!box) return -ENOMEM; @@ -3185,21 +3195,11 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) * for performance monitoring unit with multiple boxes, * each box has a different function id. */ - for (i = 0; i < type->num_boxes; i++) { - pmu = &type->pmus[i]; - if (pmu->func_id == pdev->devfn) - break; - if (pmu->func_id < 0) { - pmu->func_id = pdev->devfn; - break; - } - pmu = NULL; - } - - if (!pmu) { - kfree(box); - return -EINVAL; - } + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + if (pmu->func_id < 0) + pmu->func_id = pdev->devfn; + else + WARN_ON_ONCE(pmu->func_id != pdev->devfn); box->phys_id = phys_id; box->pci_dev = pdev; @@ -3217,9 +3217,22 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) static void uncore_pci_remove(struct pci_dev *pdev) { struct intel_uncore_box *box = pci_get_drvdata(pdev); - struct intel_uncore_pmu *pmu = box->pmu; - int cpu, phys_id = pcibus_to_physid[pdev->bus->number]; + struct intel_uncore_pmu *pmu; + int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number]; + box = pci_get_drvdata(pdev); + if (!box) { + for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { + if (extra_pci_dev[phys_id][i] == pdev) { + extra_pci_dev[phys_id][i] = NULL; + break; + } + } + WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX); + return; + } + + pmu = box->pmu; if (WARN_ON_ONCE(phys_id != box->phys_id)) return; @@ -3240,12 +3253,6 @@ static void uncore_pci_remove(struct pci_dev *pdev) kfree(box); } -static int uncore_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - return uncore_pci_add(pci_uncores[id->driver_data], pdev); -} - static int __init uncore_pci_init(void) { int ret; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 47b3d00c9d89..ede5a8c1d29e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -12,6 +12,15 @@ #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC #define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) +#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) +#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) +#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) +#define UNCORE_EXTRA_PCI_DEV 0xff +#define UNCORE_EXTRA_PCI_DEV_MAX 0 + +/* support up to 8 sockets */ +#define UNCORE_SOCKET_MAX 8 + #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) /* SNB event control */ -- cgit v1.2.3 From fd1ec259ba814f0ef35dc8ae2cbd0844541b917d Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 7 Aug 2013 14:17:24 +0800 Subject: perf/x86/intel/uncore: Add filter support for QPI boxes The QPI uncore boxes have two pairs of MATCH/MASK registers that are used to filter packet traffic serviced by the QPI link layer. These registers are in auxiliary PCI devices. This patch adds the auxiliary PCI devices to snbep_uncore_pci_ids and adds field definitions for the MATCH/MASK registers.
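As a usage illustration, the match/mask fields defined by this patch land in the config1/config2 attribute fields. A minimal sketch of setting up one filtered QPI event from user space follows; the field values are placeholders, and the dynamic PMU type is assumed to be read from sysfs (e.g. /sys/bus/event_source/devices/uncore_qpi_0/type) at runtime:

#include <linux/perf_event.h>
#include <string.h>

static void setup_qpi_filtered_event(struct perf_event_attr *attr, __u32 pmu_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size    = sizeof(*attr);
	attr->type    = pmu_type;     /* dynamic type of a QPI uncore PMU */
	attr->config  = 0x38;         /* event select that snbep_qpi_hw_config() treats as filtered */
	attr->config1 = 0x2ULL << 5;  /* match_opc occupies config1:5-8 (placeholder value) */
	attr->config2 = 0xfULL << 5;  /* mask_opc occupies config2:5-8 (placeholder value) */
}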
Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1375856245-10717-2-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 139 ++++++++++++++++++++++---- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 2 +- 2 files changed, 123 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 7ce9b35851dc..6b8b9c951aad 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -47,6 +47,24 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); +DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51"); +DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35"); +DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31"); +DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17"); +DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12"); +DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8"); +DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4"); +DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51"); +DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35"); +DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31"); +DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17"); +DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12"); +DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8"); +DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4"); +DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63"); static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) { @@ -303,6 +321,24 @@ static struct attribute *snbep_uncore_qpi_formats_attr[] = { &format_attr_edge.attr, &format_attr_inv.attr, &format_attr_thresh8.attr, + &format_attr_match_rds.attr, + &format_attr_match_rnid30.attr, + &format_attr_match_rnid4.attr, + &format_attr_match_dnid.attr, + &format_attr_match_mc.attr, + &format_attr_match_opc.attr, + &format_attr_match_vnw.attr, + &format_attr_match0.attr, + &format_attr_match1.attr, + &format_attr_mask_rds.attr, + &format_attr_mask_rnid30.attr, + &format_attr_mask_rnid4.attr, + &format_attr_mask_dnid.attr, + &format_attr_mask_mc.attr, + &format_attr_mask_opc.attr, + &format_attr_mask_vnw.attr, + &format_attr_mask0.attr, + &format_attr_mask1.attr, NULL, }; @@ -358,13 +394,16 @@ static struct intel_uncore_ops snbep_uncore_msr_ops = { SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), }; +#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ + .init_box = snbep_uncore_pci_init_box, \ + .disable_box = snbep_uncore_pci_disable_box, \ + .enable_box = snbep_uncore_pci_enable_box, \ + .disable_event = snbep_uncore_pci_disable_event, \ + .read_counter = snbep_uncore_pci_read_counter + static struct intel_uncore_ops snbep_uncore_pci_ops = { - .init_box = snbep_uncore_pci_init_box, - .disable_box = snbep_uncore_pci_disable_box, - .enable_box = snbep_uncore_pci_enable_box, - .disable_event = snbep_uncore_pci_disable_event, - 
.enable_event = snbep_uncore_pci_enable_event, - .read_counter = snbep_uncore_pci_read_counter, + SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), + .enable_event = snbep_uncore_pci_enable_event, \ }; static struct event_constraint snbep_uncore_cbox_constraints[] = { @@ -728,6 +767,61 @@ static struct intel_uncore_type *snbep_msr_uncores[] = { NULL, }; +enum { + SNBEP_PCI_QPI_PORT0_FILTER, + SNBEP_PCI_QPI_PORT1_FILTER, +}; + +static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) { + reg1->idx = 0; + reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0; + reg1->config = event->attr.config1; + reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0; + reg2->config = event->attr.config2; + } + return 0; +} + +static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + if (reg1->idx != EXTRA_REG_NONE) { + int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; + struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx]; + WARN_ON_ONCE(!filter_pdev); + if (filter_pdev) { + pci_write_config_dword(filter_pdev, reg1->reg, + (u32)reg1->config); + pci_write_config_dword(filter_pdev, reg1->reg + 4, + (u32)(reg1->config >> 32)); + pci_write_config_dword(filter_pdev, reg2->reg, + (u32)reg2->config); + pci_write_config_dword(filter_pdev, reg2->reg + 4, + (u32)(reg2->config >> 32)); + } + } + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); +} + +static struct intel_uncore_ops snbep_uncore_qpi_ops = { + SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), + .enable_event = snbep_qpi_enable_event, + .hw_config = snbep_qpi_hw_config, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, +}; + #define SNBEP_UNCORE_PCI_COMMON_INIT() \ .perf_ctr = SNBEP_PCI_PMON_CTR0, \ .event_ctl = SNBEP_PCI_PMON_CTL0, \ @@ -757,17 +851,18 @@ static struct intel_uncore_type snbep_uncore_imc = { }; static struct intel_uncore_type snbep_uncore_qpi = { - .name = "qpi", - .num_counters = 4, - .num_boxes = 2, - .perf_ctr_bits = 48, - .perf_ctr = SNBEP_PCI_PMON_CTR0, - .event_ctl = SNBEP_PCI_PMON_CTL0, - .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_PCI_PMON_BOX_CTL, - .ops = &snbep_uncore_pci_ops, - .event_descs = snbep_uncore_qpi_events, - .format_group = &snbep_uncore_qpi_format_group, + .name = "qpi", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCI_PMON_CTR0, + .event_ctl = SNBEP_PCI_PMON_CTL0, + .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .num_shared_regs = 1, + .ops = &snbep_uncore_qpi_ops, + .event_descs = snbep_uncore_qpi_events, + .format_group = &snbep_uncore_qpi_format_group, }; @@ -847,6 +942,16 @@ static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), }, + { /* QPI Port 0 filter */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT0_FILTER), + }, + { /* QPI Port 0 filter */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96), + .driver_data = 
UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT1_FILTER), + }, { /* end: all zeroes */ } }; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index ede5a8c1d29e..628500e65d0b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -16,7 +16,7 @@ #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) #define UNCORE_EXTRA_PCI_DEV 0xff -#define UNCORE_EXTRA_PCI_DEV_MAX 0 +#define UNCORE_EXTRA_PCI_DEV_MAX 2 /* support up to 8 sockets */ #define UNCORE_SOCKET_MAX 8 -- cgit v1.2.3 From 77b339bce3e21f7a069447fc25a414b18e36fa2e Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 13 Aug 2013 14:29:42 +0800 Subject: perf/x86/intel/uncore: Enable EV_SEL_EXT bit for PCU This patch adds support for the SNB-EP PCU uncore PMU extra_sel_bit (bit 21), which is missing from the documentation in Table-2.75 of the Intel Xeon Processor E5-2600 Product Family Uncore Performance Monitoring Guide; it is referred to later in Table-2.81. Without this selection bit explicitly enabled by the kernel, some events such as COREx_TRANSITION_CYCLES do not count correctly. Signed-off-by: Yan, Zheng Reviewed-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1376375382-21350-4-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 2 +- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 6b8b9c951aad..e9696d8269ba 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -301,7 +301,7 @@ static struct attribute *snbep_uncore_cbox_formats_attr[] = { }; static struct attribute *snbep_uncore_pcu_formats_attr[] = { - &format_attr_event.attr, + &format_attr_event_ext.attr, &format_attr_occ_sel.attr, &format_attr_edge.attr, &format_attr_inv.attr, diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 628500e65d0b..a80ab71a883d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -117,6 +117,7 @@ (SNBEP_PMON_CTL_EV_SEL_MASK | \ SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PMON_CTL_EV_SEL_EXT | \ SNBEP_PMON_CTL_INVERT | \ SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ -- cgit v1.2.3 From 53ad0447208d3f5897f673ca0b16c776583eedba Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 18 Jul 2013 17:02:23 +0800 Subject: perf/x86: use INTEL_UEVENT_EXTRA_REG to define MSR_OFFCORE_RSP_X Silvermont (22nm Atom) has two offcore response configuration MSRs; unlike other Intel CPUs, its event code for MSR_OFFCORE_RSP_1 is 0x02b7. To avoid complicating intel_fixup_er(), use INTEL_UEVENT_EXTRA_REG to define MSR_OFFCORE_RSP_X, so that intel_fixup_er() can find the event code for OFFCORE_RSP_N via x86_pmu.extra_regs[N].event.
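For context, the two macros differ only in the mask applied to attr.config when matching an event against an extra register; the following is condensed from the definitions in arch/x86/kernel/cpu/perf_event.h of this era (reproduced here for reference, lightly reformatted):

/* Match on the event code alone. */
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

/* Match on event code plus umask, which is what lets Silvermont's
 * 0x02b7 be distinguished from 0x01bb-style codes sharing event 0xb7. */
#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, INTEL_ARCH_EVENT_MASK, vm, idx)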
Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374138144-17278-1-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a45d8d4ace10..0d59a42847c4 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -81,7 +81,8 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), EVENT_EXTRA_END }; @@ -143,8 +144,9 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly = static struct extra_reg intel_westmere_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), EVENT_EXTRA_END }; @@ -163,15 +165,17 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = }; static struct extra_reg intel_snb_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), EVENT_EXTRA_END }; static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), EVENT_EXTRA_END }; @@ -1301,11 +1305,11 @@ static void intel_fixup_er(struct perf_event *event, int idx) if (idx == EXTRA_REG_RSP_0) { event->hw.config &= ~INTEL_ARCH_EVENT_MASK; - event->hw.config |= 0x01b7; + event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event; event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; } else if (idx == EXTRA_REG_RSP_1) { event->hw.config &= ~INTEL_ARCH_EVENT_MASK; - event->hw.config |= 0x01bb; + event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event; event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; } } -- cgit v1.2.3 From 1fa64180fbf7a33b7a30636a2f174a5cad68d48f Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 18 Jul 2013 17:02:24 +0800 Subject: perf/x86: Add Silvermont (22nm Atom) support Compared to the old Atom, Silvermont has offcore response events and more events that support PEBS.
Signed-off-by: Yan, Zheng Reviewed-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374138144-17278-2-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.h | 2 + arch/x86/kernel/cpu/perf_event_intel.c | 158 ++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/perf_event_intel_ds.c | 26 +++++ 3 files changed, 186 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 97e557bc4c91..cc16faae0538 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -641,6 +641,8 @@ extern struct event_constraint intel_core2_pebs_event_constraints[]; extern struct event_constraint intel_atom_pebs_event_constraints[]; +extern struct event_constraint intel_slm_pebs_event_constraints[]; + extern struct event_constraint intel_nehalem_pebs_event_constraints[]; extern struct event_constraint intel_westmere_pebs_event_constraints[]; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 0d59a42847c4..0abf6742a8b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -164,6 +164,15 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; +static struct event_constraint intel_slm_event_constraints[] __read_mostly = +{ + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ + EVENT_CONSTRAINT_END +}; + static struct extra_reg intel_snb_extra_regs[] __read_mostly = { /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), @@ -886,6 +895,140 @@ static __initconst const u64 atom_hw_cache_event_ids }, }; +static struct extra_reg intel_slm_extra_regs[] __read_mostly = +{ + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1), + EVENT_EXTRA_END +}; + +#define SLM_DMND_READ SNB_DMND_DATA_RD +#define SLM_DMND_WRITE SNB_DMND_RFO +#define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) + +#define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM) +#define SLM_LLC_ACCESS SNB_RESP_ANY +#define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM) + +static __initconst const u64 slm_hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS, + [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS, + [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS, + [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS, + }, + }, +}; + +static __initconst const u64 slm_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { 
+ [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */ + [ C(RESULT_MISS) ] = 0x0280, /* ICACGE.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_WRITE) ] = { + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_PREFETCH) ] = { + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ + [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ + [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) { /* user explicitly requested branch sampling */ @@ -2180,6 +2323,21 @@ __init int intel_pmu_init(void) pr_cont("Atom events, "); break; + case 55: /* Atom 22nm "Silvermont" */ + memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); + + intel_pmu_lbr_init_atom(); + + x86_pmu.event_constraints = intel_slm_event_constraints; + x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; + x86_pmu.extra_regs = intel_slm_extra_regs; + x86_pmu.er_flags |= ERF_HAS_RSP_1; + pr_cont("Silvermont events, "); + break; + case 37: /* 32 nm nehalem, "Clarkdale" */ case 44: /* 32 nm nehalem, "Gulftown" */ case 47: /* 32 nm Xeon E7 */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 3065c57a63c1..442fcc23f3a8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -517,6 +517,32 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_slm_pebs_event_constraints[] = { + INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */ + INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */ + INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */ + INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */ + INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */ + 
INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */ + INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */ + INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */ + INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */ + INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */ + INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */ + INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */ + INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */ + INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */ + INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */ + INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */ + INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */ + INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */ + INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */ + INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */ + INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */ + INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */ + EVENT_CONSTRAINT_END +}; + struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ -- cgit v1.2.3 From 7bfb7e6bdd906f11ee9e751b3fec4f4fc728e818 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 29 Aug 2013 13:59:17 -0700 Subject: perf: Convert kmalloc_node(...GFP_ZERO...) to kzalloc_node() Use the convenience function instead of __GFP_ZERO. Signed-off-by: Joe Perches Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/f58599ae1a8d7b32d37e9cf283e95fba6452f7f6.1377809875.git.joe@perches.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_amd.c | 3 +-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 6 +++--- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 4cbe03287b08..beeb7cc07044 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -347,8 +347,7 @@ static struct amd_nb *amd_alloc_nb(int cpu) struct amd_nb *nb; int i; - nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO, - cpu_to_node(cpu)); + nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu)); if (!nb) return NULL; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 442fcc23f3a8..63438aad177f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -224,7 +224,7 @@ static int alloc_pebs_buffer(int cpu) if (!x86_pmu.pebs) return 0; - buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); + buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node); if (unlikely(!buffer)) return -ENOMEM; @@ -262,7 +262,7 @@ static int alloc_bts_buffer(int cpu) if (!x86_pmu.bts) return 0; - buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); + buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); if (unlikely(!buffer)) return -ENOMEM; @@ -295,7 +295,7 @@ static int alloc_ds_buffer(int cpu) int node = cpu_to_node(cpu); struct debug_store *ds; - ds = 
kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node); + ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node); if (unlikely(!ds)) return -ENOMEM; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 34c11ae9bda3..fd8011ed4dcd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -2713,7 +2713,7 @@ struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cp size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); - box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); + box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); if (!box) return NULL; -- cgit v1.2.3
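A closing note on the last patch: the conversion is purely mechanical, since kzalloc_node() is a thin wrapper that ORs __GFP_ZERO into the flags, essentially (from include/linux/slab.h):

/* Allocate zeroed memory on a given NUMA node. */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}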