-rw-r--r-- Documentation/DocBook/debugobjects.tmpl | 50
-rw-r--r-- Documentation/RCU/checklist.txt | 6
-rw-r--r-- Documentation/RCU/rcu.txt | 10
-rw-r--r-- Documentation/RCU/stallwarn.txt | 16
-rw-r--r-- Documentation/RCU/torture.txt | 13
-rw-r--r-- Documentation/RCU/trace.txt | 4
-rw-r--r-- Documentation/RCU/whatisRCU.txt | 19
-rw-r--r-- Documentation/atomic_ops.txt | 87
-rw-r--r-- Documentation/kernel-parameters.txt | 5
-rw-r--r-- Documentation/lockdep-design.txt | 63
-rw-r--r-- Documentation/trace/events.txt | 2
-rw-r--r-- Documentation/virtual/kvm/api.txt | 16
-rw-r--r-- MAINTAINERS | 28
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/Kconfig | 4
-rw-r--r-- arch/arm/Kconfig | 4
-rw-r--r-- arch/arm/common/pl330.c | 116
-rw-r--r-- arch/arm/configs/imx_v4_v5_defconfig | 12
-rw-r--r-- arch/arm/kernel/process.c | 6
-rw-r--r-- arch/arm/kernel/setup.c | 1
-rw-r--r-- arch/arm/kernel/unwind.c | 4
-rw-r--r-- arch/arm/mach-exynos/cpu.c | 5
-rw-r--r-- arch/arm/mach-exynos/mct.c | 13
-rw-r--r-- arch/arm/mach-imx/Kconfig | 8
-rw-r--r-- arch/arm/mach-imx/Makefile | 4
-rw-r--r-- arch/arm/mach-imx/clock-imx35.c | 20
-rw-r--r-- arch/arm/mach-imx/mach-cpuimx35.c | 8
-rw-r--r-- arch/arm/mach-mx5/board-mx51_babbage.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx53_evk.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx53_loco.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx53_smd.c | 2
-rw-r--r-- arch/arm/mach-omap2/board-rx51-peripherals.c | 2
-rw-r--r-- arch/arm/mach-omap2/mcbsp.c | 6
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 4
-rw-r--r-- arch/arm/mach-s5pv210/mach-smdkv210.c | 1
-rw-r--r-- arch/arm/mach-shmobile/board-ag5evm.c | 1
-rw-r--r-- arch/arm/mach-shmobile/board-kota2.c | 139
-rw-r--r-- arch/arm/mach-shmobile/clock-sh73a0.c | 18
-rw-r--r-- arch/arm/mm/init.c | 4
-rw-r--r-- arch/arm/mm/proc-v7.S | 6
-rw-r--r-- arch/arm/oprofile/common.c | 2
-rw-r--r-- arch/arm/plat-mxc/cpufreq.c | 3
-rw-r--r-- arch/arm/plat-mxc/include/mach/uncompress.h | 1
-rw-r--r-- arch/arm/plat-mxc/pwm.c | 16
-rw-r--r-- arch/arm/plat-orion/gpio.c | 6
-rw-r--r-- arch/arm/plat-samsung/dev-backlight.c | 1
-rw-r--r-- arch/arm/plat-samsung/include/plat/cpu-freq-core.h | 25
-rw-r--r-- arch/avr32/kernel/process.c | 6
-rw-r--r-- arch/blackfin/kernel/process.c | 6
-rw-r--r-- arch/ia64/Kconfig | 6
-rw-r--r-- arch/ia64/include/asm/cputime.h | 2
-rw-r--r-- arch/ia64/mm/contig.c | 3
-rw-r--r-- arch/ia64/mm/init.c | 4
-rw-r--r-- arch/microblaze/include/asm/memblock.h | 14
-rw-r--r-- arch/microblaze/kernel/process.c | 6
-rw-r--r-- arch/microblaze/kernel/prom.c | 3
-rw-r--r-- arch/mips/Kconfig | 6
-rw-r--r-- arch/mips/kernel/process.c | 6
-rw-r--r-- arch/mips/kernel/setup.c | 3
-rw-r--r-- arch/mips/sgi-ip27/ip27-memory.c | 5
-rw-r--r-- arch/openrisc/include/asm/memblock.h | 24
-rw-r--r-- arch/openrisc/kernel/idle.c | 6
-rw-r--r-- arch/openrisc/kernel/prom.c | 3
-rw-r--r-- arch/powerpc/Kconfig | 4
-rw-r--r-- arch/powerpc/include/asm/cputime.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 33
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 33
-rw-r--r-- arch/powerpc/include/asm/memblock.h | 8
-rw-r--r-- arch/powerpc/kernel/idle.c | 15
-rw-r--r-- arch/powerpc/kernel/machine_kexec.c | 3
-rw-r--r-- arch/powerpc/kernel/prom.c | 20
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 2
-rw-r--r-- arch/powerpc/kvm/e500.c | 1
-rw-r--r-- arch/powerpc/mm/init_32.c | 4
-rw-r--r-- arch/powerpc/mm/mem.c | 2
-rw-r--r-- arch/powerpc/mm/numa.c | 60
-rw-r--r-- arch/powerpc/mm/tlb_nohash.c | 1
-rw-r--r-- arch/powerpc/platforms/embedded6xx/wii.c | 23
-rw-r--r-- arch/powerpc/platforms/iseries/setup.c | 12
-rw-r--r-- arch/powerpc/platforms/ps3/mm.c | 1
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 4
-rw-r--r-- arch/s390/Kconfig | 6
-rw-r--r-- arch/s390/include/asm/cputime.h | 2
-rw-r--r-- arch/s390/kernel/process.c | 6
-rw-r--r-- arch/s390/kernel/setup.c | 4
-rw-r--r-- arch/s390/oprofile/hwsampler.c | 7
-rw-r--r-- arch/s390/oprofile/init.c | 375
-rw-r--r-- arch/s390/oprofile/op_counter.h | 23
-rw-r--r-- arch/score/Kconfig | 6
-rw-r--r-- arch/score/kernel/setup.c | 4
-rw-r--r-- arch/sh/Kconfig | 1
-rw-r--r-- arch/sh/boards/board-sh7757lcr.c | 16
-rw-r--r-- arch/sh/include/asm/memblock.h | 4
-rw-r--r-- arch/sh/kernel/idle.c | 6
-rw-r--r-- arch/sh/kernel/machine_kexec.c | 3
-rw-r--r-- arch/sh/kernel/setup.c | 3
-rw-r--r-- arch/sh/mm/Kconfig | 3
-rw-r--r-- arch/sh/mm/init.c | 3
-rw-r--r-- arch/sh/oprofile/common.c | 4
-rw-r--r-- arch/sparc/Kconfig | 4
-rw-r--r-- arch/sparc/include/asm/memblock.h | 8
-rw-r--r-- arch/sparc/kernel/ds.c | 6
-rw-r--r-- arch/sparc/kernel/pci_sun4v.c | 4
-rw-r--r-- arch/sparc/kernel/process_64.c | 6
-rw-r--r-- arch/sparc/kernel/prom_common.c | 4
-rw-r--r-- arch/sparc/kernel/setup_32.c | 2
-rw-r--r-- arch/sparc/mm/btfixup.c | 3
-rw-r--r-- arch/sparc/mm/init_64.c | 32
-rw-r--r-- arch/tile/kernel/process.c | 6
-rw-r--r-- arch/tile/mm/fault.c | 4
-rw-r--r-- arch/um/kernel/process.c | 6
-rw-r--r-- arch/unicore32/kernel/process.c | 6
-rw-r--r-- arch/unicore32/kernel/setup.c | 1
-rw-r--r-- arch/unicore32/mm/init.c | 4
-rw-r--r-- arch/unicore32/mm/mmu.c | 1
-rw-r--r-- arch/x86/Kconfig | 5
-rw-r--r-- arch/x86/include/asm/e820.h | 2
-rw-r--r-- arch/x86/include/asm/insn.h | 7
-rw-r--r-- arch/x86/include/asm/memblock.h | 23
-rw-r--r-- arch/x86/include/asm/perf_event.h | 44
-rw-r--r-- arch/x86/kernel/aperture_64.c | 4
-rw-r--r-- arch/x86/kernel/apic/apic.c | 6
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r-- arch/x86/kernel/check.c | 34
-rw-r--r-- arch/x86/kernel/cpu/mcheck/therm_throt.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mcheck/threshold.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 262
-rw-r--r-- arch/x86/kernel/cpu/perf_event.h | 51
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 90
-rw-r--r-- arch/x86/kernel/dumpstack_32.c | 8
-rw-r--r-- arch/x86/kernel/dumpstack_64.c | 8
-rw-r--r-- arch/x86/kernel/e820.c | 58
-rw-r--r-- arch/x86/kernel/head.c | 2
-rw-r--r-- arch/x86/kernel/head32.c | 7
-rw-r--r-- arch/x86/kernel/head64.c | 7
-rw-r--r-- arch/x86/kernel/irq.c | 6
-rw-r--r-- arch/x86/kernel/jump_label.c | 2
-rw-r--r-- arch/x86/kernel/mpparse.c | 12
-rw-r--r-- arch/x86/kernel/process_32.c | 6
-rw-r--r-- arch/x86/kernel/process_64.c | 10
-rw-r--r-- arch/x86/kernel/setup.c | 21
-rw-r--r-- arch/x86/kernel/trampoline.c | 4
-rw-r--r-- arch/x86/kvm/i8254.c | 10
-rw-r--r-- arch/x86/kvm/x86.c | 19
-rw-r--r-- arch/x86/lib/inat.c | 9
-rw-r--r-- arch/x86/lib/insn.c | 4
-rw-r--r-- arch/x86/lib/x86-opcode-map.txt | 606
-rw-r--r-- arch/x86/mm/Makefile | 2
-rw-r--r-- arch/x86/mm/init.c | 8
-rw-r--r-- arch/x86/mm/init_32.c | 36
-rw-r--r-- arch/x86/mm/init_64.c | 2
-rw-r--r-- arch/x86/mm/memblock.c | 348
-rw-r--r-- arch/x86/mm/memtest.c | 33
-rw-r--r-- arch/x86/mm/numa.c | 37
-rw-r--r-- arch/x86/mm/numa_32.c | 10
-rw-r--r-- arch/x86/mm/numa_64.c | 2
-rw-r--r-- arch/x86/mm/numa_emulation.c | 36
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 4
-rw-r--r-- arch/x86/oprofile/Makefile | 3
-rw-r--r-- arch/x86/oprofile/init.c | 30
-rw-r--r-- arch/x86/oprofile/nmi_int.c | 27
-rw-r--r-- arch/x86/oprofile/nmi_timer_int.c | 50
-rw-r--r-- arch/x86/platform/efi/efi.c | 9
-rw-r--r-- arch/x86/tools/Makefile | 11
-rw-r--r-- arch/x86/tools/gen-insn-attr-x86.awk | 21
-rw-r--r-- arch/x86/tools/insn_sanity.c | 275
-rw-r--r-- arch/x86/xen/enlighten.c | 2
-rw-r--r-- arch/x86/xen/mmu.c | 12
-rw-r--r-- arch/x86/xen/setup.c | 25
-rw-r--r-- block/blk-core.c | 23
-rw-r--r-- block/blk-map.c | 2
-rw-r--r-- block/blk-tag.c | 13
-rw-r--r-- block/cfq-iosched.c | 28
-rw-r--r-- block/ioctl.c | 26
-rw-r--r-- drivers/ata/Kconfig | 2
-rw-r--r-- drivers/base/cpu.c | 7
-rw-r--r-- drivers/block/cciss.c | 6
-rw-r--r-- drivers/block/loop.c | 4
-rw-r--r-- drivers/block/swim3.c | 362
-rw-r--r-- drivers/bluetooth/Kconfig | 6
-rw-r--r-- drivers/bluetooth/btmrvl_sdio.c | 15
-rw-r--r-- drivers/bluetooth/btusb.c | 3
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 41
-rw-r--r-- drivers/dma/Kconfig | 4
-rw-r--r-- drivers/firmware/iscsi_ibft.c | 42
-rw-r--r-- drivers/firmware/iscsi_ibft_find.c | 26
-rw-r--r-- drivers/gpio/gpio-da9052.c | 21
-rw-r--r-- drivers/gpio/gpio-ml-ioh.c | 32
-rw-r--r-- drivers/gpio/gpio-mpc8xxx.c | 18
-rw-r--r-- drivers/gpio/gpio-pl061.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 30
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 87
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 173
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 36
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 16
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 400
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 22
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 23
-rw-r--r-- drivers/i2c/busses/i2c-eg20t.c | 22
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-s3c2410.c | 3
-rw-r--r-- drivers/infiniband/core/cma.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 6
-rw-r--r-- drivers/infiniband/hw/qib/qib_file_ops.c | 4
-rw-r--r-- drivers/input/misc/cma3000_d0x.c | 4
-rw-r--r-- drivers/input/mouse/sentelic.c | 8
-rw-r--r-- drivers/input/mouse/sentelic.h | 3
-rw-r--r-- drivers/input/mouse/synaptics.c | 11
-rw-r--r-- drivers/input/tablet/wacom_wac.c | 4
-rw-r--r-- drivers/iommu/intel-iommu.c | 30
-rw-r--r-- drivers/iommu/iommu.c | 2
-rw-r--r-- drivers/md/bitmap.c | 5
-rw-r--r-- drivers/md/linear.c | 1
-rw-r--r-- drivers/md/md.c | 3
-rw-r--r-- drivers/md/raid5.c | 14
-rw-r--r-- drivers/media/common/tuners/mxl5007t.c | 3
-rw-r--r-- drivers/media/common/tuners/tda18218.c | 2
-rw-r--r-- drivers/media/rc/ati_remote.c | 111
-rw-r--r-- drivers/media/rc/keymaps/rc-ati-x10.c | 96
-rw-r--r-- drivers/media/rc/keymaps/rc-medion-x10.c | 128
-rw-r--r-- drivers/media/rc/keymaps/rc-snapstream-firefly.c | 114
-rw-r--r-- drivers/media/video/au0828/au0828-cards.c | 7
-rw-r--r-- drivers/media/video/gspca/gspca.c | 6
-rw-r--r-- drivers/media/video/m5mols/m5mols.h | 2
-rw-r--r-- drivers/media/video/m5mols/m5mols_core.c | 22
-rw-r--r-- drivers/media/video/mt9m111.c | 1
-rw-r--r-- drivers/media/video/mt9t112.c | 4
-rw-r--r-- drivers/media/video/omap/omap_vout.c | 9
-rw-r--r-- drivers/media/video/omap1_camera.c | 1
-rw-r--r-- drivers/media/video/omap24xxcam-dma.c | 2
-rw-r--r-- drivers/media/video/omap3isp/ispccdc.c | 2
-rw-r--r-- drivers/media/video/omap3isp/ispstat.c | 2
-rw-r--r-- drivers/media/video/omap3isp/ispvideo.c | 1
-rw-r--r-- drivers/media/video/ov6650.c | 2
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-capture.c | 14
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-core.c | 24
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-core.h | 2
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-mdevice.c | 43
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-reg.c | 15
-rw-r--r-- drivers/media/video/s5p-mfc/s5p_mfc_enc.c | 2
-rw-r--r-- drivers/media/video/s5p-tv/mixer_video.c | 1
-rw-r--r-- drivers/media/video/sh_mobile_ceu_camera.c | 34
-rw-r--r-- drivers/media/video/sh_mobile_csi2.c | 4
-rw-r--r-- drivers/media/video/soc_camera.c | 3
-rw-r--r-- drivers/mfd/ab5500-debugfs.c | 2
-rw-r--r-- drivers/mfd/ab8500-core.c | 2
-rw-r--r-- drivers/mfd/adp5520.c | 2
-rw-r--r-- drivers/mfd/da903x.c | 3
-rw-r--r-- drivers/mfd/jz4740-adc.c | 1
-rw-r--r-- drivers/mfd/tps6586x.c | 2
-rw-r--r-- drivers/mfd/tps65910.c | 2
-rw-r--r-- drivers/mfd/twl-core.c | 16
-rw-r--r-- drivers/mfd/twl4030-irq.c | 18
-rw-r--r-- drivers/mfd/wm8994-core.c | 1
-rw-r--r-- drivers/mmc/core/host.c | 11
-rw-r--r-- drivers/mmc/host/mmci.c | 14
-rw-r--r-- drivers/mmc/host/sdhci-cns3xxx.c | 5
-rw-r--r-- drivers/mmc/host/sdhci-dove.c | 5
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 5
-rw-r--r-- drivers/mmc/host/sdhci-of-esdhc.c | 5
-rw-r--r-- drivers/mmc/host/sdhci-of-hlwd.c | 5
-rw-r--r-- drivers/mmc/host/sdhci-pci.c | 26
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.c | 18
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.h | 6
-rw-r--r-- drivers/mmc/host/sdhci-pxav2.c | 5
-rw-r--r-- drivers/mmc/host/sdhci-pxav3.c | 5
-rw-r--r-- drivers/mmc/host/sdhci-s3c.c | 21
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 5
-rw-r--r-- drivers/mmc/host/sdhci.c | 2
-rw-r--r-- drivers/mmc/host/sdhci.h | 2
-rw-r--r-- drivers/mmc/host/vub300.c | 2
-rw-r--r-- drivers/mtd/maps/plat-ram.c | 12
-rw-r--r-- drivers/mtd/maps/pxa2xx-flash.c | 2
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 2
-rw-r--r-- drivers/mtd/nand/ndfc.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec.c | 11
-rw-r--r-- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 53
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 14
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r-- drivers/net/ppp/pptp.c | 4
-rw-r--r-- drivers/net/usb/asix.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/rc.c | 4
-rw-r--r-- drivers/net/wireless/b43/pio.c | 16
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn-rxon.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 4
-rw-r--r-- drivers/net/wireless/mwifiex/cmdevt.c | 9
-rw-r--r-- drivers/net/wireless/mwifiex/sta_ioctl.c | 7
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/phy.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/phy.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192de/phy.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/phy.c | 2
-rw-r--r-- drivers/of/platform.c | 2
-rw-r--r-- drivers/oprofile/nmi_timer_int.c | 173
-rw-r--r-- drivers/oprofile/oprof.c | 30
-rw-r--r-- drivers/oprofile/oprof.h | 10
-rw-r--r-- drivers/oprofile/oprofile_files.c | 7
-rw-r--r-- drivers/oprofile/oprofilefs.c | 11
-rw-r--r-- drivers/oprofile/timer_int.c | 30
-rw-r--r-- drivers/pci/ats.c | 1
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 30
-rw-r--r-- drivers/pci/iov.c | 7
-rw-r--r-- drivers/pci/pci.c | 9
-rw-r--r-- drivers/rtc/interface.c | 44
-rw-r--r-- drivers/rtc/rtc-m41t80.c | 9
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 4
-rw-r--r-- drivers/sbus/char/bbc_i2c.c | 27
-rw-r--r-- drivers/sbus/char/display7seg.c | 13
-rw-r--r-- drivers/sbus/char/envctrl.c | 12
-rw-r--r-- drivers/sbus/char/flash.c | 12
-rw-r--r-- drivers/sbus/char/uctrl.c | 12
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 5
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 116
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 4
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 27
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 25
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 42
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 86
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 55
-rw-r--r-- drivers/scsi/qla4xxx/ql4_fw.h | 8
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 16
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 243
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 11
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 1084
-rw-r--r-- drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r-- drivers/ssb/driver_pcicore.c | 8
-rw-r--r-- drivers/staging/rtl8712/usb_intf.c | 1
-rw-r--r-- drivers/staging/tidspbridge/core/dsp-clock.c | 15
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv_interface.c | 4
-rw-r--r-- drivers/usb/class/cdc-acm.c | 10
-rw-r--r-- drivers/usb/dwc3/core.c | 2
-rw-r--r-- drivers/usb/gadget/epautoconf.c | 3
-rw-r--r-- drivers/usb/gadget/f_mass_storage.c | 1
-rw-r--r-- drivers/usb/host/isp1760-if.c | 8
-rw-r--r-- drivers/usb/musb/musb_host.c | 4
-rw-r--r-- drivers/usb/renesas_usbhs/mod.c | 2
-rw-r--r-- drivers/usb/renesas_usbhs/mod_host.c | 1
-rw-r--r-- drivers/usb/serial/option.c | 7
-rw-r--r-- drivers/watchdog/coh901327_wdt.c | 6
-rw-r--r-- drivers/watchdog/hpwdt.c | 5
-rw-r--r-- drivers/watchdog/iTCO_wdt.c | 6
-rw-r--r-- drivers/watchdog/sp805_wdt.c | 2
-rw-r--r-- drivers/xen/swiotlb-xen.c | 4
-rw-r--r-- drivers/xen/xenbus/xenbus_xs.c | 13
-rw-r--r-- firmware/README.AddingFirmware | 3
-rw-r--r-- fs/btrfs/async-thread.c | 120
-rw-r--r-- fs/btrfs/async-thread.h | 4
-rw-r--r-- fs/btrfs/ctree.h | 3
-rw-r--r-- fs/btrfs/delayed-inode.c | 4
-rw-r--r-- fs/btrfs/disk-io.c | 34
-rw-r--r-- fs/btrfs/extent-tree.c | 45
-rw-r--r-- fs/btrfs/file.c | 8
-rw-r--r-- fs/btrfs/inode.c | 189
-rw-r--r-- fs/btrfs/ioctl.c | 6
-rw-r--r-- fs/btrfs/relocation.c | 2
-rw-r--r-- fs/btrfs/scrub.c | 8
-rw-r--r-- fs/btrfs/super.c | 32
-rw-r--r-- fs/btrfs/volumes.c | 8
-rw-r--r-- fs/ceph/dir.c | 29
-rw-r--r-- fs/cifs/connect.c | 4
-rw-r--r-- fs/compat_ioctl.c | 38
-rw-r--r-- fs/fs-writeback.c | 11
-rw-r--r-- fs/ioctl.c | 2
-rw-r--r-- fs/locks.c | 11
-rw-r--r-- fs/minix/inode.c | 34
-rw-r--r-- fs/nfs/file.c | 2
-rw-r--r-- fs/nfs/nfs4proc.c | 24
-rw-r--r-- fs/nfs/nfs4state.c | 33
-rw-r--r-- fs/nilfs2/ioctl.c | 16
-rw-r--r-- fs/proc/stat.c | 4
-rw-r--r-- fs/xfs/xfs_super.c | 30
-rw-r--r-- fs/xfs/xfs_sync.c | 36
-rw-r--r-- fs/xfs/xfs_sync.h | 2
-rw-r--r-- include/asm-generic/cputime.h | 8
-rw-r--r-- include/linux/bitops.h | 10
-rw-r--r-- include/linux/blkdev.h | 3
-rw-r--r-- include/linux/bootmem.h | 2
-rw-r--r-- include/linux/clocksource.h | 11
-rw-r--r-- include/linux/cpu.h | 1
-rw-r--r-- include/linux/debugobjects.h | 6
-rw-r--r-- include/linux/dma_remapping.h | 2
-rw-r--r-- include/linux/hardirq.h | 21
-rw-r--r-- include/linux/jump_label.h | 27
-rw-r--r-- include/linux/kvm.h | 1
-rw-r--r-- include/linux/lglock.h | 36
-rw-r--r-- include/linux/lockdep.h | 4
-rw-r--r-- include/linux/memblock.h | 170
-rw-r--r-- include/linux/mm.h | 34
-rw-r--r-- include/linux/mmzone.h | 8
-rw-r--r-- include/linux/perf_event.h | 8
-rw-r--r-- include/linux/poison.h | 6
-rw-r--r-- include/linux/rcupdate.h | 115
-rw-r--r-- include/linux/sched.h | 8
-rw-r--r-- include/linux/security.h | 2
-rw-r--r-- include/linux/srcu.h | 87
-rw-r--r-- include/linux/tick.h | 11
-rw-r--r-- include/linux/wait.h | 4
-rw-r--r-- include/media/soc_camera.h | 7
-rw-r--r-- include/net/dst.h | 1
-rw-r--r-- include/net/flow.h | 1
-rw-r--r-- include/net/ip_vs.h | 2
-rw-r--r-- include/net/sctp/structs.h | 4
-rw-r--r-- include/net/sock.h | 4
-rw-r--r-- include/scsi/libfcoe.h | 3
-rw-r--r-- include/trace/events/rcu.h | 122
-rw-r--r-- include/trace/events/writeback.h | 15
-rw-r--r-- include/xen/interface/io/xs_wire.h | 3
-rw-r--r-- init/Kconfig | 10
-rw-r--r-- init/main.c | 3
-rw-r--r-- kernel/cgroup.c | 5
-rw-r--r-- kernel/cpu.c | 1
-rw-r--r-- kernel/cpuset.c | 29
-rw-r--r-- kernel/debug/kdb/kdb_support.c | 2
-rw-r--r-- kernel/events/Makefile | 2
-rw-r--r-- kernel/events/callchain.c | 191
-rw-r--r-- kernel/events/core.c | 306
-rw-r--r-- kernel/events/internal.h | 39
-rw-r--r-- kernel/exit.c | 9
-rw-r--r-- kernel/futex.c | 28
-rw-r--r-- kernel/hung_task.c | 14
-rw-r--r-- kernel/jump_label.c | 49
-rw-r--r-- kernel/lockdep.c | 83
-rw-r--r-- kernel/panic.c | 17
-rw-r--r-- kernel/printk.c | 11
-rw-r--r-- kernel/ptrace.c | 13
-rw-r--r-- kernel/rcu.h | 7
-rw-r--r-- kernel/rcupdate.c | 12
-rw-r--r-- kernel/rcutiny.c | 149
-rw-r--r-- kernel/rcutiny_plugin.h | 29
-rw-r--r-- kernel/rcutorture.c | 225
-rw-r--r-- kernel/rcutree.c | 290
-rw-r--r-- kernel/rcutree.h | 26
-rw-r--r-- kernel/rcutree_plugin.h | 289
-rw-r--r-- kernel/rcutree_trace.c | 12
-rw-r--r-- kernel/rtmutex-debug.c | 1
-rw-r--r-- kernel/rtmutex.c | 8
-rw-r--r-- kernel/signal.c | 2
-rw-r--r-- kernel/softirq.c | 4
-rw-r--r-- kernel/sysctl_binary.c | 2
-rw-r--r-- kernel/time/clockevents.c | 1
-rw-r--r-- kernel/time/clocksource.c | 12
-rw-r--r-- kernel/time/tick-sched.c | 114
-rw-r--r-- kernel/timer.c | 62
-rw-r--r-- kernel/trace/trace.c | 106
-rw-r--r-- kernel/trace/trace.h | 2
-rw-r--r-- kernel/trace/trace_events_filter.c | 26
-rw-r--r-- kernel/trace/trace_irqsoff.c | 13
-rw-r--r-- kernel/trace/trace_output.c | 16
-rw-r--r-- kernel/trace/trace_sched_wakeup.c | 13
-rw-r--r-- kernel/wait.c | 4
-rw-r--r-- lib/debugobjects.c | 54
-rw-r--r-- mm/Kconfig | 6
-rw-r--r-- mm/filemap.c | 7
-rw-r--r-- mm/hugetlb.c | 2
-rw-r--r-- mm/memblock.c | 961
-rw-r--r-- mm/memcontrol.c | 3
-rw-r--r-- mm/mempolicy.c | 11
-rw-r--r-- mm/nobootmem.c | 45
-rw-r--r-- mm/oom_kill.c | 2
-rw-r--r-- mm/page_alloc.c | 508
-rw-r--r-- mm/percpu.c | 6
-rw-r--r-- mm/vmalloc.c | 2
-rw-r--r-- net/batman-adv/translation-table.c | 27
-rw-r--r-- net/bluetooth/bnep/core.c | 8
-rw-r--r-- net/bluetooth/cmtp/core.c | 5
-rw-r--r-- net/bluetooth/hci_conn.c | 2
-rw-r--r-- net/bluetooth/hci_core.c | 2
-rw-r--r-- net/bluetooth/hci_event.c | 2
-rw-r--r-- net/bluetooth/l2cap_core.c | 12
-rw-r--r-- net/bluetooth/rfcomm/core.c | 1
-rw-r--r-- net/bridge/br_netfilter.c | 8
-rw-r--r-- net/core/flow.c | 12
-rw-r--r-- net/core/net-sysfs.c | 7
-rw-r--r-- net/core/sock.c | 6
-rw-r--r-- net/ipv4/ipconfig.c | 4
-rw-r--r-- net/ipv4/ipip.c | 7
-rw-r--r-- net/ipv4/route.c | 112
-rw-r--r-- net/ipv6/addrconf.c | 3
-rw-r--r-- net/ipv6/ip6_output.c | 2
-rw-r--r-- net/ipv6/route.c | 2
-rw-r--r-- net/ipv6/sit.c | 7
-rw-r--r-- net/llc/af_llc.c | 14
-rw-r--r-- net/mac80211/agg-tx.c | 86
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 10
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 22
-rw-r--r-- net/netfilter/xt_connbytes.c | 6
-rw-r--r-- net/nfc/nci/core.c | 2
-rw-r--r-- net/packet/af_packet.c | 12
-rw-r--r-- net/sched/sch_gred.c | 2
-rw-r--r-- net/sched/sch_mqprio.c | 2
-rw-r--r-- net/sched/sch_netem.c | 7
-rw-r--r-- net/sched/sch_qfq.c | 4
-rw-r--r-- net/sctp/associola.c | 2
-rw-r--r-- net/sctp/output.c | 8
-rw-r--r-- net/sctp/outqueue.c | 6
-rw-r--r-- net/sctp/protocol.c | 3
-rw-r--r-- net/sctp/socket.c | 2
-rw-r--r-- net/sctp/sysctl.c | 13
-rw-r--r-- net/socket.c | 16
-rw-r--r-- net/sunrpc/sched.c | 30
-rw-r--r-- net/sunrpc/xprt.c | 10
-rw-r--r-- net/xfrm/xfrm_policy.c | 18
-rw-r--r-- scripts/kconfig/Makefile | 5
-rw-r--r-- security/integrity/evm/evm_crypto.c | 19
-rw-r--r-- security/security.c | 2
-rw-r--r-- security/selinux/netport.c | 4
-rw-r--r-- sound/atmel/ac97c.c | 4
-rw-r--r-- sound/pci/hda/hda_intel.c | 4
-rw-r--r-- sound/pci/hda/patch_sigmatel.c | 6
-rw-r--r-- sound/soc/codecs/Kconfig | 2
-rw-r--r-- sound/soc/codecs/jz4740.c | 1
-rw-r--r-- sound/soc/codecs/wm8776.c | 1
-rw-r--r-- sound/soc/codecs/wm8958-dsp2.c | 2
-rw-r--r-- sound/soc/codecs/wm8996.c | 1
-rw-r--r-- sound/soc/mxs/mxs-pcm.c | 3
-rw-r--r-- sound/soc/mxs/mxs-sgtl5000.c | 1
-rw-r--r-- sound/soc/pxa/hx4700.c | 5
-rw-r--r-- sound/soc/samsung/jive_wm8750.c | 3
-rw-r--r-- sound/soc/samsung/smdk2443_wm9710.c | 1
-rw-r--r-- tools/perf/Documentation/perf-annotate.txt | 4
-rw-r--r-- tools/perf/Documentation/perf-buildid-list.txt | 2
-rw-r--r-- tools/perf/Documentation/perf-evlist.txt | 2
-rw-r--r-- tools/perf/Documentation/perf-kmem.txt | 2
-rw-r--r-- tools/perf/Documentation/perf-lock.txt | 2
-rw-r--r-- tools/perf/Documentation/perf-record.txt | 2
-rw-r--r-- tools/perf/Documentation/perf-report.txt | 11
-rw-r--r-- tools/perf/Documentation/perf-sched.txt | 2
-rw-r--r-- tools/perf/Documentation/perf-script.txt | 9
-rw-r--r-- tools/perf/Documentation/perf-test.txt | 8
-rw-r--r-- tools/perf/Documentation/perf-timechart.txt | 2
-rw-r--r-- tools/perf/Makefile | 1
-rw-r--r-- tools/perf/builtin-annotate.c | 132
-rw-r--r-- tools/perf/builtin-buildid-list.c | 53
-rw-r--r-- tools/perf/builtin-diff.c | 21
-rw-r--r-- tools/perf/builtin-evlist.c | 2
-rw-r--r-- tools/perf/builtin-inject.c | 118
-rw-r--r-- tools/perf/builtin-kmem.c | 16
-rw-r--r-- tools/perf/builtin-kvm.c | 2
-rw-r--r-- tools/perf/builtin-lock.c | 12
-rw-r--r-- tools/perf/builtin-probe.c | 1
-rw-r--r-- tools/perf/builtin-record.c | 603
-rw-r--r-- tools/perf/builtin-report.c | 236
-rw-r--r-- tools/perf/builtin-sched.c | 200
-rw-r--r-- tools/perf/builtin-script.c | 130
-rw-r--r-- tools/perf/builtin-stat.c | 134
-rw-r--r-- tools/perf/builtin-test.c | 545
-rw-r--r-- tools/perf/builtin-timechart.c | 38
-rw-r--r-- tools/perf/builtin-top.c | 558
-rw-r--r-- tools/perf/perf.c | 33
-rw-r--r-- tools/perf/perf.h | 24
-rw-r--r-- tools/perf/util/annotate.c | 8
-rw-r--r-- tools/perf/util/annotate.h | 5
-rw-r--r-- tools/perf/util/build-id.c | 26
-rw-r--r-- tools/perf/util/build-id.h | 2
-rw-r--r-- tools/perf/util/callchain.h | 3
-rw-r--r-- tools/perf/util/cgroup.c | 15
-rw-r--r-- tools/perf/util/config.c | 5
-rw-r--r-- tools/perf/util/debugfs.c | 35
-rw-r--r-- tools/perf/util/debugfs.h | 31
-rw-r--r-- tools/perf/util/event.c | 360
-rw-r--r-- tools/perf/util/event.h | 68
-rw-r--r-- tools/perf/util/evlist.c | 299
-rw-r--r-- tools/perf/util/evlist.h | 43
-rw-r--r-- tools/perf/util/evsel.c | 154
-rw-r--r-- tools/perf/util/evsel.h | 8
-rw-r--r-- tools/perf/util/header.c | 741
-rw-r--r-- tools/perf/util/header.h | 51
-rw-r--r-- tools/perf/util/hist.h | 3
-rw-r--r-- tools/perf/util/include/linux/bitops.h | 118
-rw-r--r-- tools/perf/util/map.c | 4
-rw-r--r-- tools/perf/util/map.h | 19
-rw-r--r-- tools/perf/util/parse-events.c | 30
-rw-r--r-- tools/perf/util/parse-events.h | 1
-rw-r--r-- tools/perf/util/probe-finder.h | 1
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-perl.c | 75
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-python.c | 4
-rw-r--r-- tools/perf/util/session.c | 342
-rw-r--r-- tools/perf/util/session.h | 72
-rw-r--r-- tools/perf/util/setup.py | 3
-rw-r--r-- tools/perf/util/symbol.c | 11
-rw-r--r-- tools/perf/util/symbol.h | 1
-rw-r--r-- tools/perf/util/thread.c | 6
-rw-r--r-- tools/perf/util/thread.h | 14
-rw-r--r-- tools/perf/util/tool.h | 50
-rw-r--r-- tools/perf/util/top.h | 20
-rw-r--r-- tools/perf/util/trace-event-info.c | 28
-rw-r--r-- tools/perf/util/trace-event-scripting.c | 2
-rw-r--r-- tools/perf/util/trace-event.h | 8
-rw-r--r-- tools/perf/util/ui/browsers/annotate.c | 16
-rw-r--r-- tools/perf/util/ui/browsers/hists.c | 2
-rw-r--r-- tools/perf/util/ui/progress.c | 3
-rw-r--r-- tools/perf/util/usage.c | 5
-rw-r--r-- tools/perf/util/util.h | 11
-rw-r--r-- tools/perf/util/values.c | 1
-rw-r--r-- virt/kvm/assigned-dev.c | 93
626 files changed, 13309 insertions, 7394 deletions
diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl
index 08ff908aa7a2..24979f691e3e 100644
--- a/Documentation/DocBook/debugobjects.tmpl
+++ b/Documentation/DocBook/debugobjects.tmpl
@@ -96,6 +96,7 @@
<listitem><para>debug_object_deactivate</para></listitem>
<listitem><para>debug_object_destroy</para></listitem>
<listitem><para>debug_object_free</para></listitem>
+ <listitem><para>debug_object_assert_init</para></listitem>
</itemizedlist>
Each of these functions takes the address of the real object and
a pointer to the object type specific debug description
@@ -273,6 +274,26 @@
debug checks.
</para>
</sect1>
+
+ <sect1 id="debug_object_assert_init">
+ <title>debug_object_assert_init</title>
+ <para>
+ This function is called to assert that an object has been
+ initialized.
+ </para>
+ <para>
+ When the real object is not tracked by debugobjects, it calls
+ fixup_assert_init of the object type description structure
+ provided by the caller, with the hardcoded object state
+ ODEBUG_STATE_NOTAVAILABLE. The fixup function can correct the problem
+ by calling debug_object_init and other specific initializing
+ functions.
+ </para>
+ <para>
+ When the real object is already tracked by debugobjects, the
+ call is ignored.
+ </para>
+ </sect1>
</chapter>
<chapter id="fixupfunctions">
<title>Fixup functions</title>
@@ -381,6 +402,35 @@
statistics.
</para>
</sect1>
+ <sect1 id="fixup_assert_init">
+ <title>fixup_assert_init</title>
+ <para>
+ This function is called from the debug code whenever a problem
+ in debug_object_assert_init is detected.
+ </para>
+ <para>
+ Called from debug_object_assert_init() with a hardcoded state
+ ODEBUG_STATE_NOTAVAILABLE when the object is not found in the
+ debug bucket.
+ </para>
+ <para>
+ The function returns 1 when the fixup was successful,
+ otherwise 0. The return value is used to update the
+ statistics.
+ </para>
+ <para>
+ Note, this function should make sure debug_object_init() is
+ called before returning.
+ </para>
+ <para>
+ The handling of statically initialized objects is a special
+ case. The fixup function should check whether this is a legitimate
+ case of a statically initialized object. If it is, only
+ debug_object_init() should be called to make the object known to
+ the tracker, and the function should return 0 because this is not
+ a real fixup.
+ </para>
+ </sect1>
</chapter>
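As a hedged illustration of the static-initialization rule above, a fixup_assert_init handler for a hypothetical "foo" object might look like the sketch below; foo_debug_descr, is_static_foo() and foo_init() are assumptions made for this example:

	static int foo_fixup_assert_init(void *addr, enum debug_obj_state state)
	{
		struct foo *obj = addr;

		switch (state) {
		case ODEBUG_STATE_NOTAVAILABLE:
			if (is_static_foo(obj)) {
				/*
				 * Legitimate statically initialized object:
				 * make it known to the tracker, then return 0
				 * because no real fixup took place.
				 */
				debug_object_init(obj, &foo_debug_descr);
				return 0;
			}
			/* Genuinely uninitialized: fix it up and report it. */
			foo_init(obj);
			return 1;
		default:
			return 0;
		}
	}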
<chapter id="bugs">
<title>Known Bugs And Assumptions</title>
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 0c134f8afc6f..bff2d8be1e18 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -328,6 +328,12 @@ over a rather long period of time, but improvements are always welcome!
RCU rather than SRCU, because RCU is almost always faster and
easier to use than is SRCU.
+ If you need to enter your read-side critical section in a
+ hardirq or exception handler, and then exit that same read-side
+ critical section in the task that was interrupted, then you need
+ to use srcu_read_lock_raw() and srcu_read_unlock_raw(), which avoid
+ the lockdep checking that would otherwise make this practice illegal.
+
Also unlike other forms of RCU, explicit initialization
and cleanup is required via init_srcu_struct() and
cleanup_srcu_struct(). These are passed a "struct srcu_struct"
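As a hedged sketch of the hardirq-entry/task-exit pattern described above (the srcu_struct, the handler, and the scheme for handing the index to the interrupted task are all hypothetical):

	static struct srcu_struct my_srcu;	/* init_srcu_struct() at setup */

	static irqreturn_t my_irq_handler(int irq, void *dev)
	{
		struct my_work *work = dev;

		/* Enter the read-side critical section in hardirq context. */
		work->srcu_idx = srcu_read_lock_raw(&my_srcu);
		/* ... hand the protected data off to the interrupted task ... */
		return IRQ_HANDLED;
	}

	static void my_task_work(struct my_work *work)
	{
		/* ... use the protected data ... */

		/* Exit the critical section in task context. */
		srcu_read_unlock_raw(&my_srcu, work->srcu_idx);
	}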
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 31852705b586..bf778332a28f 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -38,11 +38,11 @@ o How can the updater tell when a grace period has completed
Preemptible variants of RCU (CONFIG_TREE_PREEMPT_RCU) get the
same effect, but require that the readers manipulate CPU-local
- counters. These counters allow limited types of blocking
- within RCU read-side critical sections. SRCU also uses
- CPU-local counters, and permits general blocking within
- RCU read-side critical sections. These two variants of
- RCU detect grace periods by sampling these counters.
+ counters. These counters allow limited types of blocking within
+ RCU read-side critical sections. SRCU also uses CPU-local
+ counters, and permits general blocking within RCU read-side
+ critical sections. These variants of RCU detect grace periods
+ by sampling these counters.
o If I am running on a uniprocessor kernel, which can only do one
thing at a time, why should I wait for a grace period?
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4e959208f736..083d88cbc089 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -101,6 +101,11 @@ o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
messages.
+o A hardware or software issue shuts off the scheduler-clock
+ interrupt on a CPU that is not in dyntick-idle mode. This
+ problem really has happened, and seems to be most likely to
+ result in RCU CPU stall warnings for CONFIG_NO_HZ=n kernels.
+
o A bug in the RCU implementation.
o A hardware failure. This is quite unlikely, but has occurred
@@ -109,12 +114,11 @@ o A hardware failure. This is quite unlikely, but has occurred
This resulted in a series of RCU CPU stall warnings, eventually
leading to the realization that the CPU had failed.
-The RCU, RCU-sched, and RCU-bh implementations have CPU stall
-warning. SRCU does not have its own CPU stall warnings, but its
-calls to synchronize_sched() will result in RCU-sched detecting
-RCU-sched-related CPU stalls. Please note that RCU only detects
-CPU stalls when there is a grace period in progress. No grace period,
-no CPU stall warnings.
+The RCU, RCU-sched, and RCU-bh implementations have CPU stall warnings.
+SRCU does not have its own CPU stall warnings, but its calls to
+synchronize_sched() will result in RCU-sched detecting RCU-sched-related
+CPU stalls. Please note that RCU only detects CPU stalls when there is
+a grace period in progress. No grace period, no CPU stall warnings.
To diagnose the cause of the stall, inspect the stack traces.
The offending function will usually be near the top of the stack.
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 783d6c134d3f..d67068d0d2b9 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -61,11 +61,24 @@ nreaders This is the number of RCU reading threads supported.
To properly exercise RCU implementations with preemptible
read-side critical sections.
+onoff_interval
+ The number of seconds between each attempt to execute a
+ randomly selected CPU-hotplug operation. Defaults to
+ zero, which disables CPU hotplugging. In HOTPLUG_CPU=n
+ kernels, rcutorture will silently refuse to do any
+ CPU-hotplug operations regardless of what value is
+ specified for onoff_interval.
+
shuffle_interval
The number of seconds to keep the test threads affinitied
to a particular subset of the CPUs, defaults to 3 seconds.
Used in conjunction with test_no_idle_hz.
+shutdown_secs The number of seconds to run the test before terminating
+ the test and powering off the system. The default is
+ zero, which disables test termination and system shutdown.
+ This capability is useful for automated testing.
+
stat_interval The number of seconds between output of torture
statistics (via printk()). Regardless of the interval,
statistics are printed when the module is unloaded.
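For example, a hedged invocation exercising both of the new module parameters (the values are illustrative only):

	modprobe rcutorture onoff_interval=3 shutdown_secs=1800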
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index aaf65f6c6cd7..49587abfc2f7 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -105,14 +105,10 @@ o "dt" is the current value of the dyntick counter that is incremented
or one greater than the interrupt-nesting depth otherwise.
The number after the second "/" is the NMI nesting depth.
- This field is displayed only for CONFIG_NO_HZ kernels.
-
o "df" is the number of times that some other CPU has forced a
quiescent state on behalf of this CPU due to this CPU being in
dynticks-idle state.
- This field is displayed only for CONFIG_NO_HZ kernels.
-
o "of" is the number of times that some other CPU has forced a
quiescent state on behalf of this CPU due to this CPU being
offline. In a perfect world, this might never happen, but it
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 6ef692667e2f..6bbe8dcdc3da 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -4,6 +4,7 @@ to start learning about RCU:
1. What is RCU, Fundamentally? http://lwn.net/Articles/262464/
2. What is RCU? Part 2: Usage http://lwn.net/Articles/263130/
3. RCU part 3: the RCU API http://lwn.net/Articles/264090/
+4. The RCU API, 2010 Edition http://lwn.net/Articles/418853/
What is RCU?
@@ -834,6 +835,8 @@ SRCU: Critical sections Grace period Barrier
srcu_read_lock synchronize_srcu N/A
srcu_read_unlock synchronize_srcu_expedited
+ srcu_read_lock_raw
+ srcu_read_unlock_raw
srcu_dereference
SRCU: Initialization/cleanup
@@ -855,27 +858,33 @@ list can be helpful:
a. Will readers need to block? If so, you need SRCU.
-b. What about the -rt patchset? If readers would need to block
+b. Is it necessary to start a read-side critical section in a
+ hardirq handler or exception handler, and then to complete
+ this read-side critical section in the task that was
+ interrupted? If so, you need SRCU's srcu_read_lock_raw() and
+ srcu_read_unlock_raw() primitives.
+
+c. What about the -rt patchset? If readers would need to block
in a non-rt kernel, you need SRCU. If readers would block
in a -rt kernel, but not in a non-rt kernel, SRCU is not
necessary.
-c. Do you need to treat NMI handlers, hardirq handlers,
+d. Do you need to treat NMI handlers, hardirq handlers,
and code segments with preemption disabled (whether
via preempt_disable(), local_irq_save(), local_bh_disable(),
or some other mechanism) as if they were explicit RCU readers?
If so, you need RCU-sched.
-d. Do you need RCU grace periods to complete even in the face
+e. Do you need RCU grace periods to complete even in the face
of softirq monopolization of one or more of the CPUs? For
example, is your code subject to network-based denial-of-service
attacks? If so, you need RCU-bh.
-e. Is your workload too update-intensive for normal use of
+f. Is your workload too update-intensive for normal use of
RCU, but inappropriate for other synchronization mechanisms?
If so, consider SLAB_DESTROY_BY_RCU. But please be careful!
-f. Otherwise, use RCU.
+g. Otherwise, use RCU.
Of course, this all assumes that you have determined that RCU is in fact
the right tool for your job.
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 3bd585b44927..27f2b21a9d5c 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -84,6 +84,93 @@ compiler optimizes the section accessing atomic_t variables.
*** YOU HAVE BEEN WARNED! ***
+Properly aligned pointers, longs, ints, and chars (and unsigned
+equivalents) may be atomically loaded from and stored to in the same
+sense as described for atomic_read() and atomic_set(). The ACCESS_ONCE()
+macro should be used to prevent the compiler from using optimizations
+that might otherwise optimize accesses out of existence on the one hand,
+or that might create unsolicited accesses on the other.
+
+For example consider the following code:
+
+ while (a > 0)
+ do_something();
+
+If the compiler can prove that do_something() does not store to the
+variable a, then the compiler is within its rights to transform this
+into the following:
+
+ tmp = a;
+ if (tmp > 0)
+ for (;;)
+ do_something();
+
+If you don't want the compiler to do this (and you probably don't), then
+you should use something like the following:
+
+ while (ACCESS_ONCE(a) > 0)
+ do_something();
+
+Alternatively, you could place a barrier() call in the loop.
+
+For another example, consider the following code:
+
+ tmp_a = a;
+ do_something_with(tmp_a);
+ do_something_else_with(tmp_a);
+
+If the compiler can prove that do_something_with() does not store to the
+variable a, then the compiler is within its rights to manufacture an
+additional load as follows:
+
+ tmp_a = a;
+ do_something_with(tmp_a);
+ tmp_a = a;
+ do_something_else_with(tmp_a);
+
+This could fatally confuse your code if it expected the same value
+to be passed to do_something_with() and do_something_else_with().
+
+The compiler would be likely to manufacture this additional load if
+do_something_with() was an inline function that made very heavy use
+of registers: reloading from variable a could save a flush to the
+stack and later reload. To prevent the compiler from attacking your
+code in this manner, write the following:
+
+ tmp_a = ACCESS_ONCE(a);
+ do_something_with(tmp_a);
+ do_something_else_with(tmp_a);
+
+For a final example, consider the following code, assuming that the
+variable a is set at boot time before the second CPU is brought online
+and never changed later, so that memory barriers are not needed:
+
+ if (a)
+ b = 9;
+ else
+ b = 42;
+
+The compiler is within its rights to manufacture an additional store
+by transforming the above code into the following:
+
+ b = 42;
+ if (a)
+ b = 9;
+
+This could come as a fatal surprise to other code running concurrently
+that expected b to never have the value 42 if a was zero. To prevent
+the compiler from doing this, write something like:
+
+ if (a)
+ ACCESS_ONCE(b) = 9;
+ else
+ ACCESS_ONCE(b) = 42;
+
+Don't even -think- about doing this without proper use of memory barriers,
+locks, or atomic operations if variable a can change at runtime!
+
+*** WARNING: ACCESS_ONCE() DOES NOT IMPLY A BARRIER! ***
+
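For reference, ACCESS_ONCE() is defined in include/linux/compiler.h as a volatile cast, which is what forbids the compiler from omitting, refetching, or manufacturing accesses:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))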
Now, we move onto the atomic operation interfaces typically implemented with
the help of assembly code.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 81c287fad79d..0293fc8daca3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1885,6 +1885,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
arch_perfmon: [X86] Force use of architectural
perfmon on Intel CPUs instead of the
CPU specific event set.
+ timer: [X86] Force use of architectural NMI
+ timer mode (see also oprofile.timer
+ for generic hr timer mode)
+ [s390] Force legacy basic mode sampling
+ (report cpu_type "timer")
oops=panic Always panic on oopses. Default is to just kill the
process, but there is a small probability of
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index abf768c681e2..5dbc99c04f6e 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/lockdep-design.txt
@@ -221,3 +221,66 @@ when the chain is validated for the first time, is then put into a hash
table, which hash-table can be checked in a lockfree manner. If the
locking chain occurs again later on, the hash table tells us that we
don't have to validate the chain again.
+
+Troubleshooting:
+----------------
+
+The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes.
+Exceeding this number will trigger the following lockdep warning:
+
+ (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+
+By default, MAX_LOCKDEP_KEYS is currently set to 8191, and typical
+desktop systems have less than 1,000 lock classes, so this warning
+normally results from lock-class leakage or failure to properly
+initialize locks. These two problems are illustrated below:
+
+1. Repeated module loading and unloading while running the validator
+ will result in lock-class leakage. The issue here is that each
+ load of the module will create a new set of lock classes for
+ that module's locks, but module unloading does not remove old
+ classes (see below discussion of reuse of lock classes for why).
+ Therefore, if that module is loaded and unloaded repeatedly,
+ the number of lock classes will eventually reach the maximum.
+
+2. Using structures such as arrays that have large numbers of
+ locks that are not explicitly initialized. For example,
+ a hash table with 8192 buckets where each bucket has its own
+ spinlock_t will consume 8192 lock classes -unless- each spinlock
+ is explicitly initialized at runtime, for example, using the
+ run-time spin_lock_init() as opposed to compile-time initializers
+ such as __SPIN_LOCK_UNLOCKED(). Failure to properly initialize
+ the per-bucket spinlocks would guarantee lock-class overflow.
+ In contrast, a loop that called spin_lock_init() on each lock
+ would place all 8192 locks into a single lock class.
+
+ The moral of this story is that you should always explicitly
+ initialize your locks.
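+
+ A hedged sketch of the hash-table case above (the bucket
+ structure is hypothetical):
+
+	struct bucket {
+		spinlock_t lock;
+		struct hlist_head chain;
+	};
+	static struct bucket hash_table[8192];
+
+	static void __init hash_table_init(void)
+	{
+		int i;
+
+		/*
+		 * All 8192 spinlocks share the single spin_lock_init()
+		 * call site below, so lockdep assigns them one lock
+		 * class rather than 8192 separate ones.
+		 */
+		for (i = 0; i < 8192; i++) {
+			spin_lock_init(&hash_table[i].lock);
+			INIT_HLIST_HEAD(&hash_table[i].chain);
+		}
+	}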
+
+One might argue that the validator should be modified to allow
+lock classes to be reused. However, if you are tempted to make this
+argument, first review the code and think through the changes that would
+be required, keeping in mind that the lock classes to be removed are
+likely to be linked into the lock-dependency graph. This turns out to
+be harder to do than to say.
+
+Of course, if you do run out of lock classes, the next thing to do is
+to find the offending lock classes. First, the following command gives
+you the number of lock classes currently in use along with the maximum:
+
+ grep "lock-classes" /proc/lockdep_stats
+
+This command produces the following output on a modest system:
+
+ lock-classes: 748 [max: 8191]
+
+If the number allocated (748 above) increases continually over time,
+then there is likely a leak. The following command can be used to
+identify the leaking lock classes:
+
+ grep "BD" /proc/lockdep
+
+Run the command and save the output, then compare against the output from
+a later run of this command to identify the leakers. This same output
+can also help you find situations where runtime lock initialization has
+been omitted.
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index b510564aac7e..bb24c2a0e870 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -191,8 +191,6 @@ And for string fields they are:
Currently, only exact string matches are supported.
-Currently, the maximum number of predicates in a filter is 16.
-
5.2 Setting filters
-------------------
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7945b0bd35e2..e2a4b5287361 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1100,6 +1100,15 @@ emulate them efficiently. The fields in each entry are defined as follows:
eax, ebx, ecx, edx: the values returned by the cpuid instruction for
this function/index combination
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support. Instead it is reported via
+
+ ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER).
+
+If that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
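A hedged sketch of the detection flow just described, where kvm_fd and vm_fd are assumed to be the already-open /dev/kvm and VM file descriptors:

	int has_tsc_deadline;

	has_tsc_deadline = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
				 KVM_CAP_TSC_DEADLINE_TIMER);
	if (has_tsc_deadline > 0 && ioctl(vm_fd, KVM_CREATE_IRQCHIP) == 0) {
		/*
		 * The in-kernel local APIC is available; userspace may now
		 * set CPUID leaf 1, ecx bit 24 in the entries it passes
		 * to KVM_SET_CPUID2.
		 */
	}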
4.47 KVM_PPC_GET_PVINFO
Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1151,6 +1160,13 @@ following flags are specified:
/* Depends on KVM_CAP_IOMMU */
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device. Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment. The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
4.49 KVM_DEASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_DEASSIGNMENT
diff --git a/MAINTAINERS b/MAINTAINERS
index b9db108f01c8..62f1cd357ddf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1698,11 +1698,9 @@ F: arch/x86/include/asm/tce.h
CAN NETWORK LAYER
M: Oliver Hartkopp <socketcan@hartkopp.net>
-M: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
-M: Urs Thuermann <urs.thuermann@volkswagen.de>
L: linux-can@vger.kernel.org
-L: netdev@vger.kernel.org
-W: http://developer.berlios.de/projects/socketcan/
+W: http://gitorious.org/linux-can
+T: git git://gitorious.org/linux-can/linux-can-next.git
S: Maintained
F: net/can/
F: include/linux/can.h
@@ -1713,9 +1711,10 @@ F: include/linux/can/gw.h
CAN NETWORK DRIVERS
M: Wolfgang Grandegger <wg@grandegger.com>
+M: Marc Kleine-Budde <mkl@pengutronix.de>
L: linux-can@vger.kernel.org
-L: netdev@vger.kernel.org
-W: http://developer.berlios.de/projects/socketcan/
+W: http://gitorious.org/linux-can
+T: git git://gitorious.org/linux-can/linux-can-next.git
S: Maintained
F: drivers/net/can/
F: include/linux/can/dev.h
@@ -2700,7 +2699,7 @@ FIREWIRE SUBSYSTEM
M: Stefan Richter <stefanr@s5r6.in-berlin.de>
L: linux1394-devel@lists.sourceforge.net
W: http://ieee1394.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
S: Maintained
F: drivers/firewire/
F: include/linux/firewire*.h
@@ -3101,6 +3100,7 @@ F: include/linux/hid*
HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Maintained
F: Documentation/timers/
F: kernel/hrtimer.c
@@ -3610,7 +3610,7 @@ F: net/irda/
IRQ SUBSYSTEM
M: Thomas Gleixner <tglx@linutronix.de>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/
ISAPNP
@@ -4098,7 +4098,7 @@ F: drivers/hwmon/lm90.c
LOCKDEP AND LOCKSTAT
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
@@ -4280,7 +4280,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: Documentation/dvb/
F: Documentation/video4linux/
+F: Documentation/DocBook/media/
F: drivers/media/
+F: drivers/staging/media/
F: include/media/
F: include/linux/dvb/
F: include/linux/videodev*.h
@@ -5086,6 +5088,7 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
F: kernel/events/*
F: include/linux/perf_event.h
@@ -5165,6 +5168,7 @@ F: drivers/scsi/pm8001/
POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: fs/timerfd.c
F: include/linux/timer*
@@ -5680,6 +5684,7 @@ F: drivers/dma/dw_dmac.c
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: include/linux/clocksource.h
F: include/linux/time.h
@@ -5704,6 +5709,7 @@ F: drivers/watchdog/sc1200wdt.c
SCHEDULER
M: Ingo Molnar <mingo@elte.hu>
M: Peter Zijlstra <peterz@infradead.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
F: kernel/sched*
F: include/linux/sched.h
@@ -6631,7 +6637,7 @@ TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Frederic Weisbecker <fweisbec@gmail.com>
M: Ingo Molnar <mingo@redhat.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Maintained
F: Documentation/trace/ftrace.txt
F: arch/*/*/*/ftrace.h
@@ -7381,7 +7387,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
M: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
F: Documentation/x86/
F: arch/x86/
diff --git a/Makefile b/Makefile
index d1ea73f74c2f..adddd11c3b3b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 4b0669cbb3b0..2505740b81d2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,10 @@ config OPROFILE_EVENT_MULTIPLEX
config HAVE_OPROFILE
bool
+config OPROFILE_NMI_TIMER
+ def_bool y
+ depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+
config KPROBES
bool "Kprobes"
depends on MODULES
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 776d76b8cb69..b259c7c644e3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1246,7 +1246,7 @@ config PL310_ERRATA_588369
config ARM_ERRATA_720789
bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
- depends on CPU_V7 && SMP
+ depends on CPU_V7
help
This option enables the workaround for the 720789 Cortex-A9 (prior to
r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
@@ -1282,7 +1282,7 @@ config ARM_ERRATA_743622
config ARM_ERRATA_751472
bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
- depends on CPU_V7 && SMP
+ depends on CPU_V7
help
This option enables the workaround for the 751472 Cortex-A9 (prior
to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index f407a6b35d3d..8d8df744f7a5 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -221,17 +221,6 @@
*/
#define MCODE_BUFF_PER_REQ 256
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-#define MARK_FREE(req) do { \
- _emit_END(0, (req)->mc_cpu); \
- (req)->mc_len = 0; \
- } while (0)
-
/* If the _pl330_req is available to the client */
#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
@@ -301,8 +290,10 @@ struct pl330_thread {
struct pl330_dmac *dmac;
/* Only two at a time */
struct _pl330_req req[2];
- /* Index of the last submitted request */
+ /* Index of the last enqueued request */
unsigned lstenq;
+ /* Index of the last submitted request or -1 if the DMA is stopped */
+ int req_running;
};
enum pl330_dmac_state {
@@ -778,6 +769,22 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
writel(0, regs + DBGCMD);
}
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+ struct _pl330_req *req = &thrd->req[idx];
+
+ _emit_END(0, req->mc_cpu);
+ req->mc_len = 0;
+
+ thrd->req_running = -1;
+}
+
static inline u32 _state(struct pl330_thread *thrd)
{
void __iomem *regs = thrd->dmac->pinfo->base;
@@ -836,31 +843,6 @@ static inline u32 _state(struct pl330_thread *thrd)
}
}
-/* If the request 'req' of thread 'thrd' is currently active */
-static inline bool _req_active(struct pl330_thread *thrd,
- struct _pl330_req *req)
-{
- void __iomem *regs = thrd->dmac->pinfo->base;
- u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
-
- if (IS_FREE(req))
- return false;
-
- return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
-}
-
-/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
-static inline unsigned _thrd_active(struct pl330_thread *thrd)
-{
- if (_req_active(thrd, &thrd->req[0]))
- return 1; /* First req active */
-
- if (_req_active(thrd, &thrd->req[1]))
- return 2; /* Second req active */
-
- return 0;
-}
-
static void _stop(struct pl330_thread *thrd)
{
void __iomem *regs = thrd->dmac->pinfo->base;
@@ -892,17 +874,22 @@ static bool _trigger(struct pl330_thread *thrd)
struct _arg_GO go;
unsigned ns;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
+ int idx;
/* Return if already ACTIVE */
if (_state(thrd) != PL330_STATE_STOPPED)
return true;
- if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
- req = &thrd->req[1 - thrd->lstenq];
- else if (!IS_FREE(&thrd->req[thrd->lstenq]))
- req = &thrd->req[thrd->lstenq];
- else
- req = NULL;
+ idx = 1 - thrd->lstenq;
+ if (!IS_FREE(&thrd->req[idx]))
+ req = &thrd->req[idx];
+ else {
+ idx = thrd->lstenq;
+ if (!IS_FREE(&thrd->req[idx]))
+ req = &thrd->req[idx];
+ else
+ req = NULL;
+ }
/* Return if no request */
if (!req || !req->r)
@@ -933,6 +920,8 @@ static bool _trigger(struct pl330_thread *thrd)
/* Only manager can execute GO */
_execute_DBGINSN(thrd, insn, true);
+ thrd->req_running = idx;
+
return true;
}
@@ -1382,8 +1371,8 @@ static void pl330_dotask(unsigned long data)
thrd->req[0].r = NULL;
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[0]);
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 0);
+ mark_free(thrd, 1);
/* Clear the reset flag */
pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1461,14 +1450,12 @@ int pl330_update(const struct pl330_info *pi)
thrd = &pl330->channels[id];
- active = _thrd_active(thrd);
- if (!active) /* Aborted */
+ active = thrd->req_running;
+ if (active == -1) /* Aborted */
continue;
- active -= 1;
-
rqdone = &thrd->req[active];
- MARK_FREE(rqdone);
+ mark_free(thrd, active);
/* Get going again ASAP */
_start(thrd);
@@ -1509,7 +1496,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
struct pl330_thread *thrd = ch_id;
struct pl330_dmac *pl330;
unsigned long flags;
- int ret = 0, active;
+ int ret = 0, active = thrd->req_running;
if (!thrd || thrd->free || thrd->dmac->state == DYING)
return -EINVAL;
@@ -1525,28 +1512,24 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
thrd->req[0].r = NULL;
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[0]);
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 0);
+ mark_free(thrd, 1);
break;
case PL330_OP_ABORT:
- active = _thrd_active(thrd);
-
/* Make sure the channel is stopped */
_stop(thrd);
/* ABORT is only for the active req */
- if (!active)
+ if (active == -1)
break;
- active--;
-
thrd->req[active].r = NULL;
- MARK_FREE(&thrd->req[active]);
+ mark_free(thrd, active);
/* Start the next */
case PL330_OP_START:
- if (!_thrd_active(thrd) && !_start(thrd))
+ if ((active == -1) && !_start(thrd))
ret = -EIO;
break;
@@ -1587,14 +1570,13 @@ int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
else
pstatus->faulting = false;
- active = _thrd_active(thrd);
+ active = thrd->req_running;
- if (!active) {
+ if (active == -1) {
/* Indicate that the thread is not running */
pstatus->top_req = NULL;
pstatus->wait_req = NULL;
} else {
- active--;
pstatus->top_req = thrd->req[active].r;
pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
? thrd->req[1 - active].r : NULL;
@@ -1659,9 +1641,9 @@ void *pl330_request_channel(const struct pl330_info *pi)
thrd->free = false;
thrd->lstenq = 1;
thrd->req[0].r = NULL;
- MARK_FREE(&thrd->req[0]);
+ mark_free(thrd, 0);
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 1);
break;
}
}
@@ -1767,14 +1749,14 @@ static inline void _reset_thread(struct pl330_thread *thrd)
thrd->req[0].mc_bus = pl330->mcode_bus
+ (thrd->id * pi->mcbufsz);
thrd->req[0].r = NULL;
- MARK_FREE(&thrd->req[0]);
+ mark_free(thrd, 0);
thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
+ pi->mcbufsz / 2;
thrd->req[1].mc_bus = thrd->req[0].mc_bus
+ pi->mcbufsz / 2;
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 1);
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 11a4192197c8..cf497ce41dfe 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -18,9 +18,10 @@ CONFIG_ARCH_MXC=y
CONFIG_ARCH_IMX_V4_V5=y
CONFIG_ARCH_MX1ADS=y
CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
CONFIG_MACH_MX21ADS=y
CONFIG_MACH_MX25_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX25=y
+CONFIG_MACH_EUKREA_CPUIMX25SD=y
CONFIG_MACH_MX27ADS=y
CONFIG_MACH_PCM038=y
CONFIG_MACH_CPUIMX27=y
@@ -72,17 +73,16 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
CONFIG_MTD_UBI=y
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
CONFIG_DM9000=y
+CONFIG_SMC91X=y
CONFIG_SMC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -100,6 +100,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IMX=y
CONFIG_SPI=y
CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
CONFIG_W1=y
CONFIG_W1_MASTER_MXC=y
CONFIG_W1_SLAVE_THERM=y
@@ -139,6 +140,7 @@ CONFIG_MMC=y
CONFIG_MMC_MXC=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
CONFIG_LEDS_MC13783=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3d0c6fb74ae4..e8e8fe505df1 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -183,7 +183,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
leds_event(led_idle_start);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
@@ -213,7 +214,8 @@ void cpu_idle(void)
}
}
leds_event(led_idle_end);
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
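This idle-loop conversion repeats across the avr32, blackfin, microblaze, mips, openrisc, s390 and iSeries hunks below: the old tick_nohz_stop_sched_tick(1)/tick_nohz_restart_sched_tick() pair becomes tick_nohz_idle_enter()/rcu_idle_enter() on the way in and rcu_idle_exit()/tick_nohz_idle_exit() on the way out, so RCU is told about idle strictly inside the nohz window. The common shape, as a sketch with the arch-specific sleep reduced to a placeholder:

	void cpu_idle_sketch(void)
	{
		while (1) {
			tick_nohz_idle_enter();	/* stop the periodic tick first */
			rcu_idle_enter();	/* then mark this CPU RCU-idle */

			while (!need_resched())
				arch_idle();	/* placeholder for the arch's sleep */

			rcu_idle_exit();	/* leave RCU-idle before the tick restarts */
			tick_nohz_idle_exit();

			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}
	}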
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 8fc2c8fcbdc6..c0b59bff6be6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -52,6 +52,7 @@
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>
+#include <asm/memblock.h>
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 3f03fe0c3269..00df012c4678 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -160,12 +160,12 @@ static const struct unwind_idx *unwind_find_origin(
const struct unwind_idx *start, const struct unwind_idx *stop)
{
pr_debug("%s(%p, %p)\n", __func__, start, stop);
- while (start < stop - 1) {
+ while (start < stop) {
const struct unwind_idx *mid = start + ((stop - start) >> 1);
if (mid->addr_offset >= 0x40000000)
/* negative offset */
- start = mid;
+ start = mid + 1;
else
/* positive offset */
stop = mid;
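The old loop iterated with start < stop - 1 and start = mid, which can terminate one entry short of the origin; the fix turns it into a standard lower-bound binary search (addr_offset values >= 0x40000000 encode negative offsets, everything below is positive). The same loop written as a self-contained sketch over a plain sorted array:

	/* Sketch: lower-bound search for the first element >= key; the unwind
	 * fix is this loop with "encodes a negative offset" as the predicate. */
	static const unsigned long *lower_bound(const unsigned long *start,
						const unsigned long *stop,
						unsigned long key)
	{
		while (start < stop) {
			const unsigned long *mid = start + ((stop - start) >> 1);

			if (*mid < key)
				start = mid + 1;	/* answer lies strictly right of mid */
			else
				stop = mid;		/* mid remains a candidate */
		}
		return start;	/* start == stop: first element >= key */
	}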
diff --git a/arch/arm/mach-exynos/cpu.c b/arch/arm/mach-exynos/cpu.c
index 90ec247f3b37..cc8d4bd6d0f7 100644
--- a/arch/arm/mach-exynos/cpu.c
+++ b/arch/arm/mach-exynos/cpu.c
@@ -111,11 +111,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE,
}, {
- .virtual = (unsigned long)S5P_VA_SROMC,
- .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S3C_VA_USB_HSPHY,
.pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
.length = SZ_4K,
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 97343df8f132..85b5527d0918 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
char name[10];
};
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
static void exynos4_mct_write(unsigned int value, void *addr)
{
void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
}
#ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
void local_timer_stop(struct clock_event_device *evt)
{
+ unsigned int cpu = smp_processor_id();
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
if (mct_int_type == MCT_INT_SPI)
- disable_irq(evt->irq);
+ if (cpu == 0)
+ remove_irq(evt->irq, &mct_tick0_event_irq);
+ else
+ remove_irq(evt->irq, &mct_tick1_event_irq);
else
disable_percpu_irq(IRQ_MCT_LOCALTIMER);
}
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
clk_rate = clk_get_rate(mct_clk);
+#ifdef CONFIG_LOCAL_TIMERS
if (mct_int_type == MCT_INT_PPI) {
int err;
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
WARN(err, "MCT: can't request IRQ %d (%d)\n",
IRQ_MCT_LOCALTIMER, err);
}
+#endif /* CONFIG_LOCAL_TIMERS */
}
static void __init exynos4_timer_init(void)
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index c44aa974e79c..0e6f1af260b6 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -132,7 +132,7 @@ config MACH_MX25_3DS
select IMX_HAVE_PLATFORM_MXC_NAND
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-config MACH_EUKREA_CPUIMX25
+config MACH_EUKREA_CPUIMX25SD
bool "Support Eukrea CPUIMX25 Platform"
select SOC_IMX25
select IMX_HAVE_PLATFORM_FLEXCAN
@@ -148,7 +148,7 @@ config MACH_EUKREA_CPUIMX25
choice
prompt "Baseboard"
- depends on MACH_EUKREA_CPUIMX25
+ depends on MACH_EUKREA_CPUIMX25SD
default MACH_EUKREA_MBIMXSD25_BASEBOARD
config MACH_EUKREA_MBIMXSD25_BASEBOARD
@@ -542,7 +542,7 @@ config MACH_MX35_3DS
Include support for MX35PDK platform. This includes specific
configurations for the board and its peripherals.
-config MACH_EUKREA_CPUIMX35
+config MACH_EUKREA_CPUIMX35SD
bool "Support Eukrea CPUIMX35 Platform"
select SOC_IMX35
select IMX_HAVE_PLATFORM_FLEXCAN
@@ -560,7 +560,7 @@ config MACH_EUKREA_CPUIMX35
choice
prompt "Baseboard"
- depends on MACH_EUKREA_CPUIMX35
+ depends on MACH_EUKREA_CPUIMX35SD
default MACH_EUKREA_MBIMXSD35_BASEBOARD
config MACH_EUKREA_MBIMXSD35_BASEBOARD
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index aba73214c2a8..d97f409ce98b 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
# i.MX25 based machines
obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
# i.MX27 based machines
@@ -57,7 +57,7 @@ obj-$(CONFIG_MACH_BUG) += mach-bug.o
# i.MX35 based machines
obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o
obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o
obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
index 8116f119517d..ac8238caecb9 100644
--- a/arch/arm/mach-imx/clock-imx35.c
+++ b/arch/arm/mach-imx/clock-imx35.c
@@ -507,7 +507,7 @@ static struct clk_lookup lookups[] = {
int __init mx35_clocks_init()
{
- unsigned int cgr2 = 3 << 26, cgr3 = 0;
+ unsigned int cgr2 = 3 << 26;
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
cgr2 |= 3 << 16;
@@ -521,6 +521,12 @@ int __init mx35_clocks_init()
__raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
CCM_BASE + CCM_CGR1);
+ __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+ __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+ clk_enable(&iim_clk);
+ imx_print_silicon_rev("i.MX35", mx35_revision());
+ clk_disable(&iim_clk);
/*
* Check if we came up in internal boot mode. If yes, we need some
@@ -529,17 +535,11 @@ int __init mx35_clocks_init()
*/
if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
/* Additionally turn on UART1, SCC, and IIM clocks */
- cgr2 |= 3 << 16 | 3 << 4;
- cgr3 |= 3 << 2;
+ clk_enable(&iim_clk);
+ clk_enable(&uart1_clk);
+ clk_enable(&scc_clk);
}
- __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
- __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX35", mx35_revision());
- clk_disable(&iim_clk);
-
#ifdef CONFIG_MXC_USE_EPIT
epit_timer_init(&epit1_clk,
MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 66af2e8f7e57..362aae780601 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -53,12 +53,18 @@ static const struct imxi2c_platform_data
.bitrate = 100000,
};
+#define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2)
+static int tsc2007_get_pendown_state(void)
+{
+ return !gpio_get_value(TSC2007_IRQGPIO);
+}
+
static struct tsc2007_platform_data tsc2007_info = {
.model = 2007,
.x_plate_ohms = 180,
+ .get_pendown_state = tsc2007_get_pendown_state,
};
-#define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2)
static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
{
I2C_BOARD_INFO("pcf8563", 0x51),
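The TSC2007 holds its PENIRQ line low while the pen is down, so the new callback simply inverts the GPIO level, and wiring it into the platform data lets the touchscreen driver poll pen state between samples. Pulled out of board context (the numeric GPIO value is an assumption; IMX_GPIO_NR(3, 2) would resolve to (3 - 1) * 32 + 2 = 66):

	#include <linux/gpio.h>
	#include <linux/i2c/tsc2007.h>

	#define TSC2007_IRQGPIO	66	/* assumed expansion of IMX_GPIO_NR(3, 2) */

	/* PENIRQ is active low: pen down pulls the line to 0 */
	static int tsc2007_get_pendown_state(void)
	{
		return !gpio_get_value(TSC2007_IRQGPIO);
	}

	static struct tsc2007_platform_data tsc2007_info = {
		.model			= 2007,
		.x_plate_ohms		= 180,
		.get_pendown_state	= tsc2007_get_pendown_state,
	};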
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 5c837603ff0f..24994bb52147 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
{
iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
- PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+ PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
imx51_soc_init();
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index 6bea31ab8f85..64bbfcea6f35 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 7678f7734db6..237bdecd9331 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
gpio_set_value(LOCO_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 59c0845eb4a6..d42132a80e8f 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
gpio_set_value(SMD_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ba1aa07bdb29..c15c5c9c9085 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
static void __init rx51_charger_init(void)
{
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
- GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+ GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
platform_device_register(&rx51_charger_device);
}
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 292eee3be15f..28fcb27005d2 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
pdata->reg_size = 4;
pdata->has_ccr = true;
}
+ pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+ if (id == 1)
+ pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
name, oh->name);
return PTR_ERR(pdev);
}
- pdata->set_clk_src = omap2_mcbsp_set_clk_src;
- if (id == 1)
- pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
omap_mcbsp_count++;
return 0;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 7f8915ad5099..eef43e2e163e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3247,18 +3247,14 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
/* 3430ES1-only hwmods */
static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
- &omap3xxx_iva_hwmod,
&omap3430es1_dss_core_hwmod,
- &omap3xxx_mailbox_hwmod,
NULL
};
/* 3430ES2+-only hwmods */
static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
- &omap3xxx_iva_hwmod,
&omap3xxx_dss_core_hwmod,
&omap3xxx_usbhsotg_hwmod,
- &omap3xxx_mailbox_hwmod,
NULL
};
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index a9106c392398..8662ef6e5681 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
static struct platform_pwm_backlight_data smdkv210_bl_data = {
.pwm_id = 3,
+ .pwm_period_ns = 1000,
};
static void __init smdkv210_map_io(void)
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index b862e9f81e3e..7119b87cbfa0 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = {
MACHINE_START(AG5EVM, "ag5evm")
.map_io = ag5evm_map_io,
+ .nr_irqs = NR_IRQS_LEGACY,
.init_irq = sh73a0_init_irq,
.handle_irq = shmobile_handle_irq_gic,
.init_machine = ag5evm_init,
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index bd9a78424d6b..f44150b5ae46 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -33,6 +33,7 @@
#include <linux/input/sh_keysc.h>
#include <linux/gpio_keys.h>
#include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gic_spi(33), /* PINTA2 @ PORT144 */
+ .start = SH73A0_PINT0_IRQ(2), /* PINTA2 */
.flags = IORESOURCE_IRQ,
},
};
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
#define GPIO_LED(n, g) { .name = n, .gpio = g }
static struct gpio_led gpio_leds[] = {
- GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
- GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
- GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
- GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
},
};
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+ .name = "V2513",
+ .pin_gpio_fn = GPIO_FN_TPU1TO2,
+ .pin_gpio = GPIO_PORT153,
+ .channel_offset = 0x90,
+ .timer_bit = 2,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+ [0] = {
+ .name = "TPU12",
+ .start = 0xe6610090,
+ .end = 0xe66100b5,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu12_device = {
+ .name = "leds-renesas-tpu",
+ .id = 12,
+ .dev = {
+ .platform_data = &led_renesas_tpu12_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu12_resources),
+ .resource = tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+ .name = "V2514",
+ .pin_gpio_fn = GPIO_FN_TPU4TO1,
+ .pin_gpio = GPIO_PORT199,
+ .channel_offset = 0x50,
+ .timer_bit = 1,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+ [0] = {
+ .name = "TPU41",
+ .start = 0xe6640050,
+ .end = 0xe6640075,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu41_device = {
+ .name = "leds-renesas-tpu",
+ .id = 41,
+ .dev = {
+ .platform_data = &led_renesas_tpu41_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu41_resources),
+ .resource = tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+ .name = "V2515",
+ .pin_gpio_fn = GPIO_FN_TPU2TO1,
+ .pin_gpio = GPIO_PORT197,
+ .channel_offset = 0x50,
+ .timer_bit = 1,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+ [0] = {
+ .name = "TPU21",
+ .start = 0xe6620050,
+ .end = 0xe6620075,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu21_device = {
+ .name = "leds-renesas-tpu",
+ .id = 21,
+ .dev = {
+ .platform_data = &led_renesas_tpu21_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu21_resources),
+ .resource = tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+ .name = "KEYLED",
+ .pin_gpio_fn = GPIO_FN_TPU3TO0,
+ .pin_gpio = GPIO_PORT163,
+ .channel_offset = 0x10,
+ .timer_bit = 0,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+ [0] = {
+ .name = "TPU30",
+ .start = 0xe6630010,
+ .end = 0xe6630035,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu30_device = {
+ .name = "leds-renesas-tpu",
+ .id = 30,
+ .dev = {
+ .platform_data = &led_renesas_tpu30_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu30_resources),
+ .resource = tpu30_resources,
+};
+
/* MMCIF */
static struct resource mmcif_resources[] = {
[0] = {
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
&keysc_device,
&gpio_keys_device,
&gpio_leds_device,
+ &leds_tpu12_device,
+ &leds_tpu41_device,
+ &leds_tpu21_device,
+ &leds_tpu30_device,
&mmcif_device,
&sdhi0_device,
&sdhi1_device,
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
shmobile_setup_console();
}
-#define PINTER0A 0xe69000a0
-#define PINTCR0A 0xe69000b0
-
-void __init kota2_init_irq(void)
-{
- sh73a0_init_irq();
-
- /* setup PINT: enable PINTA2 as active low */
- __raw_writel(1 << 29, PINTER0A);
- __raw_writew(2 << 10, PINTCR0A);
-}
-
static void __init kota2_init(void)
{
sh73a0_pinmux_init();
@@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
MACHINE_START(KOTA2, "kota2")
.map_io = kota2_map_io,
- .init_irq = kota2_init_irq,
+ .nr_irqs = NR_IRQS_LEGACY,
+ .init_irq = sh73a0_init_irq,
.handle_irq = shmobile_handle_irq_gic,
.init_machine = kota2_init,
.timer = &kota2_timer,
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 61a846bb30f2..1370a89ca358 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -113,6 +113,12 @@ static struct clk main_clk = {
.ops = &main_clk_ops,
};
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+ .ops = &div2_clk_ops,
+ .parent = &main_clk,
+};
+
/* PLL0, PLL1, PLL2, PLL3 */
static unsigned long pll_recalc(struct clk *clk)
{
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
&extal1_div2_clk,
&extal2_div2_clk,
&main_clk,
+ &main_div2_clk,
&pll0_clk,
&pll1_clk,
&pll2_clk,
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
[DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
[DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
- [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+ [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
[DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
[DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
[DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@ enum { MSTP001,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
MSTP314, MSTP313, MSTP312, MSTP311,
+ MSTP303, MSTP302, MSTP301, MSTP300,
MSTP411, MSTP410, MSTP403,
MSTP_NR };
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
[MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+ [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+ [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+ [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+ [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fbdd12ea3a58..7c38474e533a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,6 +32,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <asm/memblock.h>
#include "mm.h"
@@ -332,7 +333,6 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
- memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -371,7 +371,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
}
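This hunk, like the microblaze, openrisc and powerpc ones below, tracks a memblock API change: memblock_init() is gone because the allocator is now statically initialized, and memblock_analyze() becomes memblock_allow_resize(), called once all regions are registered. The resulting bring-up order, sketched (the bank loop and reservation are stand-ins for whatever the architecture enumerates):

	/* Sketch of the new memblock boot sequence; names per the hunks. */
	for (i = 0; i < nr_banks; i++)
		memblock_add(bank[i].start, bank[i].size);	/* register RAM */

	memblock_reserve(kernel_start, kernel_size);	/* carve out fixed users */

	memblock_allow_resize();	/* tables may grow; replaces memblock_analyze() */
	memblock_dump_all();		/* debug dump of the final layout */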
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 2c559ac38142..e70a73731eaa 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -363,11 +363,13 @@ __v7_setup:
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
-#ifdef CONFIG_ARM_ERRATA_751472
- cmp r6, #0x30 @ present prior to r3p0
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+ ALT_SMP(cmp r6, #0x30) @ present prior to r3p0
+ ALT_UP_B(1f)
mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrlt r10, r10, #1 << 11 @ set bit #11
mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
+1:
#endif
3: mov r10, #0
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index c074e66ad224..4e0a371630b3 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
diff --git a/arch/arm/plat-mxc/cpufreq.c b/arch/arm/plat-mxc/cpufreq.c
index 74aac96cda20..73db34bf588a 100644
--- a/arch/arm/plat-mxc/cpufreq.c
+++ b/arch/arm/plat-mxc/cpufreq.c
@@ -17,6 +17,7 @@
* the CPU clock speed on the fly.
*/
+#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -97,7 +98,7 @@ static int mxc_set_target(struct cpufreq_policy *policy,
return ret;
}
-static int __init mxc_cpufreq_init(struct cpufreq_policy *policy)
+static int mxc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
int i;
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index 88fd40452567..477971b00930 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -98,6 +98,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
case MACH_TYPE_PCM043:
case MACH_TYPE_LILLY1131:
case MACH_TYPE_VPR200:
+ case MACH_TYPE_EUKREA_CPUIMX35SD:
uart_base = MX3X_UART1_BASE_ADDR;
break;
case MACH_TYPE_MAGX_ZN5:
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 42d74ea59084..e032717f7d02 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -32,6 +32,9 @@
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN (1 << 24)
+#define MX3_PWMCR_WAITEN (1 << 23)
+#define MX3_PWMCR_DBGEN (1 << 22)
#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
#define MX3_PWMCR_EN (1 << 0)
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
do_div(c, period_ns);
duty_cycles = c;
+ /*
+ * According to the i.MX PWM reference manual, the real period is
+ * the PERIOD value in PWMPR plus 2.
+ */
+ if (period_cycles > 2)
+ period_cycles -= 2;
+ else
+ period_cycles = 0;
+
writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+ cr = MX3_PWMCR_PRESCALER(prescale) |
+ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
if (cpu_is_mx25())
cr |= MX3_PWMCR_CLKSRC_IPG;
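Per the new comment, the counter runs for PWMPR + 2 clocks each period, so the value written to the register is two less than the computed cycle count (clamped at zero). A quick worked example with made-up numbers:

	/* Sketch: 1 MHz counter clock, 1 ms (1 kHz) period -> 1000 ticks. */
	unsigned long clk_hz = 1000000, period_ns = 1000000;
	unsigned long period_cycles = clk_hz / (1000000000UL / period_ns);	/* 1000 */
	unsigned long pwmpr = period_cycles > 2 ? period_cycles - 2 : 0;	/* 998 */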
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 41ab97ebe4cf..10d160888133 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -384,12 +384,16 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
struct orion_gpio_chip *ochip;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ char gc_label[16];
if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
return;
+ snprintf(gc_label, sizeof(gc_label), "orion_gpio%d",
+ orion_gpio_chip_count);
+
ochip = orion_gpio_chips + orion_gpio_chip_count;
- ochip->chip.label = "orion_gpio";
+ ochip->chip.label = kstrdup(gc_label, GFP_KERNEL);
ochip->chip.request = orion_gpio_request;
ochip->chip.direction_input = orion_gpio_direction_input;
ochip->chip.get = orion_gpio_get;
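Every Orion GPIO chip used to register with the same static "orion_gpio" label, making them indistinguishable in sysfs and diagnostics; the hunk formats a per-chip name into a stack buffer and kstrdup()s it, since the chip structure outlives the function. Reduced to its essentials:

	char buf[16];

	snprintf(buf, sizeof(buf), "orion_gpio%d", chip_count);	/* unique per chip */
	chip->label = kstrdup(buf, GFP_KERNEL);	/* heap copy; buf dies with the frame
						 * (a NULL result goes unchecked here,
						 * matching the hunk) */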
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
index e657305644cc..a976c023b286 100644
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pwm_backlight.h>
-#include <linux/slab.h>
#include <plat/devs.h>
#include <plat/gpio-cfg.h>
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
index dac4760c0f0a..95509d8eb140 100644
--- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
+++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
@@ -202,14 +202,6 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls,
extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
-extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
- struct s3c_cpufreq_config *cfg,
- union s3c_iobank *iob);
-
-extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
- struct s3c_cpufreq_config *cfg,
- union s3c_iobank *iob);
-
#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
#define s3c_cpufreq_debugfs_call(x) x
#else
@@ -226,6 +218,10 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
#ifdef CONFIG_S3C2410_IOTIMING
+extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
+ struct s3c_cpufreq_config *cfg,
+ union s3c_iobank *iob);
+
extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *iot);
@@ -235,6 +231,7 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *iot);
#else
+#define s3c2410_iotiming_debugfs NULL
#define s3c2410_iotiming_calc NULL
#define s3c2410_iotiming_get NULL
#define s3c2410_iotiming_set NULL
@@ -242,8 +239,10 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
/* S3C2412 compatible routines */
-extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *timings);
+#ifdef CONFIG_S3C2412_IOTIMING
+extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
+ struct s3c_cpufreq_config *cfg,
+ union s3c_iobank *iob);
extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *timings);
@@ -253,6 +252,12 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg,
extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *iot);
+#else
+#define s3c2412_iotiming_debugfs NULL
+#define s3c2412_iotiming_calc NULL
+#define s3c2412_iotiming_get NULL
+#define s3c2412_iotiming_set NULL
+#endif /* CONFIG_S3C2412_IOTIMING */
#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG
#define s3c_freq_dbg(x...) printk(KERN_INFO x)
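Moving each group of externs under its CONFIG_S3C2410_IOTIMING / CONFIG_S3C2412_IOTIMING guard and supplying NULL fallbacks lets code that stores these routines in function-pointer slots build either way. The general pattern, with hypothetical names:

	#ifdef CONFIG_FEATURE_X
	extern int feature_x_calc(struct cfg *cfg, struct timings *t);
	#else
	#define feature_x_calc NULL	/* slot stays NULL when the feature is out */
	#endif

	/* Consumer: only call through the pointer when it was compiled in. */
	if (info->calc)
		info->calc(cfg, t);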
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index ef5a2a08fcca..ea3395750324 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -34,10 +34,12 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
cpu_idle_sleep();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6a80a9e9fc4a..8dd0416673cb 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -88,10 +88,12 @@ void cpu_idle(void)
#endif
if (!idle)
idle = default_idle;
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
idle();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 27489b6dd533..3b7a7c483785 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -23,6 +23,9 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
select HAVE_GENERIC_HARDIRQS
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select IRQ_PER_CPU
@@ -474,9 +477,6 @@ config NODES_SHIFT
MAX_NUMNODES will be 2^(This value).
If in doubt, use the default.
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
# VIRTUAL_MEM_MAP has been retained for historical reasons.
config VIRTUAL_MEM_MAP
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 461e52f0277f..3deac956d325 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -50,6 +50,8 @@ typedef u64 __nocast cputime64_t;
((__force u64)(__ct) / NSEC_PER_USEC)
#define usecs_to_cputime(__usecs) \
(__force cputime_t)((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs) \
+ (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index f114a3b14c6a..1516d1dc11fd 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -16,6 +16,7 @@
*/
#include <linux/bootmem.h>
#include <linux/efi.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
@@ -348,7 +349,7 @@ paging_init (void)
printk("Virtual mem_map starts at 0x%p\n", mem_map);
}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
- add_active_range(0, 0, max_low_pfn);
+ memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 00cb0e26c64e..13df239dbed1 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
@@ -557,8 +558,7 @@ int __init register_active_ranges(u64 start, u64 len, int nid)
#endif
if (start < end)
- add_active_range(nid, __pa(start) >> PAGE_SHIFT,
- __pa(end) >> PAGE_SHIFT);
+ memblock_add_node(__pa(start), end - start, nid);
return 0;
}
diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h
deleted file mode 100644
index 20a8e257c77f..000000000000
--- a/arch/microblaze/include/asm/memblock.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
-#define _ASM_MICROBLAZE_MEMBLOCK_H
-
-#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
-
-
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 95cc295976a7..7dcb5bfffb75 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -103,10 +103,12 @@ void cpu_idle(void)
if (!idle)
idle = default_idle;
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
idle();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 977484add216..80d314e81901 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -122,7 +122,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- memblock_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
@@ -130,7 +129,7 @@ void __init early_init_devtree(void *params)
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
parse_early_param();
- memblock_analyze();
+ memblock_allow_resize();
pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d46f1da18a3c..9c652eb68aaa 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -25,6 +25,9 @@ config MIPS
select GENERIC_IRQ_SHOW
select HAVE_ARCH_JUMP_LABEL
select IRQ_FORCED_THREADING
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
menu "Machine selection"
@@ -2064,9 +2067,6 @@ config ARCH_DISCONTIGMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa> for more.
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config ARCH_SPARSEMEM_ENABLE
bool
select SPARSEMEM_STATIC
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c47f96e453c0..7955409051c4 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -56,7 +56,8 @@ void __noreturn cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
extern void smtc_idle_loop_hook(void);
@@ -77,7 +78,8 @@ void __noreturn cpu_idle(void)
system_state == SYSTEM_BOOTING))
play_dead();
#endif
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 84af26ab2212..b1cb8f87d7b4 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -14,6 +14,7 @@
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
+#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
@@ -352,7 +353,7 @@ static void __init bootmem_init(void)
continue;
#endif
- add_active_range(0, start, end);
+ memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
}
/*
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index bc1297109cc5..b105eca3c020 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -12,6 +12,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
@@ -381,8 +382,8 @@ static void __init szmem(void)
continue;
}
num_physpages += slot_psize;
- add_active_range(node, slot_getbasepfn(node, slot),
- slot_getbasepfn(node, slot) + slot_psize);
+ memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
+ PFN_PHYS(slot_psize), node);
}
}
}
diff --git a/arch/openrisc/include/asm/memblock.h b/arch/openrisc/include/asm/memblock.h
deleted file mode 100644
index bbe5a1c788cb..000000000000
--- a/arch/openrisc/include/asm/memblock.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_OPENRISC_MEMBLOCK_H
-#define __ASM_OPENRISC_MEMBLOCK_H
-
-/* empty */
-
-#endif /* __ASM_OPENRISC_MEMBLOCK_H */
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index d5bc5f813e89..e5fc78877830 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -51,7 +51,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -69,7 +70,8 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 1bb58ba89afa..3d4478f6c942 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -76,14 +76,13 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- memblock_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
- memblock_analyze();
+ memblock_allow_resize();
/* We must copy the flattened device tree from init memory to regular
* memory because the device tree references the strings in it
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 951e18f5335b..ead0bc68439d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -117,6 +117,7 @@ config PPC
select HAVE_KRETPROBES
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select USE_GENERIC_SMP_HELPERS if SMP
@@ -421,9 +422,6 @@ config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on (SMP && PPC_PSERIES) || PPC_PS3
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config SYS_SUPPORTS_HUGETLBFS
bool
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index e94935c52019..6ec1c380a4d6 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -134,6 +134,8 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
return (__force cputime_t) ct;
}
+#define usecs_to_cputime64(us) usecs_to_cputime(us)
+
/*
* Convert cputime <-> seconds
*/
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d4df013ad779..69c7377d2071 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -381,39 +381,6 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
}
#endif
-static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
- unsigned long pte_index)
-{
- unsigned long rb, va_low;
-
- rb = (v & ~0x7fUL) << 16; /* AVA field */
- va_low = pte_index >> 3;
- if (v & HPTE_V_SECONDARY)
- va_low = ~va_low;
- /* xor vsid from AVA */
- if (!(v & HPTE_V_1TB_SEG))
- va_low ^= v >> 12;
- else
- va_low ^= v >> 24;
- va_low &= 0x7ff;
- if (v & HPTE_V_LARGE) {
- rb |= 1; /* L field */
- if (cpu_has_feature(CPU_FTR_ARCH_206) &&
- (r & 0xff000)) {
- /* non-16MB large page, must be 64k */
- /* (masks depend on page size) */
- rb |= 0x1000; /* page encoding in LP field */
- rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
- rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
- }
- } else {
- /* 4kB page */
- rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
- }
- rb |= (v >> 54) & 0x300; /* B field */
- return rb;
-}
-
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index e43fe42b9875..d0ac94f98f9e 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -29,4 +29,37 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
#define SPAPR_TCE_SHIFT 12
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ unsigned long rb, va_low;
+
+ rb = (v & ~0x7fUL) << 16; /* AVA field */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+ /* xor vsid from AVA */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> 12;
+ else
+ va_low ^= v >> 24;
+ va_low &= 0x7ff;
+ if (v & HPTE_V_LARGE) {
+ rb |= 1; /* L field */
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (r & 0xff000)) {
+ /* non-16MB large page, must be 64k */
+ /* (masks depend on page size) */
+ rb |= 0x1000; /* page encoding in LP field */
+ rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+ rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ }
+ } else {
+ /* 4kB page */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
deleted file mode 100644
index 43efc345065e..000000000000
--- a/arch/powerpc/include/asm/memblock.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_POWERPC_MEMBLOCK_H
-#define _ASM_POWERPC_MEMBLOCK_H
-
-#include <asm/udbg.h>
-
-#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
-
-#endif /* _ASM_POWERPC_MEMBLOCK_H */
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 39a2baa6ad58..9c3cd490b1bd 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -46,6 +46,12 @@ static int __init powersave_off(char *arg)
}
__setup("powersave=off", powersave_off);
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_TRACEPOINTS)
+static const bool idle_uses_rcu = 1;
+#else
+static const bool idle_uses_rcu;
+#endif
+
/*
* The body of the idle task.
*/
@@ -56,7 +62,10 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ if (!idle_uses_rcu)
+ rcu_idle_enter();
+
while (!need_resched() && !cpu_should_die()) {
ppc64_runlatch_off();
@@ -93,7 +102,9 @@ void cpu_idle(void)
HMT_medium();
ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
+ if (!idle_uses_rcu)
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
if (cpu_should_die())
cpu_die();
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 9ce1672afb59..a2158a395d96 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -107,9 +107,6 @@ void __init reserve_crashkernel(void)
unsigned long long crash_size, crash_base;
int ret;
- /* this is necessary because of memblock_phys_mem_size() */
- memblock_analyze();
-
/* use common parsing */
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fa1235b0503b..abe405dab34d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -733,8 +733,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- memblock_init();
-
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
@@ -756,20 +754,14 @@ void __init early_init_devtree(void *params)
early_reserve_mem();
phyp_dump_reserve_mem();
- limit = memory_limit;
- if (! limit) {
- phys_addr_t memsize;
-
- /* Ensure that total memory size is page-aligned, because
- * otherwise mark_bootmem() gets upset. */
- memblock_analyze();
- memsize = memblock_phys_mem_size();
- if ((memsize & PAGE_MASK) != memsize)
- limit = memsize & PAGE_MASK;
- }
+ /*
+ * Ensure that total memory size is page-aligned, because otherwise
+ * mark_bootmem() gets upset.
+ */
+ limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
memblock_enforce_memory_limit(limit);
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cb137a9b038..336983da9e72 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
tpaca->kvm_hstate.napping = 0;
vcpu->cpu = vc->pcpu;
smp_wmb();
-#ifdef CONFIG_PPC_ICP_NATIVE
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
tpaca->cpu_start = 0x80;
wmb();
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 3c791e1eb675..e2cfb9e1e20e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -658,10 +658,12 @@ program_interrupt:
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
+#ifdef CONFIG_KVM_BOOK3S_64_PR
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
r = RESUME_GUEST;
break;
}
+#endif
run->papr_hcall.nr = cmd;
for (i = 0; i < 9; ++i) {
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 26d20903f2bc..8c0d45a6faf7 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -15,6 +15,7 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 161cefde5c15..58861fa1220e 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -134,8 +134,7 @@ void __init MMU_init(void)
if (memblock.memory.cnt > 1) {
#ifndef CONFIG_WII
- memblock.memory.cnt = 1;
- memblock_analyze();
+ memblock_enforce_memory_limit(memblock.memory.regions[0].size);
printk(KERN_WARNING "Only using first contiguous memory region");
#else
wii_memory_fixups();
@@ -158,7 +157,6 @@ void __init MMU_init(void)
#ifndef CONFIG_HIGHMEM
total_memory = total_lowmem;
memblock_enforce_memory_limit(total_lowmem);
- memblock_analyze();
#endif /* CONFIG_HIGHMEM */
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2dd6bdd31fe1..8e2eb6611b0b 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -199,7 +199,7 @@ void __init do_init_bootmem(void)
unsigned long start_pfn, end_pfn;
start_pfn = memblock_region_memory_base_pfn(reg);
end_pfn = memblock_region_memory_end_pfn(reg);
- add_active_range(0, start_pfn, end_pfn);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
/* Add all physical memory to the bootmem map, mark each area
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b22a83a91cb8..e6eea0ac80c8 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -127,45 +127,25 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
}
/*
- * get_active_region_work_fn - A helper function for get_node_active_region
- * Returns datax set to the start_pfn and end_pfn if they contain
- * the initial value of datax->start_pfn between them
- * @start_pfn: start page(inclusive) of region to check
- * @end_pfn: end page(exclusive) of region to check
- * @datax: comes in with ->start_pfn set to value to search for and
- * goes out with active range if it contains it
- * Returns 1 if search value is in range else 0
- */
-static int __init get_active_region_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- struct node_active_region *data;
- data = (struct node_active_region *)datax;
-
- if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
- data->start_pfn = start_pfn;
- data->end_pfn = end_pfn;
- return 1;
- }
- return 0;
-
-}
-
-/*
- * get_node_active_region - Return active region containing start_pfn
+ * get_node_active_region - Return active region containing pfn
* Active range returned is empty if none found.
- * @start_pfn: The page to return the region for.
- * @node_ar: Returned set to the active region containing start_pfn
+ * @pfn: The page to return the region for
+ * @node_ar: Returned set to the active region containing @pfn
*/
-static void __init get_node_active_region(unsigned long start_pfn,
- struct node_active_region *node_ar)
+static void __init get_node_active_region(unsigned long pfn,
+ struct node_active_region *node_ar)
{
- int nid = early_pfn_to_nid(start_pfn);
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
- node_ar->nid = nid;
- node_ar->start_pfn = start_pfn;
- node_ar->end_pfn = start_pfn;
- work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ if (pfn >= start_pfn && pfn < end_pfn) {
+ node_ar->nid = nid;
+ node_ar->start_pfn = start_pfn;
+ node_ar->end_pfn = end_pfn;
+ break;
+ }
+ }
}
static void map_cpu_to_node(int cpu, int node)
@@ -710,9 +690,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
node_set_online(nid);
sz = numa_enforce_memory_limit(base, size);
if (sz)
- add_active_range(nid, base >> PAGE_SHIFT,
- (base >> PAGE_SHIFT)
- + (sz >> PAGE_SHIFT));
+ memblock_set_node(base, sz, nid);
} while (--ranges);
}
}
@@ -802,8 +780,7 @@ new_range:
continue;
}
- add_active_range(nid, start >> PAGE_SHIFT,
- (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+ memblock_set_node(start, size, nid);
if (--ranges)
goto new_range;
@@ -839,7 +816,8 @@ static void __init setup_nonnuma(void)
end_pfn = memblock_region_memory_end_pfn(reg);
fake_numa_create_new_node(end_pfn, &nid);
- add_active_range(nid, start_pfn, end_pfn);
+ memblock_set_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), nid);
node_set_online(nid);
}
}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 4e13d6f9023e..573ba3b69d1f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -615,7 +615,6 @@ static void __early_init_mmu(int boot_cpu)
/* limit memory so we don't have linear faults */
memblock_enforce_memory_limit(linear_map_top);
- memblock_analyze();
patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 1b5dc1a2e145..6d8dadf19f0b 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -79,24 +79,19 @@ void __init wii_memory_fixups(void)
BUG_ON(memblock.memory.cnt != 2);
BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
- p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
- p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);
+ /* trim unaligned tail */
+ memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE),
+ (phys_addr_t)ULLONG_MAX);
- wii_hole_start = p[0].base + p[0].size;
+ /* determine the hole, then add and reserve it */
+ wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
wii_hole_size = p[1].base - wii_hole_start;
-
- pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
- pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
- pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
-
- p[0].size += wii_hole_size + p[1].size;
-
- memblock.memory.cnt = 1;
- memblock_analyze();
-
- /* reserve the hole */
+ memblock_add(wii_hole_start, wii_hole_size);
memblock_reserve(wii_hole_start, wii_hole_size);
+ BUG_ON(memblock.memory.cnt != 1);
+ __memblock_dump_all();
+
/* allow ioremapping the address space in the hole */
__allow_ioremap_reserved = 1;
}
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index ea0acbd8966d..8fc62586a973 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -563,7 +563,8 @@ static void yield_shared_processor(void)
static void iseries_shared_idle(void)
{
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched() && !hvlpevent_is_pending()) {
local_irq_disable();
ppc64_runlatch_off();
@@ -577,7 +578,8 @@ static void iseries_shared_idle(void)
}
ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
if (hvlpevent_is_pending())
process_iSeries_events();
@@ -593,7 +595,8 @@ static void iseries_dedicated_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
if (!need_resched()) {
while (!need_resched()) {
ppc64_runlatch_off();
@@ -610,7 +613,8 @@ static void iseries_dedicated_idle(void)
}
ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 72714ad27842..8bd6ba542691 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -319,7 +319,6 @@ static int __init ps3_mm_add_memory(void)
}
memblock_add(start_addr, map.r1.size);
- memblock_analyze();
result = online_pages(start_pfn, nr_pages);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 27a49508b410..52d429be6c76 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -555,6 +555,8 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
(*depth)++;
trace_hcall_entry(opcode, args);
+ if (opcode == H_CEDE)
+ rcu_idle_enter();
(*depth)--;
out:
@@ -575,6 +577,8 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
goto out;
(*depth)++;
+ if (opcode == H_CEDE)
+ rcu_idle_exit();
trace_hcall_exit(opcode, retval, retbuf);
(*depth)--;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 373679b3744a..d48ede334434 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -92,6 +92,9 @@ config S390
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select HAVE_RCU_TABLE_FREE if SMP
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK
@@ -345,9 +348,6 @@ config WARN_DYNAMIC_STACK
Say N if you are unsure.
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
comment "Kernel preemption"
source "kernel/Kconfig.preempt"
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 0887a0463e33..c23c3900c304 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -72,6 +72,8 @@ static inline cputime_t usecs_to_cputime(const unsigned int m)
return (__force cputime_t)(m * 4096ULL);
}
+#define usecs_to_cputime64(m) usecs_to_cputime(m)
+
/*
* Convert cputime to milliseconds and back.
*/
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 9451b210a1b4..3201ae447990 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -91,10 +91,12 @@ static void default_idle(void)
void cpu_idle(void)
{
for (;;) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
default_idle();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e54c4ff8abaa..f11d1b037c50 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
@@ -820,7 +821,8 @@ setup_memory(void)
end_chunk = min(end_chunk, end_pfn);
if (start_chunk >= end_chunk)
continue;
- add_active_range(0, start_chunk, end_chunk);
+ memblock_add_node(PFN_PHYS(start_chunk),
+ PFN_PHYS(end_chunk - start_chunk), 0);
pfn = max(start_chunk, start_pfn);
for (; pfn < end_chunk; pfn++)
page_set_storage_key(PFN_PHYS(pfn),
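This is the generic shape of the add_active_range() removal: memory is handed to memblock in physical addresses together with its node id. A sketch of the two replacement calls used in this series:

	/* was: add_active_range(nid, start_pfn, end_pfn); */
	memblock_add_node(PFN_PHYS(start_pfn),
			  PFN_PHYS(end_pfn - start_pfn), nid);	/* add range with node */
	memblock_set_node(PFN_PHYS(start_pfn),
			  PFN_PHYS(end_pfn - start_pfn), nid);	/* tag an existing range */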
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index f43c0e4282af..9daee91e6c3f 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -22,6 +22,7 @@
#include <asm/irq.h>
#include "hwsampler.h"
+#include "op_counter.h"
#define MAX_NUM_SDB 511
#define MIN_NUM_SDB 1
@@ -896,6 +897,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
if (sample_data_ptr->P == 1) {
/* userspace sample */
unsigned int pid = sample_data_ptr->prim_asn;
+ if (!counter_config.user)
+ goto skip_sample;
rcu_read_lock();
tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
if (tsk)
@@ -903,6 +906,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
rcu_read_unlock();
} else {
/* kernelspace sample */
+ if (!counter_config.kernel)
+ goto skip_sample;
regs = task_pt_regs(current);
}
@@ -910,7 +915,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
!sample_data_ptr->P, tsk);
mutex_unlock(&hws_sem);
-
+ skip_sample:
sample_data_ptr++;
}
}
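With these two checks, samples are filtered at collection time: when the new counter_config.user or counter_config.kernel flag is clear (see op_counter.h and the 0/user and 0/kernel files added in init.c below), the sample is skipped before any task lookup is attempted.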
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 6efc18b5e60a..2297be406c61 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -2,10 +2,11 @@
* arch/s390/oprofile/init.c
*
* S390 Version
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
* Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
* Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
+ * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
*
* @remark Copyright 2002-2011 OProfile authors
*/
@@ -14,6 +15,8 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/module.h>
+#include <asm/processor.h>
#include "../../../drivers/oprofile/oprof.h"
@@ -22,6 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
#ifdef CONFIG_64BIT
#include "hwsampler.h"
+#include "op_counter.h"
#define DEFAULT_INTERVAL 4127518
@@ -35,16 +39,41 @@ static unsigned long oprofile_max_interval;
static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS;
static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS;
-static int hwsampler_file;
+static int hwsampler_enabled;
static int hwsampler_running; /* start_mutex must be held to change */
+static int hwsampler_available;
static struct oprofile_operations timer_ops;
+struct op_counter_config counter_config;
+
+enum __force_cpu_type {
+ reserved = 0, /* do not force */
+ timer,
+};
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
+{
+ if (!strcmp(str, "timer")) {
+ force_cpu_type = timer;
+ printk(KERN_INFO "oprofile: forcing timer to be returned "
+ "as cpu type\n");
+ } else {
+ force_cpu_type = 0;
+ }
+
+ return 0;
+}
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
+MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling "
+	"(report cpu_type \"timer\")");
+
static int oprofile_hwsampler_start(void)
{
int retval;
- hwsampler_running = hwsampler_file;
+ hwsampler_running = hwsampler_enabled;
if (!hwsampler_running)
return timer_ops.start();
@@ -72,10 +101,16 @@ static void oprofile_hwsampler_stop(void)
return;
}
+/*
+ * File ops used for:
+ * /dev/oprofile/0/enabled
+ * /dev/oprofile/hwsampling/hwsampler (cpu_type = timer)
+ */
+
static ssize_t hwsampler_read(struct file *file, char __user *buf,
size_t count, loff_t *offset)
{
- return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset);
+ return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset);
}
static ssize_t hwsampler_write(struct file *file, char const __user *buf,
@@ -88,9 +123,12 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
if (oprofile_started)
/*
* save to do without locking as we set
@@ -99,7 +137,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
*/
return -EBUSY;
- hwsampler_file = val;
+ hwsampler_enabled = val;
return count;
}
@@ -109,38 +147,311 @@ static const struct file_operations hwsampler_fops = {
.write = hwsampler_write,
};
+/*
+ * File ops used for:
+ * /dev/oprofile/0/count
+ * /dev/oprofile/hwsampling/hw_interval (cpu_type = timer)
+ *
+ * Make sure that the value is within the hardware range.
+ */
+
+static ssize_t hw_interval_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(oprofile_hw_interval, buf,
+ count, offset);
+}
+
+static ssize_t hw_interval_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+ if (val < oprofile_min_interval)
+ oprofile_hw_interval = oprofile_min_interval;
+ else if (val > oprofile_max_interval)
+ oprofile_hw_interval = oprofile_max_interval;
+ else
+ oprofile_hw_interval = val;
+
+ return count;
+}
+
+static const struct file_operations hw_interval_fops = {
+ .read = hw_interval_read,
+ .write = hw_interval_write,
+};
+
+/*
+ * File ops used for:
+ * /dev/oprofile/0/event
+ * Only a single event with number 0 is supported with this counter.
+ *
+ * /dev/oprofile/0/unit_mask
+ * This is a dummy file needed by the user space tools.
+ * No value other than 0 is accepted or returned.
+ */
+
+static ssize_t hwsampler_zero_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(0, buf, count, offset);
+}
+
+static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+ if (val != 0)
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations zero_fops = {
+ .read = hwsampler_zero_read,
+ .write = hwsampler_zero_write,
+};
+
+/* /dev/oprofile/0/kernel file ops. */
+
+static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(counter_config.kernel,
+ buf, count, offset);
+}
+
+static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ counter_config.kernel = val;
+
+ return count;
+}
+
+static const struct file_operations kernel_fops = {
+ .read = hwsampler_kernel_read,
+ .write = hwsampler_kernel_write,
+};
+
+/* /dev/oprofile/0/user file ops. */
+
+static ssize_t hwsampler_user_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(counter_config.user,
+ buf, count, offset);
+}
+
+static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ counter_config.user = val;
+
+ return count;
+}
+
+static const struct file_operations user_fops = {
+ .read = hwsampler_user_read,
+ .write = hwsampler_user_write,
+};
+
+
+/*
+ * File ops used for: /dev/oprofile/timer/enabled
+ * The value always has to be the inverted value of hwsampler_enabled. So
+ * no separate variable is created. That way we do not need locking.
+ */
+
+static ssize_t timer_enabled_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset);
+}
+
+static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ /* Timer cannot be disabled without having hardware sampling. */
+ if (val == 0 && !hwsampler_available)
+ return -EINVAL;
+
+ if (oprofile_started)
+ /*
+	 * safe to do without locking as we set
+ * hwsampler_running in start() when start_mutex is
+ * held
+ */
+ return -EBUSY;
+
+ hwsampler_enabled = !val;
+
+ return count;
+}
+
+static const struct file_operations timer_enabled_fops = {
+ .read = timer_enabled_read,
+ .write = timer_enabled_write,
+};
+
+
static int oprofile_create_hwsampling_files(struct super_block *sb,
- struct dentry *root)
+ struct dentry *root)
{
- struct dentry *hw_dir;
+ struct dentry *dir;
+
+ dir = oprofilefs_mkdir(sb, root, "timer");
+ if (!dir)
+ return -EINVAL;
+
+ oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops);
+
+ if (!hwsampler_available)
+ return 0;
/* reinitialize default values */
- hwsampler_file = 1;
+ hwsampler_enabled = 1;
+ counter_config.kernel = 1;
+ counter_config.user = 1;
- hw_dir = oprofilefs_mkdir(sb, root, "hwsampling");
- if (!hw_dir)
- return -EINVAL;
+ if (!force_cpu_type) {
+ /*
+ * Create the counter file system. A single virtual
+ * counter is created which can be used to
+ * enable/disable hardware sampling dynamically from
+ * user space. The user space will configure a single
+ * counter with a single event. The value of 'event'
+ * and 'unit_mask' are not evaluated by the kernel code
+ * and can only be set to 0.
+ */
+
+ dir = oprofilefs_mkdir(sb, root, "0");
+ if (!dir)
+ return -EINVAL;
- oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops);
- oprofilefs_create_ulong(sb, hw_dir, "hw_interval",
- &oprofile_hw_interval);
- oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval",
- &oprofile_min_interval);
- oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval",
- &oprofile_max_interval);
- oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks",
- &oprofile_sdbt_blocks);
+ oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops);
+ oprofilefs_create_file(sb, dir, "event", &zero_fops);
+ oprofilefs_create_file(sb, dir, "count", &hw_interval_fops);
+ oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops);
+ oprofilefs_create_file(sb, dir, "kernel", &kernel_fops);
+ oprofilefs_create_file(sb, dir, "user", &user_fops);
+ oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+ &oprofile_sdbt_blocks);
+ } else {
+ /*
+ * Hardware sampling can be used but the cpu_type is
+ * forced to timer in order to deal with legacy user
+ * space tools. The /dev/oprofile/hwsampling fs is
+ * provided in that case.
+ */
+ dir = oprofilefs_mkdir(sb, root, "hwsampling");
+ if (!dir)
+ return -EINVAL;
+
+ oprofilefs_create_file(sb, dir, "hwsampler",
+ &hwsampler_fops);
+ oprofilefs_create_file(sb, dir, "hw_interval",
+ &hw_interval_fops);
+ oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval",
+ &oprofile_min_interval);
+ oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval",
+ &oprofile_max_interval);
+ oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+ &oprofile_sdbt_blocks);
+ }
return 0;
}
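For reference, the oprofilefs layout this function creates when hardware sampling is available and no cpu type is forced:

	/dev/oprofile/timer/enabled
	/dev/oprofile/0/enabled
	/dev/oprofile/0/event
	/dev/oprofile/0/count
	/dev/oprofile/0/unit_mask
	/dev/oprofile/0/kernel
	/dev/oprofile/0/user
	/dev/oprofile/0/hw_sdbt_blocks

With force_cpu_type set, the 0/ counter directory is replaced by the legacy hwsampling/ files from the else branch.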
static int oprofile_hwsampler_init(struct oprofile_operations *ops)
{
+ /*
+ * Initialize the timer mode infrastructure as well in order
+ * to be able to switch back dynamically. oprofile_timer_init
+ * is not supposed to fail.
+ */
+ if (oprofile_timer_init(ops))
+ BUG();
+
+ memcpy(&timer_ops, ops, sizeof(timer_ops));
+ ops->create_files = oprofile_create_hwsampling_files;
+
+ /*
+	 * If the user space tools do not support newer cpu types,
+	 * the cpu_type module parameter (stored in force_cpu_type)
+	 * can be used to always return "timer" as the cpu type.
+ */
+ if (force_cpu_type != timer) {
+ struct cpuid id;
+
+		get_cpu_id(&id);
+
+ switch (id.machine) {
+ case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
+ case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
+ default: return -ENODEV;
+ }
+ }
+
if (hwsampler_setup())
return -ENODEV;
/*
- * create hwsampler files only if hwsampler_setup() succeeds.
+ * Query the range for the sampling interval from the
+ * hardware.
*/
oprofile_min_interval = hwsampler_query_min_interval();
if (oprofile_min_interval == 0)
@@ -155,23 +466,17 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
if (oprofile_hw_interval > oprofile_max_interval)
oprofile_hw_interval = oprofile_max_interval;
- if (oprofile_timer_init(ops))
- return -ENODEV;
-
- printk(KERN_INFO "oprofile: using hardware sampling\n");
-
- memcpy(&timer_ops, ops, sizeof(timer_ops));
+ printk(KERN_INFO "oprofile: System z hardware sampling "
+ "facility found.\n");
ops->start = oprofile_hwsampler_start;
ops->stop = oprofile_hwsampler_stop;
- ops->create_files = oprofile_create_hwsampling_files;
return 0;
}
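The machine ids in the switch presumably map to System z10 (0x2097 EC, 0x2098 BC) and to the z196 generation (0x2817 z196, 0x2818 z114); any other machine yields -ENODEV and thus falls back to timer mode.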
static void oprofile_hwsampler_exit(void)
{
- oprofile_timer_exit();
hwsampler_shutdown();
}
@@ -182,7 +487,15 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->backtrace = s390_backtrace;
#ifdef CONFIG_64BIT
- return oprofile_hwsampler_init(ops);
+
+ /*
+	 * -ENODEV is not reported to the caller. The module
+	 * falls back to timer mode sampling, which is always
+	 * available.
+ */
+ hwsampler_available = oprofile_hwsampler_init(ops) == 0;
+
+ return 0;
#else
return -ENODEV;
#endif
diff --git a/arch/s390/oprofile/op_counter.h b/arch/s390/oprofile/op_counter.h
new file mode 100644
index 000000000000..1a8d3ca09014
--- /dev/null
+++ b/arch/s390/oprofile/op_counter.h
@@ -0,0 +1,23 @@
+/**
+ * arch/s390/oprofile/op_counter.h
+ *
+ * Copyright (C) 2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
+ *
+ * @remark Copyright 2011 OProfile authors
+ */
+
+#ifndef OP_COUNTER_H
+#define OP_COUNTER_H
+
+struct op_counter_config {
+	/* `enabled' maps to the hwsampler_enabled variable. */
+ /* `count' maps to the oprofile_hw_interval variable. */
+ /* `event' and `unit_mask' are unused. */
+ unsigned long kernel;
+ unsigned long user;
+};
+
+extern struct op_counter_config counter_config;
+
+#endif /* OP_COUNTER_H */
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index df169e84db4e..8b0c9464aa9d 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -4,6 +4,9 @@ config SCORE
def_bool y
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
choice
prompt "System type"
@@ -60,9 +63,6 @@ config 32BIT
config ARCH_FLATMEM_ENABLE
def_bool y
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
source "mm/Kconfig"
config MEMORY_START
diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c
index 6f898c057878..b48459afefdd 100644
--- a/arch/score/kernel/setup.c
+++ b/arch/score/kernel/setup.c
@@ -26,6 +26,7 @@
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
@@ -54,7 +55,8 @@ static void __init bootmem_init(void)
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
min_low_pfn, max_low_pfn);
- add_active_range(0, min_low_pfn, max_low_pfn);
+ memblock_add_node(PFN_PHYS(min_low_pfn),
+ PFN_PHYS(max_low_pfn - min_low_pfn), 0);
free_bootmem(PFN_PHYS(start_pfn),
(max_low_pfn - start_pfn) << PAGE_SHIFT);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5629e2099130..47a2f1c2cb0d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -4,6 +4,7 @@ config SUPERH
select CLKDEV_LOOKUP
select HAVE_IDE if HAS_IOPORT
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
select HAVE_OPROFILE
select HAVE_GENERIC_DMA_COHERENT
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index ec8c84c14b17..895e337c79b6 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
#define GBECONT 0xffc10100
#define GBECONT_RMII1 BIT(17)
#define GBECONT_RMII0 BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
{
- if ((addr & 0x00000fff) < 0x0800)
+ if (((unsigned long)addr & 0x00000fff) < 0x0800)
writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
else
writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
},
};
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
{
- if ((addr & 0x00000fff) < 0x0800) {
+ if (((unsigned long)addr & 0x00000fff) < 0x0800) {
gpio_set_value(GPIO_PTT4, 1);
writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
} else {
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
};
static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
- .chan_priv_tx = SHDMA_SLAVE_MMCIF_TX,
- .chan_priv_rx = SHDMA_SLAVE_MMCIF_RX,
+ .chan_priv_tx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ },
+ .chan_priv_rx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ }
};
static struct sh_mmcif_plat_data sh_mmcif_plat = {
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
deleted file mode 100644
index e87063fad2ea..000000000000
--- a/arch/sh/include/asm/memblock.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASM_SH_MEMBLOCK_H
-#define __ASM_SH_MEMBLOCK_H
-
-#endif /* __ASM_SH_MEMBLOCK_H */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index db4ecd731a00..406508d4ce74 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -89,7 +89,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -111,7 +112,8 @@ void cpu_idle(void)
start_critical_timings();
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index c5a33f007f88..9fea49f6e667 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -157,9 +157,6 @@ void __init reserve_crashkernel(void)
unsigned long long crash_size, crash_base;
int ret;
- /* this is necessary because of memblock_phys_mem_size() */
- memblock_analyze();
-
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base);
if (ret == 0 && crash_size > 0) {
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 1a0e946679a4..7b57bf1dc855 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -230,7 +230,8 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
PAGE_KERNEL);
- add_active_range(nid, start_pfn, end_pfn);
+ memblock_set_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), nid);
}
void __init __weak plat_early_device_setup(void)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index c3e61b366493..cb8f9920f4dd 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -143,9 +143,6 @@ config MAX_ACTIVE_REGIONS
CPU_SUBTYPE_SH7785)
default "1"
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 939ca0f356f6..82cc576fab15 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -324,7 +324,6 @@ void __init paging_init(void)
unsigned long vaddr, end;
int nid;
- memblock_init();
sh_mv.mv_mem_init();
early_reserve_mem();
@@ -337,7 +336,7 @@ void __init paging_init(void)
sh_mv.mv_mem_reserve();
memblock_enforce_memory_limit(memory_limit);
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index b4c2d2b946dd..e4dd5d5a1115 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->backtrace = sh_backtrace;
return -ENODEV;
}
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index f92602e86607..70ae9d81870e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC64
select HAVE_KPROBES
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
select HAVE_SYSCALL_WRAPPERS
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
@@ -352,9 +353,6 @@ config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
-config ARCH_POPULATES_NODE_MAP
- def_bool y if SPARC64
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y if SPARC64
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
deleted file mode 100644
index c67b047ef85e..000000000000
--- a/arch/sparc/include/asm/memblock.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _SPARC64_MEMBLOCK_H
-#define _SPARC64_MEMBLOCK_H
-
-#include <asm/oplib.h>
-
-#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
-
-#endif /* !(_SPARC64_MEMBLOCK_H) */
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 7429b47c3aca..381edcd5bc29 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
dp->rcv_buf_len = 4096;
- dp->ds_states = kzalloc(sizeof(ds_states_template),
- GFP_KERNEL);
+ dp->ds_states = kmemdup(ds_states_template,
+ sizeof(ds_states_template), GFP_KERNEL);
if (!dp->ds_states)
goto out_free_rcv_buf;
- memcpy(dp->ds_states, ds_states_template,
- sizeof(ds_states_template));
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
for (i = 0; i < dp->num_ds_states; i++)
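kmemdup() is the idiomatic replacement for an allocate-then-copy pair; the sketch below is the whole pattern:

	/* was: p = kmalloc(len, GFP_KERNEL); ...; memcpy(p, src, len); */
	p = kmemdup(src, len, GFP_KERNEL);
	if (!p)
		return -ENOMEM;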
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index b272cda35a01..af5755d20fbe 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (!irq)
return -ENOMEM;
- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
- return -EINVAL;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
+ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+ return -EINVAL;
return irq;
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 3739a06a76cb..39d8b05201a2 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -95,12 +95,14 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while(1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched() && !cpu_is_offline(cpu))
sparc64_yield(cpu);
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 46614807a57f..741df916c124 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
void *new_val;
int err;
- new_val = kmalloc(len, GFP_KERNEL);
+ new_val = kmemdup(val, len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
- memcpy(new_val, val, len);
-
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index fe1e3fc31bc5..ffb883ddd0f0 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -84,7 +84,7 @@ static void prom_sync_me(void)
prom_printf("PROM SYNC COMMAND...\n");
show_free_areas(0);
- if(current->pid != 0) {
+ if (!is_idle_task(current)) {
local_irq_enable();
sys_sync();
local_irq_disable();
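is_idle_task() replaces the open-coded PID tests here and in the tile fault handler below; a sketch of the helper as this series defines it in <linux/sched.h>, assuming the PID-0 convention for idle tasks:

	static inline bool is_idle_task(struct task_struct *p)
	{
		return p->pid == 0;	/* the idle tasks are the only PID-0 tasks */
	}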
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 5175ac2f4820..8a7f81743c12 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -302,8 +302,7 @@ void __init btfixup(void)
case 'i': /* INT */
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
- else if ((insn & 0x80002000) == 0x80002000 &&
- (insn & 0x01800000) != 0x01800000) /* %LO */
+ else if ((insn & 0x80002000) == 0x80002000) /* %LO */
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
else {
prom_printf(insn_i, p, addr, insn);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 8e073d802139..b3f5e7dfea51 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -790,7 +790,7 @@ static int find_node(unsigned long addr)
return -1;
}
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = find_node(start);
start += PAGE_SIZE;
@@ -808,7 +808,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid)
return start;
}
#else
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = 0;
return end;
@@ -816,7 +816,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid)
#endif
/* This must be invoked after performing all of the necessary
- * add_active_range() calls for 'nid'. We need to be able to get
+ * memblock_set_node() calls for 'nid'. We need to be able to get
* correct data from get_pfn_range_for_nid().
*/
static void __init allocate_node_data(int nid)
@@ -987,14 +987,11 @@ static void __init add_node_ranges(void)
this_end = memblock_nid_range(start, end, &nid);
- numadbg("Adding active range nid[%d] "
+ numadbg("Setting memblock NUMA node nid[%d] "
"start[%lx] end[%lx]\n",
nid, start, this_end);
- add_active_range(nid,
- start >> PAGE_SHIFT,
- this_end >> PAGE_SHIFT);
-
+ memblock_set_node(start, this_end - start, nid);
start = this_end;
}
}
@@ -1282,7 +1279,6 @@ static void __init bootmem_init_nonnuma(void)
{
unsigned long top_of_ram = memblock_end_of_DRAM();
unsigned long total_ram = memblock_phys_mem_size();
- struct memblock_region *reg;
numadbg("bootmem_init_nonnuma()\n");
@@ -1292,20 +1288,8 @@ static void __init bootmem_init_nonnuma(void)
(top_of_ram - total_ram) >> 20);
init_node_masks_nonnuma();
-
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
-
- if (!reg->size)
- continue;
-
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
- add_active_range(0, start_pfn, end_pfn);
- }
-
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
allocate_node_data(0);
-
node_set_online(0);
}
@@ -1769,8 +1753,6 @@ void __init paging_init(void)
sun4v_ktsb_init();
}
- memblock_init();
-
/* Find available physical memory...
*
* Read it twice in order to work around a bug in openfirmware.
@@ -1796,7 +1778,7 @@ void __init paging_init(void)
memblock_enforce_memory_limit(cmdline_memory_size);
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
set_bit(0, mmu_context_bmap);
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 9c45d8bbdf57..4c1ac6e5347a 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -85,7 +85,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
if (cpu_is_offline(cpu))
BUG(); /* no HOTPLUG_CPU */
@@ -105,7 +106,8 @@ void cpu_idle(void)
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 25b7b90fd620..c1eaaa1fcc20 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -54,7 +54,7 @@ static noinline void force_sig_info_fault(const char *type, int si_signo,
if (unlikely(tsk->pid < 2)) {
panic("Signal %d (code %d) at %#lx sent to %s!",
si_signo, si_code & 0xffff, address,
- tsk->pid ? "init" : "the idle task");
+ is_idle_task(tsk) ? "the idle task" : "init");
}
info.si_signo = si_signo;
@@ -515,7 +515,7 @@ no_context:
if (unlikely(tsk->pid < 2)) {
panic("Kernel page fault running %s!",
- tsk->pid ? "init" : "the idle task");
+ is_idle_task(tsk) ? "the idle task" : "init");
}
/*
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c5338351aecd..69f24905abdc 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -246,10 +246,12 @@ void default_idle(void)
if (need_resched())
schedule();
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
nsecs = disable_timer();
idle_sleep(nsecs);
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
}
}
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index ba401df971ed..52edc2b62873 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -55,7 +55,8 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
local_irq_disable();
stop_critical_timings();
@@ -63,7 +64,8 @@ void cpu_idle(void)
local_irq_enable();
start_critical_timings();
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 471b6bca8da4..673d7a89d8ff 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
+#include <asm/memblock.h>
#include "setup.h"
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 3b379cddbc64..de186bde8975 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -26,6 +26,7 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/memblock.h>
#include <mach/map.h>
#include "mm.h"
@@ -245,7 +246,6 @@ void __init uc32_memblock_init(struct meminfo *mi)
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
meminfo_cmp, NULL);
- memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -264,7 +264,7 @@ void __init uc32_memblock_init(struct meminfo *mi)
uc32_mm_memblock_reserve();
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
}
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index 3e5c3e5a0b45..43c20b40e444 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -25,6 +25,7 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/memblock.h>
#include <mach/map.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index efb42949cc09..67d6af3581bc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,8 @@ config X86
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
@@ -204,9 +206,6 @@ config ZONE_DMA32
bool
default X86_64
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config AUDIT_ARCH
bool
default X86_64
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 908b96957d88..37782566af24 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -117,7 +117,7 @@ static inline void early_memtest(unsigned long start, unsigned long end)
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+extern u64 early_reserve_e820(u64 sizet, u64 align);
void memblock_x86_fill(void);
void memblock_find_dma_reserve(void);
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 88c765e16410..74df3f1eddfd 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -137,6 +137,13 @@ static inline int insn_is_avx(struct insn *insn)
return (insn->vex_prefix.value != 0);
}
+/* Check whether this instruction was decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
{
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
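A hedged usage sketch for insn_complete(), assuming the three-argument insn_init() of this era and that insn_get_length() decodes all remaining fields as a side effect:

	struct insn insn;

	insn_init(&insn, kaddr, x86_64);
	insn_get_length(&insn);		/* decodes opcode/modrm/sib/disp/imm */
	if (!insn_complete(&insn))
		return -EINVAL;		/* buffer truncated or undecodable */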
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
deleted file mode 100644
index 0cd3800f33b9..000000000000
--- a/arch/x86/include/asm/memblock.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _X86_MEMBLOCK_H
-#define _X86_MEMBLOCK_H
-
-#define ARCH_DISCARD_MEMBLOCK
-
-u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-
-void memblock_x86_reserve_range(u64 start, u64 end, char *name);
-void memblock_x86_free_range(u64 start, u64 end);
-struct range;
-int __get_free_all_memory_range(struct range **range, int nodeid,
- unsigned long start_pfn, unsigned long end_pfn);
-int get_free_all_memory_range(struct range **rangep, int nodeid);
-
-void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long last_pfn);
-u64 memblock_x86_hole_size(u64 start, u64 end);
-u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
-u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
-u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
-bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
-
-#endif
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index f61c62f7d5d8..096c975e099f 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -57,6 +57,7 @@
(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
+#define ARCH_PERFMON_EVENTS_COUNT 7
/*
* Intel "Architectural Performance Monitoring" CPUID
@@ -72,6 +73,19 @@ union cpuid10_eax {
unsigned int full;
};
+union cpuid10_ebx {
+ struct {
+ unsigned int no_unhalted_core_cycles:1;
+ unsigned int no_instructions_retired:1;
+ unsigned int no_unhalted_reference_cycles:1;
+ unsigned int no_llc_reference:1;
+ unsigned int no_llc_misses:1;
+ unsigned int no_branch_instruction_retired:1;
+ unsigned int no_branch_misses_retired:1;
+ } split;
+ unsigned int full;
+};
+
union cpuid10_edx {
struct {
unsigned int num_counters_fixed:5;
@@ -81,6 +95,15 @@ union cpuid10_edx {
unsigned int full;
};
+struct x86_pmu_capability {
+ int version;
+ int num_counters_gp;
+ int num_counters_fixed;
+ int bit_width_gp;
+ int bit_width_fixed;
+ unsigned int events_mask;
+ int events_mask_len;
+};
/*
* Fixed-purpose performance events:
@@ -89,23 +112,24 @@ union cpuid10_edx {
/*
* All 3 fixed-mode PMCs are configured via this single MSR:
*/
-#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
+#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
/*
* The counts are available in three separate MSRs:
*/
/* Instr_Retired.Any: */
-#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
+#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
+#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
/* CPU_CLK_Unhalted.Core: */
-#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
+#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
+#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
/* CPU_CLK_Unhalted.Ref: */
-#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
-#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
+#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
+#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2)
+#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
/*
* We model BTS tracing as another fixed-mode PMC.
@@ -202,6 +226,7 @@ struct perf_guest_switch_msr {
};
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
+extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
#else
static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
@@ -209,6 +234,11 @@ static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
return NULL;
}
+static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+{
+ memset(cap, 0, sizeof(*cap));
+}
+
static inline void perf_events_lapic_init(void) { }
#endif
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3d2661ca6542..6e76c191a835 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -88,13 +88,13 @@ static u32 __init allocate_aperture(void)
*/
addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
aper_size, aper_size);
- if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) {
+ if (!addr || addr + aper_size > GART_MAX_ADDR) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%lx,%uK)\n",
addr, aper_size>>10);
return 0;
}
- memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
+ memblock_reserve(addr, aper_size);
/*
* Kmemleak should not scan this block as it may not be mapped via the
* kernel direct mapping.
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f98d84caf94c..2cd2d93643dc 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -876,8 +876,8 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
* Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
- exit_idle();
irq_enter();
+ exit_idle();
local_apic_timer_interrupt();
irq_exit();
@@ -1809,8 +1809,8 @@ void smp_spurious_interrupt(struct pt_regs *regs)
{
u32 v;
- exit_idle();
irq_enter();
+ exit_idle();
/*
* Check if this really is a spurious interrupt and ACK it
* if it is a vectored one. Just in case...
@@ -1846,8 +1846,8 @@ void smp_error_interrupt(struct pt_regs *regs)
"Illegal register address", /* APIC Error Bit 7 */
};
- exit_idle();
irq_enter();
+ exit_idle();
/* First tickle the hardware, only then report what went on. -- REW */
v0 = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
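The swap is not cosmetic: irq_enter() performs rcu_irq_enter(), which tells RCU that the CPU has left its extended idle period, and only after that may exit_idle() run idle-notifier callbacks that use RCU read-side primitives. The same reordering is applied to io_apic.c and the mcheck handlers below:

	irq_enter();	/* rcu_irq_enter(): RCU is usable again on this CPU */
	exit_idle();	/* notifiers may now take rcu_read_lock() safely */
	/* ... handle the interrupt ... */
	irq_exit();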
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6d939d7847e2..898055585516 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2421,8 +2421,8 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
unsigned vector, me;
ack_APIC_irq();
- exit_idle();
irq_enter();
+ exit_idle();
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 452932d34730..5da1269e8ddc 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -62,7 +62,8 @@ early_param("memory_corruption_check_size", set_corruption_check_size);
void __init setup_bios_corruption_check(void)
{
- u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */
+ phys_addr_t start, end;
+ u64 i;
if (memory_corruption_check == -1) {
memory_corruption_check =
@@ -82,28 +83,23 @@ void __init setup_bios_corruption_check(void)
corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
- while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
- u64 size;
- addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);
+ for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
+ PAGE_SIZE, corruption_check_size);
+ end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
+ PAGE_SIZE, corruption_check_size);
+ if (start >= end)
+ continue;
- if (addr == MEMBLOCK_ERROR)
- break;
-
- if (addr >= corruption_check_size)
- break;
-
- if ((addr + size) > corruption_check_size)
- size = corruption_check_size - addr;
-
- memblock_x86_reserve_range(addr, addr + size, "SCAN RAM");
- scan_areas[num_scan_areas].addr = addr;
- scan_areas[num_scan_areas].size = size;
- num_scan_areas++;
+ memblock_reserve(start, end - start);
+ scan_areas[num_scan_areas].addr = start;
+ scan_areas[num_scan_areas].size = end - start;
/* Assume we've already mapped this early memory */
- memset(__va(addr), 0, size);
+ memset(__va(start), 0, end - start);
- addr += size;
+ if (++num_scan_areas >= MAX_SCAN_AREAS)
+ break;
}
if (num_scan_areas)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 787e06c84ea6..ce215616d5b9 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -397,8 +397,8 @@ static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
- exit_idle();
irq_enter();
+ exit_idle();
inc_irq_stat(irq_thermal_count);
smp_thermal_vector();
irq_exit();
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index d746df2909c9..aa578cadb940 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = default_threshold_interrupt;
asmlinkage void smp_threshold_interrupt(void)
{
- exit_idle();
irq_enter();
+ exit_idle();
inc_irq_stat(irq_threshold_count);
mce_threshold_vector();
irq_exit();
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bda212a0010..5adce1040b11 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -484,18 +484,195 @@ static inline int is_x86_event(struct perf_event *event)
return event->pmu == &pmu;
}
+/*
+ * Event scheduler state:
+ *
+ * Assign events to counters, iterating over all events and counters
+ * and starting with the events of least weight. Keep the current iterator
+ * state in struct sched_state.
+ */
+struct sched_state {
+ int weight;
+ int event; /* event index */
+ int counter; /* counter index */
+	int	unassigned;	/* number of events left to assign */
+ unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+};
+
+/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
+#define SCHED_STATES_MAX 2
+
+struct perf_sched {
+ int max_weight;
+ int max_events;
+ struct event_constraint **constraints;
+ struct sched_state state;
+ int saved_states;
+ struct sched_state saved[SCHED_STATES_MAX];
+};
+
+/*
+ * Initialize interator that runs through all events and counters.
+ */
+static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
+ int num, int wmin, int wmax)
+{
+ int idx;
+
+ memset(sched, 0, sizeof(*sched));
+ sched->max_events = num;
+ sched->max_weight = wmax;
+ sched->constraints = c;
+
+ for (idx = 0; idx < num; idx++) {
+ if (c[idx]->weight == wmin)
+ break;
+ }
+
+ sched->state.event = idx; /* start with min weight */
+ sched->state.weight = wmin;
+ sched->state.unassigned = num;
+}
+
+static void perf_sched_save_state(struct perf_sched *sched)
+{
+ if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
+ return;
+
+ sched->saved[sched->saved_states] = sched->state;
+ sched->saved_states++;
+}
+
+static bool perf_sched_restore_state(struct perf_sched *sched)
+{
+ if (!sched->saved_states)
+ return false;
+
+ sched->saved_states--;
+ sched->state = sched->saved[sched->saved_states];
+
+ /* continue with next counter: */
+ clear_bit(sched->state.counter++, sched->state.used);
+
+ return true;
+}
+
+/*
+ * Select a counter for the current event to schedule. Return true on
+ * success.
+ */
+static bool __perf_sched_find_counter(struct perf_sched *sched)
+{
+ struct event_constraint *c;
+ int idx;
+
+ if (!sched->state.unassigned)
+ return false;
+
+ if (sched->state.event >= sched->max_events)
+ return false;
+
+ c = sched->constraints[sched->state.event];
+
+ /* Prefer fixed purpose counters */
+ if (x86_pmu.num_counters_fixed) {
+ idx = X86_PMC_IDX_FIXED;
+ for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
+ if (!__test_and_set_bit(idx, sched->state.used))
+ goto done;
+ }
+ }
+ /* Grab the first unused counter starting with idx */
+ idx = sched->state.counter;
+ for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+ if (!__test_and_set_bit(idx, sched->state.used))
+ goto done;
+ }
+
+ return false;
+
+done:
+ sched->state.counter = idx;
+
+ if (c->overlap)
+ perf_sched_save_state(sched);
+
+ return true;
+}
+
+static bool perf_sched_find_counter(struct perf_sched *sched)
+{
+ while (!__perf_sched_find_counter(sched)) {
+ if (!perf_sched_restore_state(sched))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Go through all unassigned events and find the next one to schedule.
+ * Take events with the least weight first. Return true on success.
+ */
+static bool perf_sched_next_event(struct perf_sched *sched)
+{
+ struct event_constraint *c;
+
+ if (!sched->state.unassigned || !--sched->state.unassigned)
+ return false;
+
+ do {
+ /* next event */
+ sched->state.event++;
+ if (sched->state.event >= sched->max_events) {
+ /* next weight */
+ sched->state.event = 0;
+ sched->state.weight++;
+ if (sched->state.weight > sched->max_weight)
+ return false;
+ }
+ c = sched->constraints[sched->state.event];
+ } while (c->weight != sched->state.weight);
+
+ sched->state.counter = 0; /* start with first counter */
+
+ return true;
+}
+
+/*
+ * Assign a counter for each event.
+ */
+static int perf_assign_events(struct event_constraint **constraints, int n,
+ int wmin, int wmax, int *assign)
+{
+ struct perf_sched sched;
+
+ perf_sched_init(&sched, constraints, n, wmin, wmax);
+
+ do {
+ if (!perf_sched_find_counter(&sched))
+ break; /* failed */
+ if (assign)
+ assign[sched.state.event] = sched.state.counter;
+ } while (perf_sched_next_event(&sched));
+
+ return sched.state.unassigned;
+}
+
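A worked example under assumed constraints: take four events where e0 has idxmsk 0x09 (counters 0 and 3, overlap set, weight 2) and e1..e3 each have idxmsk 0x07 (counters 0-2, weight 3). e0 is placed first, takes counter 0 and saves a backtrack state. e1 and e2 take counters 1 and 2, but e3 then finds counters 0-2 all in use. perf_sched_restore_state() rewinds to the saved state, frees counter 0 and resumes e0's search from counter 1, so e0 lands on counter 3; e1..e3 are then rescheduled onto counters 0-2 and the assignment succeeds. This is exactly the situation the overlap flag exists for.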
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- int i, j, w, wmax, num = 0;
+ int i, wmin, wmax, num = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
- for (i = 0; i < n; i++) {
+ for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
constraints[i] = c;
+ wmin = min(wmin, c->weight);
+ wmax = max(wmax, c->weight);
}
/*
@@ -521,60 +698,12 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (assign)
assign[i] = hwc->idx;
}
- if (i == n)
- goto done;
-
- /*
- * begin slow path
- */
-
- bitmap_zero(used_mask, X86_PMC_IDX_MAX);
- /*
- * weight = number of possible counters
- *
- * 1 = most constrained, only works on one counter
- * wmax = least constrained, works on any counter
- *
- * assign events to counters starting with most
- * constrained events.
- */
- wmax = x86_pmu.num_counters;
+ /* slow path */
+ if (i != n)
+ num = perf_assign_events(constraints, n, wmin, wmax, assign);
/*
- * when fixed event counters are present,
- * wmax is incremented by 1 to account
- * for one more choice
- */
- if (x86_pmu.num_counters_fixed)
- wmax++;
-
- for (w = 1, num = n; num && w <= wmax; w++) {
- /* for each event */
- for (i = 0; num && i < n; i++) {
- c = constraints[i];
- hwc = &cpuc->event_list[i]->hw;
-
- if (c->weight != w)
- continue;
-
- for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
- if (!test_bit(j, used_mask))
- break;
- }
-
- if (j == X86_PMC_IDX_MAX)
- break;
-
- __set_bit(j, used_mask);
-
- if (assign)
- assign[i] = j;
- num--;
- }
- }
-done:
- /*
* scheduling failed or is just a simulation,
* free resources if necessary
*/
@@ -1119,6 +1248,7 @@ static void __init pmu_check_apic(void)
static int __init init_hw_perf_events(void)
{
+ struct x86_pmu_quirk *quirk;
struct event_constraint *c;
int err;
@@ -1147,8 +1277,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
- if (x86_pmu.quirks)
- x86_pmu.quirks();
+ for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
+ quirk->func();
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
@@ -1171,12 +1301,18 @@ static int __init init_hw_perf_events(void)
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
- 0, x86_pmu.num_counters);
+ 0, x86_pmu.num_counters, 0);
if (x86_pmu.event_constraints) {
+ /*
+ * event on fixed counter2 (REF_CYCLES) only works on this
+ * counter, so do not extend mask to generic counters
+ */
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != X86_RAW_EVENT_MASK)
+ if (c->cmask != X86_RAW_EVENT_MASK
+ || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
continue;
+ }
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
@@ -1566,3 +1702,15 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
return misc;
}
+
+void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+{
+ cap->version = x86_pmu.version;
+ cap->num_counters_gp = x86_pmu.num_counters;
+ cap->num_counters_fixed = x86_pmu.num_counters_fixed;
+ cap->bit_width_gp = x86_pmu.cntval_bits;
+ cap->bit_width_fixed = x86_pmu.cntval_bits;
+ cap->events_mask = (unsigned int)x86_pmu.events_maskl;
+ cap->events_mask_len = x86_pmu.events_mask_len;
+}
+EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
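A hedged consumer sketch; the export is aimed at code such as a KVM virtual PMU that must mirror the host PMU geometry:

	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);
	/* e.g. expose cap.num_counters_gp general and cap.num_counters_fixed
	 * fixed counters to a guest, each cap.bit_width_gp bits wide. */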
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index b9698d40ac4b..8944062f46e2 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -45,6 +45,7 @@ struct event_constraint {
u64 code;
u64 cmask;
int weight;
+ int overlap;
};
struct amd_nb {
@@ -151,15 +152,40 @@ struct cpu_hw_events {
void *kfree_on_online;
};
-#define __EVENT_CONSTRAINT(c, n, m, w) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
+ .overlap = (o), \
}
#define EVENT_CONSTRAINT(c, n, m) \
- __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)
+
+/*
+ * The overlap flag marks event constraints with overlapping counter
+ * masks. This is the case if the counter mask of such an event is not
+ * a subset of any other counter mask of a constraint with an equal or
+ * higher weight, e.g.:
+ *
+ * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
+ * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
+ * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
+ *
+ * The event scheduler may not select the correct counter in the first
+ * cycle because it needs to know which subsequent events will be
+ * scheduled. It may then fail to schedule the events. So we set the
+ * overlap flag for such constraints to give the scheduler a hint which
+ * events to select for counter rescheduling.
+ *
+ * Care must be taken as the rescheduling algorithm is O(n!) which
+ * will increase scheduling cycles for an over-committed system
+ * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
+ * and their counter masks must be kept to a minimum.
+ */
+#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
/*
* Constraint on the Event code.
@@ -235,6 +261,11 @@ union perf_capabilities {
u64 capabilities;
};
+struct x86_pmu_quirk {
+ struct x86_pmu_quirk *next;
+ void (*func)(void);
+};
+
/*
* struct x86_pmu - generic x86 pmu
*/
@@ -259,6 +290,11 @@ struct x86_pmu {
int num_counters_fixed;
int cntval_bits;
u64 cntval_mask;
+ union {
+ unsigned long events_maskl;
+ unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
+ };
+ int events_mask_len;
int apic;
u64 max_period;
struct event_constraint *
@@ -268,7 +304,7 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
- void (*quirks)(void);
+ struct x86_pmu_quirk *quirks;
int perfctr_second_write;
int (*cpu_prepare)(int cpu);
@@ -309,6 +345,15 @@ struct x86_pmu {
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
+#define x86_add_quirk(func_) \
+do { \
+ static struct x86_pmu_quirk __quirk __initdata = { \
+ .func = func_, \
+ }; \
+ __quirk.next = x86_pmu.quirks; \
+ x86_pmu.quirks = &__quirk; \
+} while (0)
+
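A hedged usage sketch; the list is walked once by init_hw_perf_events() (see the quirk loop in the perf_event.c hunk above), so quirks must be registered before that, as intel_pmu_init() does in this patch. my_model_quirk is hypothetical:

	static __init void my_model_quirk(void)
	{
		x86_pmu.pebs = 0;		/* example adjustment */
	}

	/* ...during PMU init, before init_hw_perf_events() runs: */
	x86_add_quirk(my_model_quirk);		/* prepends to x86_pmu.quirks */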
#define ERF_NO_HT_SHARING 1
#define ERF_HAS_RSP_1 2
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index aeefd45697a2..0397b23be8e9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -492,7 +492,7 @@ static __initconst const struct x86_pmu amd_pmu = {
static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
-static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
+static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 8d601b18bf9f..3bd37bdf1b8e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -28,6 +28,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -45,12 +46,7 @@ static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /*
- * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
- * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
- * ratio between these counters.
- */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -68,7 +64,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -90,7 +86,7 @@ static struct event_constraint intel_westmere_event_constraints[] __read_mostly
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -102,7 +98,7 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
@@ -125,7 +121,7 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
EVENT_CONSTRAINT_END
};
@@ -1169,7 +1165,7 @@ again:
*/
c = &unconstrained;
} else if (intel_try_alt_er(event, orig_idx)) {
- raw_spin_unlock(&era->lock);
+ raw_spin_unlock_irqrestore(&era->lock, flags);
goto again;
}
raw_spin_unlock_irqrestore(&era->lock, flags);
@@ -1519,7 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.guest_get_msrs = intel_guest_get_msrs,
};
-static void intel_clovertown_quirks(void)
+static __init void intel_clovertown_quirk(void)
{
/*
* PEBS is unreliable due to:
@@ -1545,19 +1541,60 @@ static void intel_clovertown_quirks(void)
x86_pmu.pebs_constraints = NULL;
}
-static void intel_sandybridge_quirks(void)
+static __init void intel_sandybridge_quirk(void)
{
printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
x86_pmu.pebs = 0;
x86_pmu.pebs_constraints = NULL;
}
+static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
+ { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
+ { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
+ { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
+ { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
+ { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
+ { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
+ { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
+};
+
+static __init void intel_arch_events_quirk(void)
+{
+ int bit;
+
+ /* disable events that CPUID reports as not present */
+ for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
+ intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
+ printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
+ intel_arch_events_map[bit].name);
+ }
+}
+
+static __init void intel_nehalem_quirk(void)
+{
+ union cpuid10_ebx ebx;
+
+ ebx.full = x86_pmu.events_maskl;
+ if (ebx.split.no_branch_misses_retired) {
+ /*
+ * Erratum AAJ80 detected, we work it around by using
+ * the BR_MISP_EXEC.ANY event. This will over-count
+ * branch-misses, but it's still much better than the
+ * architectural event which is often completely bogus:
+ */
+ intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+ ebx.split.no_branch_misses_retired = 0;
+ x86_pmu.events_maskl = ebx.full;
+ printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
+ }
+}
+
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
+ union cpuid10_ebx ebx;
unsigned int unused;
- unsigned int ebx;
int version;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
@@ -1574,8 +1611,8 @@ __init int intel_pmu_init(void)
* Check whether the Architectural PerfMon supports
* Branch Misses Retired hw_event or not.
*/
- cpuid(10, &eax.full, &ebx, &unused, &edx.full);
- if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
+ cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+ if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
return -ENODEV;
version = eax.split.version_id;
@@ -1589,6 +1626,9 @@ __init int intel_pmu_init(void)
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
+ x86_pmu.events_maskl = ebx.full;
+ x86_pmu.events_mask_len = eax.split.mask_length;
+
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
@@ -1608,6 +1648,8 @@ __init int intel_pmu_init(void)
intel_ds_init();
+ x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
+
/*
* Install the hw-cache-events table:
*/
@@ -1617,7 +1659,7 @@ __init int intel_pmu_init(void)
break;
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
- x86_pmu.quirks = intel_clovertown_quirks;
+ x86_add_quirk(intel_clovertown_quirk);
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
@@ -1651,17 +1693,8 @@ __init int intel_pmu_init(void)
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
- if (ebx & 0x40) {
- /*
- * Erratum AAJ80 detected, we work it around by using
- * the BR_MISP_EXEC.ANY event. This will over-count
- * branch-misses, but it's still much better than the
- * architectural event which is often completely bogus:
- */
- intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+ x86_add_quirk(intel_nehalem_quirk);
- pr_cont("erratum AAJ80 worked around, ");
- }
pr_cont("Nehalem events, ");
break;
@@ -1701,7 +1734,7 @@ __init int intel_pmu_init(void)
break;
case 42: /* SandyBridge */
- x86_pmu.quirks = intel_sandybridge_quirks;
+ x86_add_quirk(intel_sandybridge_quirk);
case 45: /* SandyBridge, "Romley-EP" */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -1738,5 +1771,6 @@ __init int intel_pmu_init(void)
break;
}
}
+
return 0;
}
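intel_arch_events_quirk() above keys off CPUID leaf 0xA: EAX[31:24] gives the length of the EBX bit vector, and a set EBX bit means the corresponding architectural event is unavailable. A user-space sketch of the same check, assuming only that the compiler provides cpuid.h; the name table mirrors intel_arch_events_map and is illustrative:

	#include <cpuid.h>
	#include <stdio.h>

	/* names indexed by CPUID.0AH:EBX bit position */
	static const char * const ev[] = {
		"cpu cycles", "instructions", "bus cycles", "cache references",
		"cache misses", "branch instructions", "branch misses",
	};

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx, i, len;

		if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
			return 1;
		len = (eax >> 24) & 0xff;	/* EBX bit-vector length */
		for (i = 0; i < len && i < 7; i++)
			if (ebx & (1u << i))	/* set bit = NOT available */
				printf("event '%s' unavailable\n", ev[i]);
		return 0;
	}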
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 3b97a80ce329..c99f9ed013d5 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(" Bad EIP value.");
+ printk(KERN_CONT " Bad EIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk("<%02x> ", c);
+ printk(KERN_CONT "<%02x> ", c);
else
- printk("%02x ", c);
+ printk(KERN_CONT "%02x ", c);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 19853ad8afc5..6d728d9284bd 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(" Bad RIP value.");
+ printk(KERN_CONT " Bad RIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk("<%02x> ", c);
+ printk(KERN_CONT "<%02x> ", c);
else
- printk("%02x ", c);
+ printk(KERN_CONT "%02x ", c);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)
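The conversions above are needed because a printk() without a level prefix starts a new message at the default loglevel; fragments that continue the current line must say so explicitly. The resulting pattern, sketched with hypothetical buf/len:

	int i;

	printk(KERN_INFO "Code:");
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", buf[i]);
	printk(KERN_CONT "\n");		/* terminate the continued line */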
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 303a0e48f076..8071e2f3d6eb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -738,35 +738,17 @@ core_initcall(e820_mark_nvs_memory);
/*
* pre-allocate 4k and reserve it in memblock and e820_saved
*/
-u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
+u64 __init early_reserve_e820(u64 size, u64 align)
{
- u64 size = 0;
u64 addr;
- u64 start;
- for (start = startt; ; start += size) {
- start = memblock_x86_find_in_range_size(start, &size, align);
- if (start == MEMBLOCK_ERROR)
- return 0;
- if (size >= sizet)
- break;
+ addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+ if (addr) {
+ e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
+ printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+ update_e820_saved();
}
-#ifdef CONFIG_X86_32
- if (start >= MAXMEM)
- return 0;
- if (start + size > MAXMEM)
- size = MAXMEM - start;
-#endif
-
- addr = round_down(start + size - sizet, align);
- if (addr < start)
- return 0;
- memblock_x86_reserve_range(addr, addr + sizet, "new next");
- e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
- update_e820_saved();
-
return addr;
}
@@ -1090,7 +1072,7 @@ void __init memblock_x86_fill(void)
* We are safe to enable resizing, because memblock_x86_fill()
* runs rather late for x86
*/
- memblock_can_resize = 1;
+ memblock_allow_resize();
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
@@ -1105,22 +1087,36 @@ void __init memblock_x86_fill(void)
memblock_add(ei->addr, ei->size);
}
- memblock_analyze();
memblock_dump_all();
}
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
- u64 free_size_pfn;
- u64 mem_size_pfn;
+ u64 nr_pages = 0, nr_free_pages = 0;
+ unsigned long start_pfn, end_pfn;
+ phys_addr_t start, end;
+ int i;
+ u64 u;
+
/*
* We need to find out the used area below MAX_DMA_PFN. Use memblock
* to get the free size in [0, MAX_DMA_PFN] first, and assume boot
* memory will not take space below MAX_DMA_PFN.
*/
- mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
- free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
- set_dma_reserve(mem_size_pfn - free_size_pfn);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+ start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
+ end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
+ nr_pages += end_pfn - start_pfn;
+ }
+
+ for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+ start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
+ end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
+ if (start_pfn < end_pfn)
+ nr_free_pages += end_pfn - start_pfn;
+ }
+
+ set_dma_reserve(nr_pages - nr_free_pages);
#endif
}
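The rewrite above leans on the generic memblock iterators instead of the x86-private accounting helpers. A hedged sketch of for_each_free_mem_range()'s calling convention, matching the loop variables used in the hunk:

	phys_addr_t start, end;
	u64 i;

	/* walk every free range (memory minus reserved), any node */
	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		pr_info("free: 0x%llx-0x%llx\n",
			(unsigned long long)start, (unsigned long long)end);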
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index af0699ba48cf..48d9d4ea1020 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -52,5 +52,5 @@ void __init reserve_ebda_region(void)
lowmem = 0x9f000;
/* reserve all memory between lowmem and the 1MB mark */
- memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
+ memblock_reserve(lowmem, 0x100000 - lowmem);
}
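This hunk shows the shape of the API conversion running through the whole series: the old x86 helper took (start, end, name), while the generic memblock_reserve() takes (base, size) and drops the debug label. A hypothetical adapter for callers that still think in end addresses:

	/* illustrative only: bridge (start, end) callers to (base, size) */
	static inline void reserve_start_end(phys_addr_t start, phys_addr_t end)
	{
		memblock_reserve(start, end - start);
	}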
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 3bb08509a7a1..51ff18616d50 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -31,9 +31,8 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
- memblock_init();
-
- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
@@ -42,7 +41,7 @@ void __init i386_start_kernel(void)
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
- memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+ memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5655c2272adb..3a3b779f41d3 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -98,9 +98,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
{
copy_bootdata(__va(real_mode_data));
- memblock_init();
-
- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
@@ -109,7 +108,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
- memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+ memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 429e0c92924e..5d31e5bdbf85 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -181,8 +181,8 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
unsigned vector = ~regs->orig_ax;
unsigned irq;
- exit_idle();
irq_enter();
+ exit_idle();
irq = __this_cpu_read(vector_irq[vector]);
@@ -209,10 +209,10 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
ack_APIC_irq();
- exit_idle();
-
irq_enter();
+ exit_idle();
+
inc_irq_stat(x86_platform_ipis);
if (x86_platform_ipi_callback)
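Both hunks flip the same ordering for a reason visible in the idle-loop changes below: irq_enter() is what tells RCU that the CPU has left its idle extended quiescent state, and exit_idle() fires notifiers that may legitimately use RCU, so it must run second. The required shape, sketched:

	irq_enter();	/* RCU is watching again from here on */
	exit_idle();	/* idle-exit notifiers may now use RCU */
	/* ... handle the interrupt ... */
	irq_exit();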
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index ea9d5f2f13ef..2889b3d43882 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -50,7 +50,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
put_online_cpus();
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
+__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
__jump_label_transform(entry, type, text_poke_early);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 0741b062a304..ca470e4c92dc 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -564,9 +564,7 @@ void __init default_get_smp_config(unsigned int early)
static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
- unsigned long size = get_mpc_size(mpf->physptr);
-
- memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc");
+ memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}
static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -595,7 +593,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
mpf, (u64)virt_to_phys(mpf));
mem = virt_to_phys(mpf);
- memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf");
+ memblock_reserve(mem, sizeof(*mpf));
if (mpf->physptr)
smp_reserve_memory(mpf);
@@ -836,10 +834,8 @@ early_param("alloc_mptable", parse_alloc_mptable_opt);
void __init early_reserve_e820_mpc_new(void)
{
- if (enable_update_mptable && alloc_mptable) {
- u64 startt = 0;
- mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
- }
+ if (enable_update_mptable && alloc_mptable)
+ mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
}
static int __init update_mp_table(void)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 795b79f984c2..485204f58cda 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -99,7 +99,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -116,7 +117,8 @@ void cpu_idle(void)
pm_idle();
start_critical_timings();
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
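tick_nohz_idle_enter() no longer implies rcu_idle_enter(), so the idle loop must bracket the low-power wait explicitly. A condensed sketch of the new nesting in the 32-bit loop (error paths and preemption bookkeeping omitted):

	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();	/* RCU may now ignore this CPU */
		while (!need_resched())
			pm_idle();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule();
	}

The 64-bit variant below instead enters RCU idle inside the inner loop, because its enter_idle() notifiers themselves need RCU.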
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3bd7e6eebf31..64e926c89a6f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -122,7 +122,7 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
while (!need_resched()) {
rmb();
@@ -139,8 +139,14 @@ void cpu_idle(void)
enter_idle();
/* Don't trace irqs off for idle */
stop_critical_timings();
+
+ /* enter_idle() needs RCU for its notifiers */
+ rcu_idle_enter();
+
if (cpuidle_idle_call())
pm_idle();
+
+ rcu_idle_exit();
start_critical_timings();
/* In many cases the interrupt that ended idle
@@ -149,7 +155,7 @@ void cpu_idle(void)
__exit_idle();
}
- tick_nohz_restart_sched_tick();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cf0ef986cb6d..d05444ac2aea 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -306,7 +306,8 @@ static void __init cleanup_highmap(void)
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
- memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
+ memblock_reserve(__pa(_brk_start),
+ __pa(_brk_end) - __pa(_brk_start));
/* Mark brk area as locked down and no longer taking any
new allocations */
@@ -331,13 +332,13 @@ static void __init relocate_initrd(void)
ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
PAGE_SIZE);
- if (ramdisk_here == MEMBLOCK_ERROR)
+ if (!ramdisk_here)
panic("Cannot find place for new RAMDISK of size %lld\n",
ramdisk_size);
/* Note: this includes all the lowmem currently occupied by
the initrd; we rely on that fact to keep the data intact. */
- memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
+ memblock_reserve(ramdisk_here, area_size);
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -393,7 +394,7 @@ static void __init reserve_initrd(void)
initrd_start = 0;
if (ramdisk_size >= (end_of_lowmem>>1)) {
- memblock_x86_free_range(ramdisk_image, ramdisk_end);
+ memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
printk(KERN_ERR "initrd too large to handle, "
"disabling initrd\n");
return;
@@ -416,7 +417,7 @@ static void __init reserve_initrd(void)
relocate_initrd();
- memblock_x86_free_range(ramdisk_image, ramdisk_end);
+ memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init reserve_initrd(void)
@@ -490,15 +491,13 @@ static void __init memblock_x86_reserve_range_setup_data(void)
{
struct setup_data *data;
u64 pa_data;
- char buf[32];
if (boot_params.hdr.version < 0x0209)
return;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
- sprintf(buf, "setup data %x", data->type);
- memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
+ memblock_reserve(pa_data, sizeof(*data) + data->len);
pa_data = data->next;
early_iounmap(data, sizeof(*data));
}
@@ -554,7 +553,7 @@ static void __init reserve_crashkernel(void)
crash_base = memblock_find_in_range(alignment,
CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
- if (crash_base == MEMBLOCK_ERROR) {
+ if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
return;
}
@@ -568,7 +567,7 @@ static void __init reserve_crashkernel(void)
return;
}
}
- memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
+ memblock_reserve(crash_base, crash_size);
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
"for crashkernel (System RAM: %ldMB)\n",
@@ -626,7 +625,7 @@ static __init void reserve_ibft_region(void)
addr = find_ibft_region(&size);
if (size)
- memblock_x86_reserve_range(addr, addr + size, "* ibft");
+ memblock_reserve(addr, size);
}
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a91ae7709b49..a73b61055ad6 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -14,11 +14,11 @@ void __init setup_trampolines(void)
/* Has to be in very low memory so we can execute real-mode AP code. */
mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (mem == MEMBLOCK_ERROR)
+ if (!mem)
panic("Cannot allocate trampoline\n");
x86_trampoline_base = __va(mem);
- memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE");
+ memblock_reserve(mem, size);
printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
x86_trampoline_base, (unsigned long long)mem, size);
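This is the same interface change seen in setup.c above: memblock_find_in_range() now returns 0 on failure instead of the old MEMBLOCK_ERROR sentinel, so the checking pattern collapses to:

	phys_addr_t mem;

	mem = memblock_find_in_range(0, 1 << 20, size, PAGE_SIZE);
	if (!mem)	/* 0 now means "no suitable range found" */
		panic("Cannot allocate trampoline\n");
	memblock_reserve(mem, size);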
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 76e3f1cd0369..405f2620392f 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
{
+ struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
struct kvm_timer *pt = &ps->pit_timer;
s64 interval;
+ if (!irqchip_in_kernel(kvm))
+ return;
+
interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
/* FIXME: enhance mode 4 precision */
case 4:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
- create_pit_timer(ps, val, 0);
+ create_pit_timer(kvm, val, 0);
}
break;
case 2:
case 3:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
- create_pit_timer(ps, val, 1);
+ create_pit_timer(kvm, val, 1);
}
break;
default:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c38efd7b792e..4c938da2ba00 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -602,7 +602,6 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
struct kvm_lapic *apic = vcpu->arch.apic;
- u32 timer_mode_mask;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
if (!best)
@@ -615,15 +614,12 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
best->ecx |= bit(X86_FEATURE_OSXSAVE);
}
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- best->function == 0x1) {
- best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
- timer_mode_mask = 3 << 17;
- } else
- timer_mode_mask = 1 << 17;
-
- if (apic)
- apic->lapic_timer.timer_mode_mask = timer_mode_mask;
+ if (apic) {
+ if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+ apic->lapic_timer.timer_mode_mask = 3 << 17;
+ else
+ apic->lapic_timer.timer_mode_mask = 1 << 17;
+ }
}
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
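The two masks describe the APIC LVT Timer mode field: without the TSC-deadline feature it is the single periodic-mode bit 17, while with the feature bits 18:17 form a two-bit field whose third value selects TSC-deadline mode. In terms of (illustratively named) constants:

	#define LVT_TIMER_ONESHOT	(0 << 17)	/* bits 18:17 = 00 */
	#define LVT_TIMER_PERIODIC	(1 << 17)	/* bits 18:17 = 01 */
	#define LVT_TIMER_TSCDEADLINE	(2 << 17)	/* bits 18:17 = 10 */

so the mask is 3 << 17 when all three modes are valid, and 1 << 17 otherwise.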
@@ -2135,6 +2131,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
+ case KVM_CAP_TSC_DEADLINE_TIMER:
+ r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
+ break;
default:
r = 0;
break;
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c
index 46fc4ee09fc4..88ad5fbda6e1 100644
--- a/arch/x86/lib/inat.c
+++ b/arch/x86/lib/inat.c
@@ -82,9 +82,16 @@ insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
const insn_attr_t *table;
if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
return 0;
- table = inat_avx_tables[vex_m][vex_p];
+ /* Check the master (no-last-prefix) table first */
+ table = inat_avx_tables[vex_m][0];
if (!table)
return 0;
+ if (!inat_is_group(table[opcode]) && vex_p) {
+ /* If this is not a group, get attribute directly */
+ table = inat_avx_tables[vex_m][vex_p];
+ if (!table)
+ return 0;
+ }
return table[opcode];
}
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 374562ed6704..5a1f9f3e3fbb 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -202,7 +202,7 @@ void insn_get_opcode(struct insn *insn)
m = insn_vex_m_bits(insn);
p = insn_vex_p_bits(insn);
insn->attr = inat_get_avx_attribute(op, m, p);
- if (!inat_accept_vex(insn->attr))
+ if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr))
insn->attr = 0; /* This instruction is bad */
goto end; /* VEX has only 1 byte for opcode */
}
@@ -249,6 +249,8 @@ void insn_get_modrm(struct insn *insn)
pfx = insn_last_prefix(insn);
insn->attr = inat_get_group_attribute(mod, pfx,
insn->attr);
+ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+ insn->attr = 0; /* This is bad */
}
}
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index a793da5e560e..5b83c51c12e0 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1,5 +1,11 @@
# x86 Opcode Maps
#
+# This is (mostly) based on the following documentation:
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2
+# (#325383-040US, October 2011)
+# - Intel(R) Advanced Vector Extensions Programming Reference
+# (#319433-011, June 2011).
+#
#<Opcode maps>
# Table: table-name
# Referrer: escaped-name
@@ -15,10 +21,13 @@
# EndTable
#
# AVX Superscripts
-# (VEX): this opcode can accept VEX prefix.
-# (oVEX): this opcode requires VEX prefix.
-# (o128): this opcode only supports 128bit VEX.
-# (o256): this opcode only supports 256bit VEX.
+# (v): this opcode requires VEX prefix.
+# (v1): this opcode only supports 128bit VEX.
+#
+# Last Prefix Superscripts
+# - (66): the last prefix is 0x66
+# - (F3): the last prefix is 0xF3
+# - (F2): the last prefix is 0xF2
#
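To make the legend concrete, an entry such as

	58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)

reads as: opcode byte 0x58; each '|'-separated alternative is one mnemonic with its operand abbreviations; (66)/(F3)/(F2) select an alternative by last legacy prefix; and (v)/(v1) are the VEX superscripts defined above.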
Table: one byte opcode
@@ -199,8 +208,8 @@ a0: MOV AL,Ob
a1: MOV rAX,Ov
a2: MOV Ob,AL
a3: MOV Ov,rAX
-a4: MOVS/B Xb,Yb
-a5: MOVS/W/D/Q Xv,Yv
+a4: MOVS/B Yb,Xb
+a5: MOVS/W/D/Q Yv,Xv
a6: CMPS/B Xb,Yb
a7: CMPS/W/D Xv,Yv
a8: TEST AL,Ib
@@ -233,8 +242,8 @@ c0: Grp2 Eb,Ib (1A)
c1: Grp2 Ev,Ib (1A)
c2: RETN Iw (f64)
c3: RETN
-c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix)
-c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix)
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
c6: Grp11 Eb,Ib (1A)
c7: Grp11 Ev,Iz (1A)
c8: ENTER Iw,Ib
@@ -320,14 +329,19 @@ AVXcode: 1
# 3DNow! uses the last imm byte as opcode extension.
0f: 3DNow! Pq,Qq,Ib
# 0x0f 0x10-0x1f
-10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128)
-11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128)
-12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX)
-13: mpvlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128)
-14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX)
-15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX)
-16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlsps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX)
-17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128)
+# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
+# operands, but they actually do. Also, vmovss and vmovsd only accept 128bit.
+# MOVSS/MOVSD has too many forms (3) in the SDM; this map shows a typical one.
+# Many AVX instructions lack the v1 superscript, per the Intel AVX Programming
+# Reference A.1
+10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
18: Grp16 (1A)
19:
1a:
@@ -345,14 +359,14 @@ AVXcode: 1
25:
26:
27:
-28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX)
-29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX)
-2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128)
-2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX)
-2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128)
-2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128)
-2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd Vsd,Wsd (66),(VEX),(o128)
-2f: comiss Vss,Wss (VEX),(o128) | comisd Vsd,Wsd (66),(VEX),(o128)
+28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
+2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
# 0x0f 0x30-0x3f
30: WRMSR
31: RDTSC
@@ -388,65 +402,66 @@ AVXcode: 1
4e: CMOVLE/NG Gv,Ev
4f: CMOVNLE/G Gv,Ev
# 0x0f 0x50-0x5f
-50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX)
-51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128)
-52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128)
-53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128)
-54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX)
-55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX)
-56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX)
-57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX)
-58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128)
-59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128)
-5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128)
-5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX)
-5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128)
-5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128)
-5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128)
-5f: maxps Vps,Wps (VEX) | maxss Vss,Wss (F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128)
+50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
# 0x0f 0x60-0x6f
-60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128)
-61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128)
-62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128)
-63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128)
-64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128)
-65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128)
-66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128)
-67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128)
-68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128)
-69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128)
-6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128)
-6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128)
-6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128)
-6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128)
-6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128)
-6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX)
+60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3)
# 0x0f 0x70-0x7f
-70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128)
+70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
71: Grp12 (1A)
72: Grp13 (1A)
73: Grp14 (1A)
-74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128)
-75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128)
-76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66),(VEX),(o128)
-77: emms/vzeroupper/vzeroall (VEX)
-78: VMREAD Ed/q,Gd/q
-79: VMWRITE Gd/q,Ed/q
+74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
+77: emms | vzeroupper | vzeroall
+78: VMREAD Ey,Gy
+79: VMWRITE Gy,Ey
7a:
7b:
-7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX)
-7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX)
-7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128)
-7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX)
+7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3)
# 0x0f 0x80-0x8f
80: JO Jz (f64)
81: JNO Jz (f64)
-82: JB/JNAE/JC Jz (f64)
-83: JNB/JAE/JNC Jz (f64)
-84: JZ/JE Jz (f64)
-85: JNZ/JNE Jz (f64)
+82: JB/JC/JNAE Jz (f64)
+83: JAE/JNB/JNC Jz (f64)
+84: JE/JZ Jz (f64)
+85: JNE/JNZ Jz (f64)
86: JBE/JNA Jz (f64)
-87: JNBE/JA Jz (f64)
+87: JA/JNBE Jz (f64)
88: JS Jz (f64)
89: JNS Jz (f64)
8a: JP/JPE Jz (f64)
@@ -502,18 +517,18 @@ b8: JMPE | POPCNT Gv,Ev (F3)
b9: Grp10 (1A)
ba: Grp8 Ev,Ib (1A)
bb: BTC Ev,Gv
-bc: BSF Gv,Ev
-bd: BSR Gv,Ev
+bc: BSF Gv,Ev | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev | LZCNT Gv,Ev (F3)
be: MOVSX Gv,Eb
bf: MOVSX Gv,Ew
# 0x0f 0xc0-0xcf
c0: XADD Eb,Gb
c1: XADD Ev,Gv
-c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX)
-c3: movnti Md/q,Gd/q
-c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128)
-c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128)
-c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX)
+c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+c3: movnti My,Gy
+c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
c7: Grp9 (1A)
c8: BSWAP RAX/EAX/R8/R8D
c9: BSWAP RCX/ECX/R9/R9D
@@ -524,55 +539,55 @@ cd: BSWAP RBP/EBP/R13/R13D
ce: BSWAP RSI/ESI/R14/R14D
cf: BSWAP RDI/EDI/R15/R15D
# 0x0f 0xd0-0xdf
-d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX)
-d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128)
-d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128)
-d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128)
-d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128)
-d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128)
-d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
-d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128)
-d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128)
-d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128)
-da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128)
-db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128)
-dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128)
-dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128)
-de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128)
-df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128)
+d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1)
+dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1)
# 0x0f 0xe0-0xef
-e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128)
-e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128)
-e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128)
-e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128)
-e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128)
-e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128)
-e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX)
-e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX)
-e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128)
-e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128)
-ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128)
-eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128)
-ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128)
-ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128)
-ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128)
-ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128)
+e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2)
+e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1)
+ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1)
# 0x0f 0xf0-0xff
-f0: lddqu Vdq,Mdq (F2),(VEX)
-f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128)
-f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128)
-f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128)
-f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128)
-f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128)
-f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128)
-f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128)
-f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128)
-f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128)
-fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128)
-fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128)
-fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128)
-fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128)
-fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128)
+f0: vlddqu Vx,Mx (F2)
+f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
ff:
EndTable
@@ -580,155 +595,193 @@ Table: 3-byte opcode 1 (0x0f 0x38)
Referrer: 3-byte escape 1
AVXcode: 2
# 0x0f 0x38 0x00-0x0f
-00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128)
-01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128)
-02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128)
-03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128)
-04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128)
-05: phsubw Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128)
-06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128)
-07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128)
-08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128)
-09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128)
-0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128)
-0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128)
-0c: Vpermilps /r (66),(oVEX)
-0d: Vpermilpd /r (66),(oVEX)
-0e: vtestps /r (66),(oVEX)
-0f: vtestpd /r (66),(oVEX)
+00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+0c: vpermilps Vx,Hx,Wx (66),(v)
+0d: vpermilpd Vx,Hx,Wx (66),(v)
+0e: vtestps Vx,Wx (66),(v)
+0f: vtestpd Vx,Wx (66),(v)
# 0x0f 0x38 0x10-0x1f
10: pblendvb Vdq,Wdq (66)
11:
12:
-13:
+13: vcvtph2ps Vx,Wx,Ib (66),(v)
14: blendvps Vdq,Wdq (66)
15: blendvpd Vdq,Wdq (66)
-16:
-17: ptest Vdq,Wdq (66),(VEX)
-18: vbroadcastss /r (66),(oVEX)
-19: vbroadcastsd /r (66),(oVEX),(o256)
-1a: vbroadcastf128 /r (66),(oVEX),(o256)
+16: vpermps Vqq,Hqq,Wqq (66),(v)
+17: vptest Vx,Wx (66)
+18: vbroadcastss Vx,Wd (66),(v)
+19: vbroadcastsd Vqq,Wq (66),(v)
+1a: vbroadcastf128 Vqq,Mdq (66),(v)
1b:
-1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128)
-1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128)
-1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128)
+1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
1f:
# 0x0f 0x38 0x20-0x2f
-20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128)
-21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128)
-22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128)
-23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128)
-24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128)
-25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128)
+20: vpmovsxbw Vx,Ux/Mq (66),(v1)
+21: vpmovsxbd Vx,Ux/Md (66),(v1)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1)
+24: vpmovsxwq Vx,Ux/Md (66),(v1)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1)
26:
27:
-28: pmuldq Vdq,Wdq (66),(VEX),(o128)
-29: pcmpeqq Vdq,Wdq (66),(VEX),(o128)
-2a: movntdqa Vdq,Mdq (66),(VEX),(o128)
-2b: packusdw Vdq,Wdq (66),(VEX),(o128)
-2c: vmaskmovps(ld) /r (66),(oVEX)
-2d: vmaskmovpd(ld) /r (66),(oVEX)
-2e: vmaskmovps(st) /r (66),(oVEX)
-2f: vmaskmovpd(st) /r (66),(oVEX)
+28: vpmuldq Vx,Hx,Wx (66),(v1)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1)
+2a: vmovntdqa Vx,Mx (66),(v1)
+2b: vpackusdw Vx,Hx,Wx (66),(v1)
+2c: vmaskmovps Vx,Hx,Mx (66),(v)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v)
+2e: vmaskmovps Mx,Hx,Vx (66),(v)
+2f: vmaskmovpd Mx,Hx,Vx (66),(v)
# 0x0f 0x38 0x30-0x3f
-30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128)
-31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128)
-32: pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128)
-33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128)
-34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128)
-35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128)
-36:
-37: pcmpgtq Vdq,Wdq (66),(VEX),(o128)
-38: pminsb Vdq,Wdq (66),(VEX),(o128)
-39: pminsd Vdq,Wdq (66),(VEX),(o128)
-3a: pminuw Vdq,Wdq (66),(VEX),(o128)
-3b: pminud Vdq,Wdq (66),(VEX),(o128)
-3c: pmaxsb Vdq,Wdq (66),(VEX),(o128)
-3d: pmaxsd Vdq,Wdq (66),(VEX),(o128)
-3e: pmaxuw Vdq,Wdq (66),(VEX),(o128)
-3f: pmaxud Vdq,Wdq (66),(VEX),(o128)
+30: vpmovzxbw Vx,Ux/Mq (66),(v1)
+31: vpmovzxbd Vx,Ux/Md (66),(v1)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1)
+34: vpmovzxwq Vx,Ux/Md (66),(v1)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1)
+36: vpermd Vqq,Hqq,Wqq (66),(v)
+37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1)
+39: vpminsd Vx,Hx,Wx (66),(v1)
+3a: vpminuw Vx,Hx,Wx (66),(v1)
+3b: vpminud Vx,Hx,Wx (66),(v1)
+3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1)
+3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1)
# 0x0f 0x38 0x40-0x8f
-40: pmulld Vdq,Wdq (66),(VEX),(o128)
-41: phminposuw Vdq,Wdq (66),(VEX),(o128)
-80: INVEPT Gd/q,Mdq (66)
-81: INVPID Gd/q,Mdq (66)
+40: vpmulld Vx,Hx,Wx (66),(v1)
+41: vphminposuw Vdq,Wdq (66),(v1)
+42:
+43:
+44:
+45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v)
+47: vpsllvd/q Vx,Hx,Wx (66),(v)
+# Skip 0x48-0x57
+58: vpbroadcastd Vx,Wx (66),(v)
+59: vpbroadcastq Vx,Wx (66),(v)
+5a: vbroadcasti128 Vqq,Mdq (66),(v)
+# Skip 0x5b-0x77
+78: vpbroadcastb Vx,Wx (66),(v)
+79: vpbroadcastw Vx,Wx (66),(v)
+# Skip 0x7a-0x7f
+80: INVEPT Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
+82: INVPCID Gy,Mdq (66)
+8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
# 0x0f 0x38 0x90-0xbf (FMA)
-96: vfmaddsub132pd/ps /r (66),(VEX)
-97: vfmsubadd132pd/ps /r (66),(VEX)
-98: vfmadd132pd/ps /r (66),(VEX)
-99: vfmadd132sd/ss /r (66),(VEX),(o128)
-9a: vfmsub132pd/ps /r (66),(VEX)
-9b: vfmsub132sd/ss /r (66),(VEX),(o128)
-9c: vfnmadd132pd/ps /r (66),(VEX)
-9d: vfnmadd132sd/ss /r (66),(VEX),(o128)
-9e: vfnmsub132pd/ps /r (66),(VEX)
-9f: vfnmsub132sd/ss /r (66),(VEX),(o128)
-a6: vfmaddsub213pd/ps /r (66),(VEX)
-a7: vfmsubadd213pd/ps /r (66),(VEX)
-a8: vfmadd213pd/ps /r (66),(VEX)
-a9: vfmadd213sd/ss /r (66),(VEX),(o128)
-aa: vfmsub213pd/ps /r (66),(VEX)
-ab: vfmsub213sd/ss /r (66),(VEX),(o128)
-ac: vfnmadd213pd/ps /r (66),(VEX)
-ad: vfnmadd213sd/ss /r (66),(VEX),(o128)
-ae: vfnmsub213pd/ps /r (66),(VEX)
-af: vfnmsub213sd/ss /r (66),(VEX),(o128)
-b6: vfmaddsub231pd/ps /r (66),(VEX)
-b7: vfmsubadd231pd/ps /r (66),(VEX)
-b8: vfmadd231pd/ps /r (66),(VEX)
-b9: vfmadd231sd/ss /r (66),(VEX),(o128)
-ba: vfmsub231pd/ps /r (66),(VEX)
-bb: vfmsub231sd/ss /r (66),(VEX),(o128)
-bc: vfnmadd231pd/ps /r (66),(VEX)
-bd: vfnmadd231sd/ss /r (66),(VEX),(o128)
-be: vfnmsub231pd/ps /r (66),(VEX)
-bf: vfnmsub231sd/ss /r (66),(VEX),(o128)
+90: vgatherdd/q Vx,Hx,Wx (66),(v)
+91: vgatherqd/q Vx,Hx,Wx (66),(v)
+92: vgatherdps/d Vx,Hx,Wx (66),(v)
+93: vgatherqps/d Vx,Hx,Wx (66),(v)
+94:
+95:
+96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
# 0x0f 0x38 0xc0-0xff
-db: aesimc Vdq,Wdq (66),(VEX),(o128)
-dc: aesenc Vdq,Wdq (66),(VEX),(o128)
-dd: aesenclast Vdq,Wdq (66),(VEX),(o128)
-de: aesdec Vdq,Wdq (66),(VEX),(o128)
-df: aesdeclast Vdq,Wdq (66),(VEX),(o128)
-f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2)
-f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2)
+db: VAESIMC Vdq,Wdq (66),(v1)
+dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
+f3: ANDN Gy,By,Ey (v)
+f4: Grp17 (1A)
+f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+f6: MULX By,Gy,rDX,Ey (F2),(v)
+f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
Referrer: 3-byte escape 2
AVXcode: 3
# 0x0f 0x3a 0x00-0xff
-04: vpermilps /r,Ib (66),(oVEX)
-05: vpermilpd /r,Ib (66),(oVEX)
-06: vperm2f128 /r,Ib (66),(oVEX),(o256)
-08: roundps Vdq,Wdq,Ib (66),(VEX)
-09: roundpd Vdq,Wdq,Ib (66),(VEX)
-0a: roundss Vss,Wss,Ib (66),(VEX),(o128)
-0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128)
-0c: blendps Vdq,Wdq,Ib (66),(VEX)
-0d: blendpd Vdq,Wdq,Ib (66),(VEX)
-0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128)
-0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128)
-14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128)
-15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128)
-16: pextrd/pextrq Ed/q,Vdq,Ib (66),(VEX),(o128)
-17: extractps Ed,Vdq,Ib (66),(VEX),(o128)
-18: vinsertf128 /r,Ib (66),(oVEX),(o256)
-19: vextractf128 /r,Ib (66),(oVEX),(o256)
-20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128)
-21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128)
-22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128)
-40: dpps Vdq,Wdq,Ib (66),(VEX)
-41: dppd Vdq,Wdq,Ib (66),(VEX),(o128)
-42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128)
-44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128)
-4a: vblendvps /r,Ib (66),(oVEX)
-4b: vblendvpd /r,Ib (66),(oVEX)
-4c: vpblendvb /r,Ib (66),(oVEX),(o128)
-60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128)
-61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128)
-62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128)
-63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128)
-df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128)
+00: vpermq Vqq,Wqq,Ib (66),(v)
+01: vpermpd Vqq,Wqq,Ib (66),(v)
+02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+03:
+04: vpermilps Vx,Wx,Ib (66),(v)
+05: vpermilpd Vx,Wx,Ib (66),(v)
+06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+07:
+08: vroundps Vx,Wx,Ib (66)
+09: vroundpd Vx,Wx,Ib (66)
+0a: vroundss Vss,Wss,Ib (66),(v1)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1)
+0c: vblendps Vx,Hx,Wx,Ib (66)
+0d: vblendpd Vx,Hx,Wx,Ib (66)
+0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+17: vextractps Ed,Vdq,Ib (66),(v1)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v)
+19: vextractf128 Wdq,Vqq,Ib (66),(v)
+1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v)
+39: vextracti128 Wdq,Vqq,Ib (66),(v)
+40: vdpps Vx,Hx,Wx,Ib (66)
+41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1)
+44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+f0: RORX Gy,Ey,Ib (F2),(v)
EndTable
GrpTable: Grp1
@@ -790,7 +843,7 @@ GrpTable: Grp5
2: CALLN Ev (f64)
3: CALLF Ep
4: JMPN Ev (f64)
-5: JMPF Ep
+5: JMPF Mp
6: PUSH Ev (d64)
7:
EndTable
@@ -807,7 +860,7 @@ EndTable
GrpTable: Grp7
0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5:
@@ -824,44 +877,45 @@ EndTable
GrpTable: Grp9
1: CMPXCHG8B/16B Mq/Mdq
-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3)
-7: VMPTRST Mq
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+7: VMPTRST Mq | VMPTRST Mq (F3)
EndTable
GrpTable: Grp10
EndTable
GrpTable: Grp11
+# Note: the operands are given by the group opcode
0: MOV
EndTable
GrpTable: Grp12
-2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128)
-4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128)
-6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128)
+2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
EndTable
GrpTable: Grp13
-2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128)
-4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128)
-6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128)
+2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1)
+6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
EndTable
GrpTable: Grp14
-2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128)
-3: psrldq Udq,Ib (66),(11B),(VEX),(o128)
-6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128)
-7: pslldq Udq,Ib (66),(11B),(VEX),(o128)
+2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
EndTable
GrpTable: Grp15
-0: fxsave
-1: fxstor
-2: ldmxcsr (VEX)
-3: stmxcsr (VEX)
+0: fxsave | RDFSBASE Ry (F3),(11B)
+1: fxrstor | RDGSBASE Ry (F3),(11B)
+2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
5: XRSTOR | lfence (11B)
-6: mfence (11B)
+6: XSAVEOPT | mfence (11B)
7: clflush | sfence (11B)
EndTable
@@ -872,6 +926,12 @@ GrpTable: Grp16
3: prefetch T2
EndTable
+GrpTable: Grp17
+1: BLSR By,Ey (v)
+2: BLSMSK By,Ey (v)
+3: BLSI By,Ey (v)
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 3d11327c9ab4..23d8e5fecf76 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
-obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
-
obj-$(CONFIG_MEMTEST) += memtest.o
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 87488b93a65c..a298914058f9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -67,7 +67,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
good_end = max_pfn_mapped << PAGE_SHIFT;
base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
- if (base == MEMBLOCK_ERROR)
+ if (!base)
panic("Cannot find space for the kernel page tables");
pgt_buf_start = base >> PAGE_SHIFT;
@@ -80,7 +80,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
void __init native_pagetable_reserve(u64 start, u64 end)
{
- memblock_x86_reserve_range(start, end, "PGTABLE");
+ memblock_reserve(start, end - start);
}
struct map_range {
@@ -279,8 +279,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
* pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
* so that they can be reused for other purposes.
*
- * On native it just means calling memblock_x86_reserve_range, on Xen it
- * also means marking RW the pagetable pages that we allocated before
+ * On native it just means calling memblock_reserve, on Xen it also
+ * means marking RW the pagetable pages that we allocated before
* but that haven't been used.
*
* In fact on xen we mark RO the whole range pgt_buf_start -
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 29f7c6d98179..0c1da394a634 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,23 +427,17 @@ static void __init add_one_highpage_init(struct page *page)
void __init add_highpages_with_active_regions(int nid,
unsigned long start_pfn, unsigned long end_pfn)
{
- struct range *range;
- int nr_range;
- int i;
-
- nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
-
- for (i = 0; i < nr_range; i++) {
- struct page *page;
- int node_pfn;
-
- for (node_pfn = range[i].start; node_pfn < range[i].end;
- node_pfn++) {
- if (!pfn_valid(node_pfn))
- continue;
- page = pfn_to_page(node_pfn);
- add_one_highpage_init(page);
- }
+ phys_addr_t start, end;
+ u64 i;
+
+ for_each_free_mem_range(i, nid, &start, &end, NULL) {
+ unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
+ start_pfn, end_pfn);
+ unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
+ start_pfn, end_pfn);
+ for ( ; pfn < e_pfn; pfn++)
+ if (pfn_valid(pfn))
+ add_one_highpage_init(pfn_to_page(pfn));
}
}
#else
@@ -650,18 +644,18 @@ void __init initmem_init(void)
highstart_pfn = highend_pfn = max_pfn;
if (max_pfn > max_low_pfn)
highstart_pfn = max_low_pfn;
- memblock_x86_register_active_regions(0, 0, highend_pfn);
- sparse_memory_present_with_active_regions(0);
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
num_physpages = highend_pfn;
high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
- memblock_x86_register_active_regions(0, 0, max_low_pfn);
- sparse_memory_present_with_active_regions(0);
num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
+
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+ sparse_memory_present_with_active_regions(0);
+
#ifdef CONFIG_FLATMEM
max_mapnr = num_physpages;
#endif
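
The rewritten add_highpages_with_active_regions() intersects each free byte range with the node's page-frame window. A standalone sketch of the PFN_UP/PFN_DOWN/clamp arithmetic; the macros here are assumed userspace stand-ins for the kernel helpers:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
/* assumed userspace stand-ins for the kernel helpers */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long start = 0x12345, end = 0x45000;   /* free range, bytes */
	unsigned long start_pfn = 0x20, end_pfn = 0x40; /* node window, PFNs */

	unsigned long pfn   = clamp_ul(PFN_UP(start), start_pfn, end_pfn);
	unsigned long e_pfn = clamp_ul(PFN_DOWN(end), start_pfn, end_pfn);

	printf("walk PFNs [%#lx, %#lx)\n", pfn, e_pfn); /* [0x20, 0x40) */
	return 0;
}
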
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bbaaa005bf0e..a8a56ce3a962 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -608,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start,
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
- memblock_x86_register_active_regions(0, 0, max_pfn);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
#endif
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
deleted file mode 100644
index 992da5ec5a64..000000000000
--- a/arch/x86/mm/memblock.c
+++ /dev/null
@@ -1,348 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/range.h>
-
-/* Check for already reserved areas */
-bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
-{
- struct memblock_region *r;
- u64 addr = *addrp, last;
- u64 size = *sizep;
- bool changed = false;
-
-again:
- last = addr + size;
- for_each_memblock(reserved, r) {
- if (last > r->base && addr < r->base) {
- size = r->base - addr;
- changed = true;
- goto again;
- }
- if (last > (r->base + r->size) && addr < (r->base + r->size)) {
- addr = round_up(r->base + r->size, align);
- size = last - addr;
- changed = true;
- goto again;
- }
- if (last <= (r->base + r->size) && addr >= r->base) {
- *sizep = 0;
- return false;
- }
- }
- if (changed) {
- *addrp = addr;
- *sizep = size;
- }
- return changed;
-}
-
-/*
- * Find next free range after start, and size is returned in *sizep
- */
-u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
-{
- struct memblock_region *r;
-
- for_each_memblock(memory, r) {
- u64 ei_start = r->base;
- u64 ei_last = ei_start + r->size;
- u64 addr;
-
- addr = round_up(ei_start, align);
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- continue;
- *sizep = ei_last - addr;
- while (memblock_x86_check_reserved_size(&addr, sizep, align))
- ;
-
- if (*sizep)
- return addr;
- }
-
- return MEMBLOCK_ERROR;
-}
-
-static __init struct range *find_range_array(int count)
-{
- u64 end, size, mem;
- struct range *range;
-
- size = sizeof(struct range) * count;
- end = memblock.current_limit;
-
- mem = memblock_find_in_range(0, end, size, sizeof(struct range));
- if (mem == MEMBLOCK_ERROR)
- panic("can not find more space for range array");
-
- /*
- * This range is tempoaray, so don't reserve it, it will not be
- * overlapped because We will not alloccate new buffer before
- * We discard this one
- */
- range = __va(mem);
- memset(range, 0, size);
-
- return range;
-}
-
-static void __init memblock_x86_subtract_reserved(struct range *range, int az)
-{
- u64 final_start, final_end;
- struct memblock_region *r;
-
- /* Take out region array itself at first*/
- memblock_free_reserved_regions();
-
- memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);
-
- for_each_memblock(reserved, r) {
- memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
- final_start = PFN_DOWN(r->base);
- final_end = PFN_UP(r->base + r->size);
- if (final_start >= final_end)
- continue;
- subtract_range(range, az, final_start, final_end);
- }
-
- /* Put region array back ? */
- memblock_reserve_reserved_regions();
-}
-
-struct count_data {
- int nr;
-};
-
-static int __init count_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- struct count_data *data = datax;
-
- data->nr++;
-
- return 0;
-}
-
-static int __init count_early_node_map(int nodeid)
-{
- struct count_data data;
-
- data.nr = 0;
- work_with_active_regions(nodeid, count_work_fn, &data);
-
- return data.nr;
-}
-
-int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
- unsigned long start_pfn, unsigned long end_pfn)
-{
- int count;
- struct range *range;
- int nr_range;
-
- count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;
-
- range = find_range_array(count);
- nr_range = 0;
-
- /*
- * Use early_node_map[] and memblock.reserved.region to get range array
- * at first
- */
- nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
- subtract_range(range, count, 0, start_pfn);
- subtract_range(range, count, end_pfn, -1ULL);
-
- memblock_x86_subtract_reserved(range, count);
- nr_range = clean_sort_range(range, count);
-
- *rangep = range;
- return nr_range;
-}
-
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
-{
- unsigned long end_pfn = -1UL;
-
-#ifdef CONFIG_X86_32
- end_pfn = max_low_pfn;
-#endif
- return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
-}
-
-static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
-{
- int i, count;
- struct range *range;
- int nr_range;
- u64 final_start, final_end;
- u64 free_size;
- struct memblock_region *r;
-
- count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;
-
- range = find_range_array(count);
- nr_range = 0;
-
- addr = PFN_UP(addr);
- limit = PFN_DOWN(limit);
-
- for_each_memblock(memory, r) {
- final_start = PFN_UP(r->base);
- final_end = PFN_DOWN(r->base + r->size);
- if (final_start >= final_end)
- continue;
- if (final_start >= limit || final_end <= addr)
- continue;
-
- nr_range = add_range(range, count, nr_range, final_start, final_end);
- }
- subtract_range(range, count, 0, addr);
- subtract_range(range, count, limit, -1ULL);
-
- /* Subtract memblock.reserved.region in range ? */
- if (!get_free)
- goto sort_and_count_them;
- for_each_memblock(reserved, r) {
- final_start = PFN_DOWN(r->base);
- final_end = PFN_UP(r->base + r->size);
- if (final_start >= final_end)
- continue;
- if (final_start >= limit || final_end <= addr)
- continue;
-
- subtract_range(range, count, final_start, final_end);
- }
-
-sort_and_count_them:
- nr_range = clean_sort_range(range, count);
-
- free_size = 0;
- for (i = 0; i < nr_range; i++)
- free_size += range[i].end - range[i].start;
-
- return free_size << PAGE_SHIFT;
-}
-
-u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
-{
- return __memblock_x86_memory_in_range(addr, limit, true);
-}
-
-u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
-{
- return __memblock_x86_memory_in_range(addr, limit, false);
-}
-
-void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
-{
- if (start == end)
- return;
-
- if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
- return;
-
- memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
-
- memblock_reserve(start, end - start);
-}
-
-void __init memblock_x86_free_range(u64 start, u64 end)
-{
- if (start == end)
- return;
-
- if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
- return;
-
- memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
-
- memblock_free(start, end - start);
-}
-
-/*
- * Need to call this function after memblock_x86_register_active_regions,
- * so early_node_map[] is filled already.
- */
-u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
-{
- u64 addr;
- addr = find_memory_core_early(nid, size, align, start, end);
- if (addr != MEMBLOCK_ERROR)
- return addr;
-
- /* Fallback, should already have start end within node range */
- return memblock_find_in_range(start, end, size, align);
-}
-
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
- */
-static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn)
-{
- u64 align = PAGE_SIZE;
-
- *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
- *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
-
- /* Skip map entries smaller than a page */
- if (*ei_startpfn >= *ei_endpfn)
- return 0;
-
- /* Skip if map is outside the node */
- if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
- return 0;
-
- /* Check for overlaps */
- if (*ei_startpfn < start_pfn)
- *ei_startpfn = start_pfn;
- if (*ei_endpfn > last_pfn)
- *ei_endpfn = last_pfn;
-
- return 1;
-}
-
-/* Walk the memblock.memory map and register active regions within a node */
-void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long last_pfn)
-{
- unsigned long ei_startpfn;
- unsigned long ei_endpfn;
- struct memblock_region *r;
-
- for_each_memblock(memory, r)
- if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init memblock_x86_hole_size(u64 start, u64 end)
-{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long last_pfn = end >> PAGE_SHIFT;
- unsigned long ei_startpfn, ei_endpfn, ram = 0;
- struct memblock_region *r;
-
- for_each_memblock(memory, r)
- if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- ram += ei_endpfn - ei_startpfn;
-
- return end - start - ((u64)ram << PAGE_SHIFT);
-}
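
Everything the deleted file provided now maps onto the generic memblock API used throughout this series. A hedged kernel-context sketch of the correspondence (not a standalone program; the calls are those used elsewhere in this patch):

#include <linux/memblock.h>

static void __init memblock_api_sketch(phys_addr_t start, phys_addr_t end)
{
	phys_addr_t rstart, rend;
	u64 i;

	/* memblock_x86_reserve_range(start, end, "NAME") becomes: */
	memblock_reserve(start, end - start);

	/* memblock_x86_free_range(start, end) becomes: */
	memblock_free(start, end - start);

	/* the find-next-free-range walks become an iterator: */
	for_each_free_mem_range(i, MAX_NUMNODES, &rstart, &rend, NULL)
		; /* each [rstart, rend) is memory minus reserved */
}
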
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 92faf3a1c53e..c80b9fb95734 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
(unsigned long long) pattern,
(unsigned long long) start_bad,
(unsigned long long) end_bad);
- memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+ memblock_reserve(start_bad, end_bad - start_bad);
}
static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -70,24 +70,19 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size)
static void __init do_one_pass(u64 pattern, u64 start, u64 end)
{
- u64 size = 0;
-
- while (start < end) {
- start = memblock_x86_find_in_range_size(start, &size, 1);
-
- /* done ? */
- if (start >= end)
- break;
- if (start + size > end)
- size = end - start;
-
- printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
- (unsigned long long) start,
- (unsigned long long) start + size,
- (unsigned long long) cpu_to_be64(pattern));
- memtest(pattern, start, size);
-
- start += size;
+ u64 i;
+ phys_addr_t this_start, this_end;
+
+ for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+ this_start = clamp_t(phys_addr_t, this_start, start, end);
+ this_end = clamp_t(phys_addr_t, this_end, start, end);
+ if (this_start < this_end) {
+ printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
+ (unsigned long long)this_start,
+ (unsigned long long)this_end,
+ (unsigned long long)cpu_to_be64(pattern));
+ memtest(pattern, this_start, this_end - this_start);
+ }
}
}
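
The memtest pass itself is unchanged: fill each free range with a pattern, then scan it back. A runnable userspace sketch of one pass over an ordinary buffer (illustrative only; the kernel walks physical ranges and reserves anything that reads back wrong):

#include <stdio.h>
#include <stdint.h>

static void one_pass(uint64_t pattern, uint64_t *buf, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		buf[i] = pattern;
	for (i = 0; i < words; i++)
		if (buf[i] != pattern)
			printf("bad word %zu: %016llx\n",
			       i, (unsigned long long)buf[i]);
}

int main(void)
{
	static uint64_t buf[1024];

	one_pass(0x5555555555555555ULL, buf, 1024);
	one_pass(0xaaaaaaaaaaaaaaaaULL, buf, 1024);
	puts("pass complete");
	return 0;
}
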
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index fbeaaf416610..496f494593bf 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -192,8 +192,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
- const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
- const u64 nd_high = PFN_PHYS(max_pfn_mapped);
const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
bool remapped = false;
u64 nd_pa;
@@ -224,17 +222,12 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
nd_pa = __pa(nd);
remapped = true;
} else {
- nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
- nd_size, SMP_CACHE_BYTES);
- if (nd_pa == MEMBLOCK_ERROR)
- nd_pa = memblock_find_in_range(nd_low, nd_high,
- nd_size, SMP_CACHE_BYTES);
- if (nd_pa == MEMBLOCK_ERROR) {
+ nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa) {
pr_err("Cannot find %zu bytes in node %d\n",
nd_size, nid);
return;
}
- memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
nd = __va(nd_pa);
}
@@ -371,8 +364,7 @@ void __init numa_reset_distance(void)
/* numa_distance could be 1LU marking allocation failure, test cnt */
if (numa_distance_cnt)
- memblock_x86_free_range(__pa(numa_distance),
- __pa(numa_distance) + size);
+ memblock_free(__pa(numa_distance), size);
numa_distance_cnt = 0;
numa_distance = NULL; /* enable table creation */
}
@@ -395,13 +387,13 @@ static int __init numa_alloc_distance(void)
phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
size, PAGE_SIZE);
- if (phys == MEMBLOCK_ERROR) {
+ if (!phys) {
pr_warning("NUMA: Warning: can't allocate distance table!\n");
/* don't retry until explicitly reset */
numa_distance = (void *)1LU;
return -ENOMEM;
}
- memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+ memblock_reserve(phys, size);
numa_distance = __va(phys);
numa_distance_cnt = cnt;
@@ -482,8 +474,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
numaram = 0;
}
- e820ram = max_pfn - (memblock_x86_hole_size(0,
- PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
+ e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+
/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
@@ -505,13 +497,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (WARN_ON(nodes_empty(node_possible_map)))
return -EINVAL;
- for (i = 0; i < mi->nr_blks; i++)
- memblock_x86_register_active_regions(mi->blk[i].nid,
- mi->blk[i].start >> PAGE_SHIFT,
- mi->blk[i].end >> PAGE_SHIFT);
-
- /* for out of order entries */
- sort_node_map();
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *mb = &mi->blk[i];
+ memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+ }
/*
* If sections array is gonna be used for pfn -> nid mapping, check
@@ -545,6 +534,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
setup_node_data(nid, start, end);
}
+ /* Dump memblock with node info and return. */
+ memblock_dump_all();
return 0;
}
@@ -582,7 +573,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
- remove_all_active_ranges();
+ WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
numa_reset_distance();
ret = init_func();
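
With early_node_map[] gone, node coverage is recorded directly in memblock. A hedged kernel-context sketch of memblock_set_node() as used above; the NUMA block and its addresses are invented for illustration:

#include <linux/memblock.h>

static void __init node_setup_sketch(void)
{
	/* UMA / reset case: attribute all memory to one node */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);

	/* NUMA case: attribute a specific physical block to node 1
	 * (hypothetical 1GB block at 4GB, purely illustrative) */
	memblock_set_node(0x100000000ULL, 0x40000000ULL, 1);
}
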
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3adebe7e536a..534255a36b6b 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -199,23 +199,23 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
/* allocate node memory and the lowmem remap area */
node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
- if (node_pa == MEMBLOCK_ERROR) {
+ if (!node_pa) {
pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
size, nid);
return;
}
- memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+ memblock_reserve(node_pa, size);
remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
max_low_pfn << PAGE_SHIFT,
size, LARGE_PAGE_BYTES);
- if (remap_pa == MEMBLOCK_ERROR) {
+ if (!remap_pa) {
pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
size, nid);
- memblock_x86_free_range(node_pa, node_pa + size);
+ memblock_free(node_pa, size);
return;
}
- memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+ memblock_reserve(remap_pa, size);
remap_va = phys_to_virt(remap_pa);
/* perform actual remap */
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index dd27f401f0a0..92e27119ee1a 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -19,7 +19,7 @@ unsigned long __init numa_free_all_bootmem(void)
for_each_online_node(i)
pages += free_all_bootmem_node(NODE_DATA(i));
- pages += free_all_memory_core_early(MAX_NUMNODES);
+ pages += free_low_memory_core_early(MAX_NUMNODES);
return pages;
}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index d0ed086b6247..46db56845f18 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
return -ENOENT;
}
+static u64 mem_hole_size(u64 start, u64 end)
+{
+ unsigned long start_pfn = PFN_UP(start);
+ unsigned long end_pfn = PFN_DOWN(end);
+
+ if (start_pfn < end_pfn)
+ return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+ return 0;
+}
+
/*
* Sets up nid to range from @start to @end. The return value is -errno if
* something went wrong, 0 otherwise.
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* Calculate target node size. x86_32 freaks on __udivdi3() so do
* the division in ulong number of pages and convert back.
*/
- size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+ size = max_addr - addr - mem_hole_size(addr, max_addr);
size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
/*
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* Continue to add memory to this fake node if its
* non-reserved memory is less than the per-node size.
*/
- while (end - start -
- memblock_x86_hole_size(start, end) < size) {
+ while (end - start - mem_hole_size(start, end) < size) {
end += FAKE_NODE_MIN_SIZE;
if (end > limit) {
end = limit;
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* this one must extend to the boundary.
*/
if (end < dma32_end && dma32_end - end -
- memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
end = dma32_end;
/*
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* next node, this one must extend to the end of the
* physical node.
*/
- if (limit - end -
- memblock_x86_hole_size(end, limit) < size)
+ if (limit - end - mem_hole_size(end, limit) < size)
end = limit;
ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
u64 end = start + size;
- while (end - start - memblock_x86_hole_size(start, end) < size) {
+ while (end - start - mem_hole_size(start, end) < size) {
end += FAKE_NODE_MIN_SIZE;
if (end > max_addr) {
end = max_addr;
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* creates a uniform distribution of node sizes across the entire
* machine (but not necessarily over physical nodes).
*/
- min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
- MAX_NUMNODES;
+ min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
min_size = max(min_size, FAKE_NODE_MIN_SIZE);
if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
min_size = (min_size + FAKE_NODE_MIN_SIZE) &
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* this one must extend to the boundary.
*/
if (end < dma32_end && dma32_end - end -
- memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
end = dma32_end;
/*
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* next node, this one must extend to the end of the
* physical node.
*/
- if (limit - end -
- memblock_x86_hole_size(end, limit) < size)
+ if (limit - end - mem_hole_size(end, limit) < size)
end = limit;
ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -351,11 +357,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
phys_size, PAGE_SIZE);
- if (phys == MEMBLOCK_ERROR) {
+ if (!phys) {
pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
goto no_emu;
}
- memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+ memblock_reserve(phys, phys_size);
phys_dist = __va(phys);
for (i = 0; i < numa_dist_cnt; i++)
@@ -424,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
/* free the copied physical distance table */
if (phys_dist)
- memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+ memblock_free(__pa(phys_dist), phys_size);
return;
no_emu:
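
mem_hole_size() reduces the old helper to "span minus present pages". A standalone sketch with a faked absent_pages_in_range(); the hole location is invented for the demo:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PFN_UP(x)   (((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(p) ((uint64_t)(p) << PAGE_SHIFT)

/* fake absent_pages_in_range(): pretend PFNs [0x100, 0x110) are a hole */
static unsigned long absent_pages_in_range(unsigned long s, unsigned long e)
{
	unsigned long hs = s > 0x100 ? s : 0x100;
	unsigned long he = e < 0x110 ? e : 0x110;

	return he > hs ? he - hs : 0;
}

static uint64_t mem_hole_size(uint64_t start, uint64_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (start_pfn < end_pfn)
		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
	return 0;
}

int main(void)
{
	/* 16-page hole inside [0, 2MB) -> 0x10000 bytes */
	printf("hole: %#llx\n", (unsigned long long)mem_hole_size(0, 0x200000));
	return 0;
}
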
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index bfab3fa10edc..7b65f752c5f8 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
break;
}
if (filter[i].jt != 0) {
- if (filter[i].jf)
- t_offset += is_near(f_offset) ? 2 : 6;
+ if (filter[i].jf && f_offset)
+ t_offset += is_near(f_offset) ? 2 : 5;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
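
Both fixes are size accounting: a short JMP (EB rel8) is 2 bytes, a near JMP (E9 rel32) is 5 (the old code used 6, which is the size of a near Jcc), and EMIT_JMP() is assumed to emit nothing at all for a zero offset, hence the added f_offset guard. A standalone sketch of the corrected accounting:

#include <stdio.h>

/* assumed to match the JIT's is_near(): fits in a signed byte */
static int is_near(int off) { return off >= -128 && off <= 127; }

/* bytes EMIT_JMP(off) occupies: nothing for 0, EB rel8 = 2, E9 rel32 = 5 */
static int jmp_size(int off)
{
	if (off == 0)
		return 0;
	return is_near(off) ? 2 : 5;
}

int main(void)
{
	printf("%d %d %d\n", jmp_size(0), jmp_size(100), jmp_size(1000)); /* 0 2 5 */
	return 0;
}
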
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
index 446902b2a6b6..1599f568f0e2 100644
--- a/arch/x86/oprofile/Makefile
+++ b/arch/x86/oprofile/Makefile
@@ -4,9 +4,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
- timer_int.o )
+ timer_int.o nmi_timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
op_model_ppro.o op_model_p4.o
-oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
index f148cf652678..9e138d00ad36 100644
--- a/arch/x86/oprofile/init.c
+++ b/arch/x86/oprofile/init.c
@@ -16,37 +16,23 @@
* with the NMI mode driver.
*/
+#ifdef CONFIG_X86_LOCAL_APIC
extern int op_nmi_init(struct oprofile_operations *ops);
-extern int op_nmi_timer_init(struct oprofile_operations *ops);
extern void op_nmi_exit(void);
-extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+#else
+static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; }
+static void op_nmi_exit(void) { }
+#endif
-static int nmi_timer;
+extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- int ret;
-
- ret = -ENODEV;
-
-#ifdef CONFIG_X86_LOCAL_APIC
- ret = op_nmi_init(ops);
-#endif
- nmi_timer = (ret != 0);
-#ifdef CONFIG_X86_IO_APIC
- if (nmi_timer)
- ret = op_nmi_timer_init(ops);
-#endif
ops->backtrace = x86_backtrace;
-
- return ret;
+ return op_nmi_init(ops);
}
-
void oprofile_arch_exit(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
- if (!nmi_timer)
- op_nmi_exit();
-#endif
+ op_nmi_exit();
}
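
The cleanup leans on a common kernel idiom: keep the #ifdef in one place and give the compiled-out case static stubs, so callers stay unconditional. A minimal standalone rendering of the pattern (CONFIG_FEATURE is a placeholder name):

#include <errno.h>
#include <stdio.h>

#ifdef CONFIG_FEATURE
extern int feature_init(void);
extern void feature_exit(void);
#else
static int feature_init(void) { return -ENODEV; }
static void feature_exit(void) { }
#endif

int main(void)
{
	printf("init: %d\n", feature_init()); /* -ENODEV when compiled out */
	feature_exit();
	return 0;
}
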
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 75f9528e0372..26b8a8514ee5 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -595,24 +595,36 @@ static int __init p4_init(char **cpu_type)
return 0;
}
-static int force_arch_perfmon;
-static int force_cpu_type(const char *str, struct kernel_param *kp)
+enum __force_cpu_type {
+ reserved = 0, /* do not force */
+ timer,
+ arch_perfmon,
+};
+
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
{
- if (!strcmp(str, "arch_perfmon")) {
- force_arch_perfmon = 1;
+ if (!strcmp(str, "timer")) {
+ force_cpu_type = timer;
+ printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
+ } else if (!strcmp(str, "arch_perfmon")) {
+ force_cpu_type = arch_perfmon;
printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
+ } else {
+ force_cpu_type = 0;
}
return 0;
}
-module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
__u8 cpu_model = boot_cpu_data.x86_model;
struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
- if (force_arch_perfmon && cpu_has_arch_perfmon)
+ if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
return 0;
/*
@@ -679,6 +691,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
if (!cpu_has_apic)
return -ENODEV;
+ if (force_cpu_type == timer)
+ return -ENODEV;
+
switch (vendor) {
case X86_VENDOR_AMD:
/* Needs to be at least an Athlon (or hammer in 32bit mode) */
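
The parameter is now parsed into an enum instead of a single flag. A standalone sketch of the string-to-enum mapping, with hypothetical names but the same shape as set_cpu_type() above:

#include <stdio.h>
#include <string.h>

enum force_type { RESERVED, TIMER, ARCH_PERFMON }; /* 0 = do not force */

static enum force_type parse_cpu_type(const char *str)
{
	if (!strcmp(str, "timer"))
		return TIMER;
	if (!strcmp(str, "arch_perfmon"))
		return ARCH_PERFMON;
	return RESERVED;
}

int main(void)
{
	printf("%d %d %d\n", parse_cpu_type("timer"),
	       parse_cpu_type("arch_perfmon"), parse_cpu_type("core2")); /* 1 2 0 */
	return 0;
}
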
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
deleted file mode 100644
index 7f8052cd6620..000000000000
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * @file nmi_timer_int.c
- *
- * @remark Copyright 2003 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo <zwane@linuxpower.ca>
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/rcupdate.h>
-#include <linux/kdebug.h>
-
-#include <asm/nmi.h>
-#include <asm/apic.h>
-#include <asm/ptrace.h>
-
-static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
-{
- oprofile_add_sample(regs, 0);
- return NMI_HANDLED;
-}
-
-static int timer_start(void)
-{
- if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
- 0, "oprofile-timer"))
- return 1;
- return 0;
-}
-
-
-static void timer_stop(void)
-{
- unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
- synchronize_sched(); /* Allow already-started NMIs to complete. */
-}
-
-
-int __init op_nmi_timer_init(struct oprofile_operations *ops)
-{
- ops->start = timer_start;
- ops->stop = timer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
- return 0;
-}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 37718f0f053d..4a01967f02e7 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -352,8 +352,7 @@ void __init efi_memblock_x86_reserve_range(void)
boot_params.efi_info.efi_memdesc_size;
memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
- memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size,
- "EFI memmap");
+ memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
}
#if EFI_DEBUG
@@ -397,16 +396,14 @@ void __init efi_reserve_boot_services(void)
if ((start+size >= virt_to_phys(_text)
&& start <= virt_to_phys(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
- memblock_x86_check_reserved_size(&start, &size,
- 1<<EFI_PAGE_SHIFT)) {
+ memblock_is_region_reserved(start, size)) {
/* Could not reserve, skip it */
md->num_pages = 0;
memblock_dbg(PFX "Could not reserve boot range "
"[0x%010llx-0x%010llx]\n",
start, start+size-1);
} else
- memblock_x86_reserve_range(start, start+size,
- "EFI Boot");
+ memblock_reserve(start, size);
}
}
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index f82082677337..d511aa97533a 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -18,14 +18,21 @@ chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
quiet_cmd_posttest = TEST $@
cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose)
-posttest: $(obj)/test_get_len vmlinux
+quiet_cmd_sanitytest = TEST $@
+ cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000
+
+posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
$(call cmd,posttest)
+ $(call cmd,sanitytest)
-hostprogs-y := test_get_len
+hostprogs-y += test_get_len insn_sanity
# -I needed for generated C source and C source which is in the kernel tree.
HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
+HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
+
# Dependencies are also needed.
$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index eaf11f52fc0b..5f6a5b6c3a15 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -47,7 +47,7 @@ BEGIN {
sep_expr = "^\\|$"
group_expr = "^Grp[0-9A-Za-z]+"
- imm_expr = "^[IJAO][a-z]"
+ imm_expr = "^[IJAOL][a-z]"
imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
@@ -59,6 +59,7 @@ BEGIN {
imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
imm_flag["Ob"] = "INAT_MOFFSET"
imm_flag["Ov"] = "INAT_MOFFSET"
+ imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
@@ -70,8 +71,12 @@ BEGIN {
lprefix3_expr = "\\(F2\\)"
max_lprefix = 4
- vexok_expr = "\\(VEX\\)"
- vexonly_expr = "\\(oVEX\\)"
+	# All opcodes starting with lower-case 'v' or with the (v1) superscript
+	# accept the VEX prefix
+ vexok_opcode_expr = "^v.*"
+ vexok_expr = "\\(v1\\)"
+	# All opcodes with the (v) superscript support *only* the VEX prefix
+ vexonly_expr = "\\(v\\)"
prefix_expr = "\\(Prefix\\)"
prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
@@ -85,8 +90,8 @@ BEGIN {
prefix_num["SEG=GS"] = "INAT_PFX_GS"
prefix_num["SEG=SS"] = "INAT_PFX_SS"
prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
- prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2"
- prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3"
+ prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+ prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
clear_vars()
}
@@ -310,12 +315,10 @@ function convert_operands(count,opnd, i,j,imm,mod)
if (match(opcode, fpu_expr))
flags = add_flags(flags, "INAT_MODRM")
- # check VEX only code
+ # check VEX codes
if (match(ext, vexonly_expr))
flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
-
- # check VEX only code
- if (match(ext, vexok_expr))
+ else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
flags = add_flags(flags, "INAT_VEXOK")
# check prefixes
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
new file mode 100644
index 000000000000..cc2f8c131286
--- /dev/null
+++ b/arch/x86/tools/insn_sanity.c
@@ -0,0 +1,275 @@
+/*
+ * x86 decoder sanity test - based on test_get_insn.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ * Copyright (C) Hitachi, Ltd., 2011
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define unlikely(cond) (cond)
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+
+#include <asm/insn.h>
+#include <inat.c>
+#include <insn.c>
+
+/*
+ * Test of instruction analysis against tampering.
+ * Feed random binary to instruction decoder and ensure not to
+ * access out-of-instruction-buffer.
+ */
+
+#define DEFAULT_MAX_ITER 10000
+#define INSN_NOP 0x90
+
+static const char *prog; /* Program name */
+static int verbose; /* Verbosity */
+static int x86_64; /* x86-64 bit mode flag */
+static unsigned int seed; /* Random seed */
+static unsigned long iter_start; /* Start of iteration number */
+static unsigned long iter_end = DEFAULT_MAX_ITER; /* End of iteration number */
+static FILE *input_file; /* Input file name */
+
+static void usage(const char *err)
+{
+ if (err)
+ fprintf(stderr, "Error: %s\n\n", err);
+ fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
+ fprintf(stderr, "\t-y 64bit mode\n");
+ fprintf(stderr, "\t-n 32bit mode\n");
+ fprintf(stderr, "\t-v Verbosity(-vv dumps any decoded result)\n");
+ fprintf(stderr, "\t-s Give a random seed (and iteration number)\n");
+ fprintf(stderr, "\t-m Give a maximum iteration number\n");
+ fprintf(stderr, "\t-i Give an input file with decoded binary\n");
+ exit(1);
+}
+
+static void dump_field(FILE *fp, const char *name, const char *indent,
+ struct insn_field *field)
+{
+ fprintf(fp, "%s.%s = {\n", indent, name);
+ fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n",
+ indent, field->value, field->bytes[0], field->bytes[1],
+ field->bytes[2], field->bytes[3]);
+ fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent,
+ field->got, field->nbytes);
+}
+
+static void dump_insn(FILE *fp, struct insn *insn)
+{
+ fprintf(fp, "Instruction = {\n");
+ dump_field(fp, "prefixes", "\t", &insn->prefixes);
+ dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix);
+ dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix);
+ dump_field(fp, "opcode", "\t", &insn->opcode);
+ dump_field(fp, "modrm", "\t", &insn->modrm);
+ dump_field(fp, "sib", "\t", &insn->sib);
+ dump_field(fp, "displacement", "\t", &insn->displacement);
+ dump_field(fp, "immediate1", "\t", &insn->immediate1);
+ dump_field(fp, "immediate2", "\t", &insn->immediate2);
+ fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n",
+ insn->attr, insn->opnd_bytes, insn->addr_bytes);
+ fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n",
+ insn->length, insn->x86_64, insn->kaddr);
+}
+
+static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter,
+ unsigned char *insn_buf, struct insn *insn)
+{
+ int i;
+
+ fprintf(fp, "%s:\n", msg);
+
+ dump_insn(fp, insn);
+
+ fprintf(fp, "You can reproduce this with below command(s);\n");
+
+ /* Input a decoded instruction sequence directly */
+ fprintf(fp, " $ echo ");
+ for (i = 0; i < MAX_INSN_SIZE; i++)
+ fprintf(fp, " %02x", insn_buf[i]);
+ fprintf(fp, " | %s -i -\n", prog);
+
+ if (!input_file) {
+ fprintf(fp, "Or \n");
+ /* Give a seed and iteration number */
+ fprintf(fp, " $ %s -s 0x%x,%lu\n", prog, seed, nr_iter);
+ }
+}
+
+static void init_random_seed(void)
+{
+ int fd;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0)
+ goto fail;
+
+ if (read(fd, &seed, sizeof(seed)) != sizeof(seed))
+ goto fail;
+
+ close(fd);
+ return;
+fail:
+ usage("Failed to open /dev/urandom");
+}
+
+/* Read given instruction sequence from the input file */
+static int read_next_insn(unsigned char *insn_buf)
+{
+ char buf[256] = "", *tmp;
+ int i;
+
+ tmp = fgets(buf, ARRAY_SIZE(buf), input_file);
+ if (tmp == NULL || feof(input_file))
+ return 0;
+
+ for (i = 0; i < MAX_INSN_SIZE; i++) {
+ insn_buf[i] = (unsigned char)strtoul(tmp, &tmp, 16);
+ if (*tmp != ' ')
+ break;
+ }
+
+ return i;
+}
+
+static int generate_insn(unsigned char *insn_buf)
+{
+ int i;
+
+ if (input_file)
+ return read_next_insn(insn_buf);
+
+ /* Fills buffer with random binary up to MAX_INSN_SIZE */
+ for (i = 0; i < MAX_INSN_SIZE - 1; i += 2)
+ *(unsigned short *)(&insn_buf[i]) = random() & 0xffff;
+
+ while (i < MAX_INSN_SIZE)
+ insn_buf[i++] = random() & 0xff;
+
+ return i;
+}
+
+static void parse_args(int argc, char **argv)
+{
+ int c;
+ char *tmp = NULL;
+ int set_seed = 0;
+
+ prog = argv[0];
+ while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) {
+ switch (c) {
+ case 'y':
+ x86_64 = 1;
+ break;
+ case 'n':
+ x86_64 = 0;
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'i':
+ if (strcmp("-", optarg) == 0)
+ input_file = stdin;
+ else
+ input_file = fopen(optarg, "r");
+ if (!input_file)
+ usage("Failed to open input file");
+ break;
+ case 's':
+ seed = (unsigned int)strtoul(optarg, &tmp, 0);
+ if (*tmp == ',') {
+ optarg = tmp + 1;
+ iter_start = strtoul(optarg, &tmp, 0);
+ }
+ if (*tmp != '\0' || tmp == optarg)
+ usage("Failed to parse seed");
+ set_seed = 1;
+ break;
+ case 'm':
+ iter_end = strtoul(optarg, &tmp, 0);
+ if (*tmp != '\0' || tmp == optarg)
+ usage("Failed to parse max_iter");
+ break;
+ default:
+ usage(NULL);
+ }
+ }
+
+ /* Check errors */
+ if (iter_end < iter_start)
+ usage("Max iteration number must be bigger than iter-num");
+
+ if (set_seed && input_file)
+ usage("Don't use input file (-i) with random seed (-s)");
+
+ /* Initialize random seed */
+ if (!input_file) {
+ if (!set_seed) /* No seed is given */
+ init_random_seed();
+ srand(seed);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct insn insn;
+ int insns = 0;
+ int errors = 0;
+ unsigned long i;
+ unsigned char insn_buf[MAX_INSN_SIZE * 2];
+
+ parse_args(argc, argv);
+
+ /* Prepare stop bytes with NOPs */
+ memset(insn_buf + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE);
+
+ for (i = 0; i < iter_end; i++) {
+ if (generate_insn(insn_buf) <= 0)
+ break;
+
+ if (i < iter_start) /* Skip to given iteration number */
+ continue;
+
+ /* Decode an instruction */
+ insn_init(&insn, insn_buf, x86_64);
+ insn_get_length(&insn);
+
+ if (insn.next_byte <= insn.kaddr ||
+ insn.kaddr + MAX_INSN_SIZE < insn.next_byte) {
+ /* Access out-of-range memory */
+ dump_stream(stderr, "Error: Found an access violation", i, insn_buf, &insn);
+ errors++;
+ } else if (verbose && !insn_complete(&insn))
+ dump_stream(stdout, "Info: Found an undecodable input", i, insn_buf, &insn);
+ else if (verbose >= 2)
+ dump_insn(stdout, &insn);
+ insns++;
+ }
+
+ fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+
+ return errors ? 1 : 0;
+}
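
For reference, the Makefile hook above runs this tool with -m 1000000 after each build (the $(posttest_64bit) flag selects 64-bit mode, per the -y option in usage()). A reported failure can be replayed either by echoing the dumped bytes back through "-i -" or by passing the printed seed and iteration number with "-s seed,iter", exactly as dump_stream() suggests.
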
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 1f928659c338..12eb07bfb267 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1215,8 +1215,6 @@ asmlinkage void __init xen_start_kernel(void)
local_irq_disable();
early_boot_irqs_disabled = true;
- memblock_init();
-
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
xen_ident_map_ISA();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 87f6673b1207..f4bf8aa574f4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1774,10 +1774,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
__xen_write_cr3(true, __pa(pgd));
xen_mc_issue(PARAVIRT_LAZY_CPU);
- memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
- __pa(xen_start_info->pt_base +
- xen_start_info->nr_pt_frames * PAGE_SIZE),
- "XEN PAGETABLES");
+ memblock_reserve(__pa(xen_start_info->pt_base),
+ xen_start_info->nr_pt_frames * PAGE_SIZE);
return pgd;
}
@@ -1853,10 +1851,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
PFN_DOWN(__pa(initial_page_table)));
xen_write_cr3(__pa(initial_page_table));
- memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
- __pa(xen_start_info->pt_base +
- xen_start_info->nr_pt_frames * PAGE_SIZE),
- "XEN PAGETABLES");
+ memblock_reserve(__pa(xen_start_info->pt_base),
+ xen_start_info->nr_pt_frames * PAGE_SIZE);
return initial_page_table;
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1093f80c162d..e03c63692176 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -75,7 +75,7 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
if (i == XEN_EXTRA_MEM_MAX_REGIONS)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
- memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
+ memblock_reserve(start, size);
xen_max_p2m_pfn = PFN_DOWN(start + size);
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
domid_t domid = DOMID_SELF;
int ret;
- ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
- if (ret > 0)
- max_pages = ret;
+ /*
+ * For the initial domain we use the maximum reservation as
+ * the maximum page.
+ *
+ * For guest domains the current maximum reservation reflects
+ * the current maximum rather than the static maximum. In this
+ * case the e820 map provided to us will cover the static
+ * maximum region.
+ */
+ if (xen_initial_domain()) {
+ ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+ if (ret > 0)
+ max_pages = ret;
+ }
+
return min(max_pages, MAX_DOMAIN_PAGES);
}
@@ -299,9 +311,8 @@ char * __init xen_memory_setup(void)
* - xen_start_info
* See comment above "struct start_info" in <xen/interface/xen.h>
*/
- memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
- __pa(xen_start_info->pt_base),
- "XEN START INFO");
+ memblock_reserve(__pa(xen_start_info->mfn_list),
+ xen_start_info->pt_base - xen_start_info->mfn_list);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c80cd3..15de223c7f93 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all)
blk_throtl_drain(q);
- __blk_run_queue(q);
+ /*
+ * This function might be called on a queue which failed
+ * driver init after queue creation. Some drivers
+ * (e.g. fd) get unhappy in such cases. Kick queue iff
+ * dispatch queue has something on it.
+ */
+ if (!list_empty(&q->queue_head))
+ __blk_run_queue(q);
if (drain_all)
nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
q->backing_dev_info.name = "block";
+ q->node = node_id;
err = bdi_init(&q->backing_dev_info);
if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
- q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+ q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
blk_cleanup_queue(uninit_q);
@@ -563,18 +571,9 @@ struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock)
{
- return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
- spinlock_t *lock, int node_id)
-{
if (!q)
return NULL;
- q->node = node_id;
if (blk_init_free_list(q))
return NULL;
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
return NULL;
}
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/block/blk-map.c b/block/blk-map.c
index 164cd0059706..623e1cd4cffe 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (rq_data_dir(rq) == WRITE)
+ if (!reading)
bio->bi_rw |= REQ_WRITE;
if (do_copy)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e74d6d13838f..4af6f5cc1167 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -282,18 +282,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
- int tag = rq->tag;
+ unsigned tag = rq->tag; /* negative tags invalid */
- BUG_ON(tag == -1);
-
- if (unlikely(tag >= bqt->max_depth)) {
- /*
- * This can happen after tag depth has been reduced.
- * But tag shouldn't be larger than real_max_depth.
- */
- WARN_ON(tag >= bqt->real_max_depth);
- return;
- }
+ BUG_ON(tag >= bqt->real_max_depth);
list_del_init(&rq->queuelist);
rq->cmd_flags &= ~REQ_QUEUED;
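
The one-line replacement works through integer conversion: assigning the signed tag to an unsigned variable turns the -1 "no tag" sentinel into a huge value, so the single BUG_ON bound covers both of the old checks. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int raw = -1;         /* "no tag" sentinel */
	unsigned tag = raw;   /* converts to UINT_MAX */
	unsigned real_max_depth = 64;

	/* one comparison now rejects both -1 and out-of-range tags */
	printf("out of range: %d\n", tag >= real_max_depth); /* 1 */
	return 0;
}
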
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 16ace89613bc..3548705b04e4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1655,6 +1655,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
/*
* reposition in fifo if next is older than rq
*/
@@ -1669,6 +1671,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
cfq_remove_request(next);
cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(next), rq_is_sync(next));
+
+ cfqq = RQ_CFQQ(next);
+ /*
+ * all requests of this queue are merged to other queues, delete it
+ * from the service tree. If it's the active_queue,
+ * cfq_dispatch_requests() will choose to expire it or do idle
+ */
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+ cfqq != cfqd->active_queue)
+ cfq_del_cfqq_rr(cfqd, cfqq);
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -3184,7 +3196,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
}
}
- if (ret)
+ if (ret && ret != -EEXIST)
printk(KERN_ERR "cfq: cic link failed!\n");
return ret;
@@ -3200,6 +3212,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
+ int ret;
might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -3207,6 +3220,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (!ioc)
return NULL;
+retry:
cic = cfq_cic_lookup(cfqd, ioc);
if (cic)
goto out;
@@ -3215,7 +3229,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (cic == NULL)
goto err;
- if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+ ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+ if (ret == -EEXIST) {
+ /* someone has linked cic to ioc already */
+ cfq_cic_free(cic);
+ goto retry;
+ } else if (ret)
goto err_free;
out:
@@ -4036,6 +4055,11 @@ static void *cfq_init_queue(struct request_queue *q)
if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
kfree(cfqg);
+
+ spin_lock(&cic_index_lock);
+ ida_remove(&cic_index_ida, cfqd->cic_index);
+ spin_unlock(&cic_index_lock);
+
kfree(cfqd);
return NULL;
}
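
The retry path is the usual optimistic-insert shape: look up, allocate, try to link, and on -EEXIST free our copy and redo the lookup. A standalone sketch with hypothetical stand-ins for the cfq helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-ins: lookup misses once, link always loses the race */
static void *lookup(void)
{
	static int hit;
	return hit++ ? (void *)1 : NULL;
}

static int link_obj(void *obj)
{
	(void)obj;
	return -EEXIST; /* simulate another thread linking first */
}

int main(void)
{
	void *obj;
retry:
	obj = lookup();
	if (obj)
		goto out;
	obj = malloc(16);
	if (!obj)
		return 1;
	if (link_obj(obj) == -EEXIST) {
		free(obj); /* lost the race: drop ours, use theirs */
		goto retry;
	}
out:
	printf("using %p\n", obj);
	return 0;
}
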
diff --git a/block/ioctl.c b/block/ioctl.c
index ca939fc1030f..d510c2a4eff8 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -180,6 +180,26 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
/*
+ * Is it an unrecognized ioctl? The correct returns are either
+ * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
+ * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl
+ * code before returning.
+ *
+ * Confused drivers sometimes return EINVAL, which is wrong. It
+ * means "I understood the ioctl command, but the parameters to
+ * it were wrong".
+ *
+ * We should aim to just fix the broken drivers, the EINVAL case
+ * should go away.
+ */
+static inline int is_unrecognized_ioctl(int ret)
+{
+ return ret == -EINVAL ||
+ ret == -ENOTTY ||
+ ret == -ENOIOCTLCMD;
+}
+
+/*
* always keep this in sync with compat_blkdev_ioctl()
*/
int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -196,8 +216,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EACCES;
ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- /* -EINVAL to handle old uncorrected drivers */
- if (ret != -EINVAL && ret != -ENOTTY)
+ if (!is_unrecognized_ioctl(ret))
return ret;
fsync_bdev(bdev);
@@ -206,8 +225,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKROSET:
ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- /* -EINVAL to handle old uncorrected drivers */
- if (ret != -EINVAL && ret != -ENOTTY)
+ if (!is_unrecognized_ioctl(ret))
return ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
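
The new helper centralizes the "driver did not recognize it, fall through to the generic handler" test. A standalone sketch of that chain; driver_ioctl() and generic_ioctl() are hypothetical, and 515 is the kernel-internal ENOIOCTLCMD value:

#include <errno.h>
#include <stdio.h>

#define ENOIOCTLCMD 515 /* kernel-internal "try a fallback" value */

static int is_unrecognized_ioctl(int ret)
{
	return ret == -EINVAL || ret == -ENOTTY || ret == -ENOIOCTLCMD;
}

/* hypothetical handlers */
static int driver_ioctl(unsigned int cmd)  { (void)cmd; return -ENOIOCTLCMD; }
static int generic_ioctl(unsigned int cmd) { (void)cmd; return 0; }

int main(void)
{
	int ret = driver_ioctl(42);

	if (is_unrecognized_ioctl(ret))
		ret = generic_ioctl(42); /* generic fallback */
	printf("ret=%d\n", ret); /* 0 */
	return 0;
}
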
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6bdedd7cca2c..cf047c406d92 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -820,7 +820,7 @@ config PATA_PLATFORM
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
- depends on PATA_PLATFORM && OF
+ depends on PATA_PLATFORM && OF && OF_IRQ
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 251acea3d359..3991502b21e5 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -247,6 +247,13 @@ struct sys_device *get_cpu_sysdev(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_sysdev);
+bool cpu_is_hotpluggable(unsigned cpu)
+{
+ struct sys_device *dev = get_cpu_sysdev(cpu);
+ return dev && container_of(dev, struct cpu, sysdev)->hotpluggable;
+}
+EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
+
int __init cpu_dev_init(void)
{
int err;
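
A hedged example of how a caller might use the new helper; kernel context, with for_each_present_cpu() and pr_info() being standard kernel APIs and the function itself invented for illustration:

#include <linux/cpu.h>

static void __init report_hotpluggable(void)
{
	unsigned int cpu;

	for_each_present_cpu(cpu)
		pr_info("cpu%u hotpluggable: %d\n",
			cpu, cpu_is_hotpluggable(cpu));
}
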
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8004ac30a7a8..587cce57adae 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h))
+ 0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h))
+ IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 68b205a9338f..1e888c9e85b3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17ad..89ddab127e33 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
* handle GCR disks
*/
+#undef DEBUG
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -36,13 +38,11 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
#define MAX_FLOPPIES 2
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
enum swim_state {
idle,
locating,
@@ -177,7 +177,6 @@ struct swim3 {
struct floppy_state {
enum swim_state state;
- spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+ int index;
+ struct request *cur_req;
};
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
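
The swim3_dbg() wrappers follow the usual dev_* pattern: err/warn/info always print with a per-device prefix, while debug output compiles away unless DEBUG is defined (note the #undef DEBUG added at the top of the file). A userspace rendering of the same shape:

#include <stdio.h>

#undef DEBUG /* as at the top of swim3.c */

#define drv_err(fmt, ...) fprintf(stderr, "[fd] " fmt, __VA_ARGS__)
#ifdef DEBUG
#define drv_dbg(fmt, ...) fprintf(stderr, "[fd] " fmt, __VA_ARGS__)
#else
#define drv_dbg(fmt, ...) do { } while (0)
#endif

int main(void)
{
	drv_dbg("state=%d\n", 1);  /* compiles to nothing */
	drv_err("error=%d\n", -5); /* always printed */
	return 0;
}
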
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
0, 0, 0, 0, 0, 0
};
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
- if (__blk_end_request(fd_req, err, nr_bytes))
- return true;
+ struct request *req = fs->cur_req;
+ int rc;
- fd_req = NULL;
- return false;
-}
+ swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+ err, nr_bytes, req);
-static bool swim3_end_request_cur(int err)
-{
- return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+ if (err)
+ nr_bytes = blk_rq_cur_bytes(req);
+ rc = __blk_end_request(req, err, nr_bytes);
+ if (rc)
+ return true;
+ fs->cur_req = NULL;
+ return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void do_fd_request(struct request_queue * q)
-{
- int i;
-
- for(i=0; i<floppy_count; i++) {
- struct floppy_state *fs = &floppy_states[i];
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD)
- continue;
- start_request(fs);
- }
-}
-
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
+ swim3_dbg("start request, initial state=%d\n", fs->state);
+
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
- if (!fd_req) {
- fd_req = blk_fetch_request(swim3_queue);
- if (!fd_req)
+ swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+ if (!fs->cur_req) {
+ fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+ swim3_dbg(" fetched request %p\n", fs->cur_req);
+ if (!fs->cur_req)
break;
}
- req = fd_req;
-#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
- printk(" errors=%d current_nr_sectors=%u\n",
- req->errors, blk_rq_cur_sectors(req));
+ req = fs->cur_req;
+
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, -ENODEV, 0);
+ continue;
+ }
+
+#if 0 /* This is really too verbose */
+ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+ req->rq_disk->disk_name, req->cmd,
+ (long)blk_rq_pos(req), blk_rq_sectors(req),
+ req->buffer);
+ swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+ (long)blk_rq_pos(req), (long)fs->total_secs);
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " try to write, disk write protected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
- fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
}
}
+static void do_fd_request(struct request_queue * q)
+{
+ start_request(q->queuedata);
+}
+
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
- unsigned long flags;
-
- spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
- spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
+ struct request *req = fs->cur_req;
- if (blk_rq_cur_sectors(fd_req) <= 0) {
- printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+ if (blk_rq_cur_sectors(req) <= 0) {
+ swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > blk_rq_cur_sectors(fd_req))
- n = blk_rq_cur_sectors(fd_req);
+ if (n > blk_rq_cur_sectors(req))
+ n = blk_rq_cur_sectors(req);
}
+
+ swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+ fs->req_sector, fs->secpertrack, fs->head, n);
+
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
- if (rq_data_dir(fd_req) == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
static void act(struct floppy_state *fs)
{
for (;;) {
+ swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+ fs->state, fs->req_cyl, fs->cur_cyl);
+
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
+ swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
break;
}
if (fs->req_cyl == fs->cur_cyl) {
- printk("whoops, seeking 0\n");
+ swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+ fs->req_cyl, fs->cur_cyl);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
return;
default:
- printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+ swim3_err("Unknown state %d\n", fs->state);
return;
}
}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* scan timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* seek timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- printk(KERN_ERR "swim3: seek timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* settle timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
- return;
+ goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
- return;
+ goto unlock;
}
- printk(KERN_ERR "swim3: seek settle timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek settle timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ unlock:
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
+ unsigned long flags;
int n;
+ swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"),
- (long)blk_rq_pos(fd_req));
- swim3_end_request_cur(-EIO);
+ swim3_err("Timeout %sing sector %ld\n",
+ (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fs->cur_req));
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
+ unsigned long flags;
+ struct request *req = fs->cur_req;
+
+ swim3_dbg("* interrupt, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
- printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
+ swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
- printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+ swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
- printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
- fs->expect_cyl, fs->cur_cyl);
+ swim3_err("Expected cyl %d, got %d\n",
+ fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- blk_update_request(fd_req, 0, n << 9);
+ blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
- printk("swim3: error %sing block %ld (err=%x)\n",
- rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)blk_rq_pos(fd_req), err);
- swim3_end_request_cur(-EIO);
+ swim3_err("Error %sing block %ld (err=%x)\n",
+ rq_data_dir(req) == WRITE? "writ": "read",
+ (long)blk_rq_pos(req), err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* must have been an error */
- printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
- printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
- swim3_end_request_cur(-EIO);
+ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+ swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
- if (swim3_end_request(0, fs->scount << 9)) {
+ fs->retries = 0;
+ if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
start_request(fs);
break;
default:
- printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+ swim3_err("Don't know what to do in state %d\n", fs->state);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
}
*/
+/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
- if (fs->state != idle) {
+ swim3_dbg("%s", "-> grab drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
+ if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
+ spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
- spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
+ spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
+
return 0;
}
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
+ swim3_dbg("%s", "-> release drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct floppy_state *fs = macio_get_drvdata(mdev);
+ struct swim3 __iomem *sw = fs->swim3;
+
+ if (!fs)
+ return;
+ if (mb_state != MB_FD)
+ return;
+
+ /* Clear state */
+ out_8(&sw->intr_enable, 0);
+ in_8(&sw->intr);
+ in_8(&sw->error);
+}
+
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
+ /* Do this first for message macros */
+ memset(fs, 0, sizeof(*fs));
+ fs->mdev = mdev;
+ fs->index = index;
+
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
- printk(KERN_WARNING "ifd%d: no address for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
- if (macio_irq_count(mdev) < 2) {
- printk(KERN_WARNING "fd%d: no intrs for device %s\n",
- index, swim->full_name);
+ if (macio_irq_count(mdev) < 1) {
+ swim3_err("%s", "No interrupt in device-tree\n");
+ return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
- printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
- printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
- memset(fs, 0, sizeof(*fs));
- spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
- printk("fd%d: couldn't map registers for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
- printk("fd%d: couldn't map DMA for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+ swim3_mb_event(mdev, MB_FD);
+
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
- printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
- index, fs->swim3_intr, swim->full_name);
+ swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
-/*
- if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
- printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
- fs->dma_intr);
- return -EBUSY;
- }
-*/
init_timer(&fs->timeout);
- printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+ swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
- int i, rc;
struct gendisk *disk;
+ int index, rc;
+
+ index = floppy_count++;
+ if (index >= MAX_FLOPPIES)
+ return -ENXIO;
/* Add the drive */
- rc = swim3_add_device(mdev, floppy_count);
+ rc = swim3_add_device(mdev, index);
if (rc)
return rc;
+ /* Now register that disk. Same comment about failure handling */
+ disk = disks[index] = alloc_disk(1);
+ if (disk == NULL)
+ return -ENOMEM;
+ disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (disk->queue == NULL) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ disk->queue->queuedata = &floppy_states[index];
- /* Now create the queue if not there yet */
- if (swim3_queue == NULL) {
+ if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
- swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (swim3_queue == NULL) {
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- return 0;
- }
}
- /* Now register that disk. Same comment about failure handling */
- i = floppy_count++;
- disk = disks[i] = alloc_disk(1);
- if (disk == NULL)
- return 0;
-
disk->major = FLOPPY_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = index;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[i];
- disk->queue = swim3_queue;
+ disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", i);
+ sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
.of_match_table = swim3_match,
},
.probe = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = swim3_mb_event,
+#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
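
The core of this swim3 rework is retiring the driver-global fd_req/swim3_queue pair in favour of per-drive state: each drive keeps its own cur_req, owns its own request queue, and the queue's queuedata points back at the drive. A minimal userspace sketch of that pattern (hypothetical names, not the driver's API):

#include <stdio.h>

/* Hypothetical per-device state: each device carries its own
 * current request instead of sharing one global. */
struct request { int id; };

struct device_state {
    int index;
    struct request *cur_req;   /* was a single global before */
};

/* The queue hands back the owning device via its queuedata
 * equivalent, so the handler needs no global lookup. */
static void handle_request(void *queuedata, struct request *req)
{
    struct device_state *dev = queuedata;

    dev->cur_req = req;
    printf("dev%d: handling request %d\n", dev->index, req->id);
    dev->cur_req = NULL;       /* request completed */
}

int main(void)
{
    struct device_state devs[2] = { { .index = 0 }, { .index = 1 } };
    struct request r0 = { .id = 42 }, r1 = { .id = 43 };

    handle_request(&devs[0], &r0);
    handle_request(&devs[1], &r1);
    return 0;
}
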
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c27..5ccf142ef0b8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2cf..27b74b0d547b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fe4ebc375b3d..eabc437ce500 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -777,9 +777,8 @@ skip_waking:
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
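
The btusb change above is purely about exit paths: moving usb_free_urb() under the done: label means every return, early or late, releases the URB exactly once. A small sketch of the single-exit cleanup idiom, using malloc/free as stand-ins:

#include <stdio.h>
#include <stdlib.h>

/* Single-exit cleanup: every path, success or error, falls
 * through one label that releases the resource exactly once. */
static int submit(int fail_early)
{
    int err = 0;
    char *buf = malloc(64);    /* stands in for the URB */

    if (!buf)
        return -1;

    if (fail_early) {
        err = -2;
        goto done;             /* early exit still frees buf */
    }

    snprintf(buf, 64, "submitted");
    printf("%s\n", buf);
done:
    free(buf);                 /* one release point for all paths */
    return err;
}

int main(void)
{
    printf("ok path: %d\n", submit(0));
    printf("err path: %d\n", submit(1));
    return 0;
}
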
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index c2917ffad2c2..34767a6d7f42 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -139,6 +139,8 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
/* These are here until the real ones get into the watchdog.h interface. */
#ifndef WDIOC_GETTIMEOUT
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
+restart:
atomic_set(&heartbeat_tofree, 2);
/*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
- if (heartbeat_recv_msg.msg.data[0] != 0) {
+ if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ printk(KERN_ERR PFX ": Unable to restore the IPMI"
+ " watchdog's settings, giving up.\n");
+ rv = -EIO;
+ goto out_unlock;
+ }
+
+ /*
+ * The timer was not initialized, that means the BMC was
+ * probably reset and lost the watchdog information. Attempt
+ * to restore the timer's info. Note that we still hold
+ * the heartbeat lock, to keep a heartbeat from happening
+ * in this process, so must say no heartbeat to avoid a
+ * deadlock on this mutex.
+ */
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ printk(KERN_ERR PFX ": Unable to send the command to"
+ " set the watchdog's settings, giving up.\n");
+ goto out_unlock;
+ }
+
+ /* We might need a new heartbeat, so do it now */
+ goto restart;
+ } else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
+out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
- if (msg->msg.data[0] != 0) {
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ printk(KERN_INFO PFX "response: The IPMI controller appears"
+ " to have been reset, will attempt to reinitialize"
+ " the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
- }
ipmi_free_recv_msg(msg);
}
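
The heartbeat logic above reprograms the watchdog when the BMC answers 0x80 (timer not initialized, typically after a BMC reset) and then retries the heartbeat, capped at three attempts so a broken controller cannot loop forever. A compact model of that bounded restart, with a toy device in place of the IPMI stack:

#include <stdio.h>

#define TIMER_NOT_INIT 0x80   /* modeled on the response code above */

/* Pretend BMC: reports "not initialized" until configured. */
static int configured;
static int send_heartbeat(void) { return configured ? 0 : TIMER_NOT_INIT; }
static int restore_settings(void) { configured = 1; return 0; }

static int heartbeat(void)
{
    int retries = 0, rc;
restart:
    rc = send_heartbeat();
    if (rc == TIMER_NOT_INIT) {
        if (++retries > 3)
            return -1;        /* give up, as the driver does */
        if (restore_settings())
            return -1;
        goto restart;         /* the restored timer needs a beat */
    }
    return rc ? -1 : 0;
}

int main(void)
{
    printf("heartbeat: %d after reset recovery\n", heartbeat());
    return 0;
}
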
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ab8f469f5cf8..5a99bb3f255a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on ARCH_MX3
+ depends on SOC_IMX31 || SOC_IMX35
select DMA_ENGINE
default y
help
@@ -216,7 +216,7 @@ config PCH_DMA
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb107904..2cce44a1d7d0 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
ibft_cleanup();
}
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
{
int rc = 0;
+ /*
+ * On UEFI systems, setup_arch()/find_ibft_region() runs before
+ * the ACPI tables are parsed, so it can only do the legacy
+ * memory scan; retry the lookup via ACPI here.
+ */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
if (ibft_addr) {
- printk(KERN_INFO "iBFT detected at 0x%llx.\n",
- (u64)isa_virt_to_bus(ibft_addr));
+ pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd8..4da4eb9ae926 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
static const struct {
char *sign;
} ibft_signs[] = {
-#ifdef CONFIG_ACPI
- /*
- * One spec says "IBFT", the other says "iBFT". We have to check
- * for both.
- */
- { ACPI_SIG_IBFT },
-#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
@@ -62,14 +55,6 @@ static const struct {
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
- ibft_addr = (struct acpi_table_ibft *)header;
- return 0;
-}
-#endif /* CONFIG_ACPI */
-
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
@@ -108,20 +94,12 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
-#ifdef CONFIG_ACPI
- int i;
-#endif
ibft_addr = NULL;
-#ifdef CONFIG_ACPI
- for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
- acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!ibft_addr && !efi_enabled)
+ if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {
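
find_ibft_in_mem() scans low memory for a matching table signature and accepts a hit only if the advertised length stays inside the region; on EFI systems the table is published via ACPI instead, hence the !efi_enabled guard and the new ACPI lookup at ibft_init() time. A simplified model of the signature scan (toy header layout, not the real iBFT format):

#include <stdio.h>
#include <string.h>

/* Toy table header modeled on an ACPI-style header: 4-byte
 * signature followed by a 32-bit length. */
struct table_hdr { char sig[4]; unsigned int len; };

static const char *scan_for_table(const char *mem, size_t size,
                                  const char *sig)
{
    /* Tables are 16-byte aligned in the real scan. */
    for (size_t pos = 0; pos + sizeof(struct table_hdr) <= size; pos += 16) {
        const struct table_hdr *h = (const void *)(mem + pos);

        if (memcmp(h->sig, sig, 4) == 0 && pos + h->len <= size)
            return mem + pos;  /* signature and length both fit */
    }
    return NULL;
}

int main(void)
{
    char mem[256] = { 0 };
    struct table_hdr h = { { 'i', 'B', 'F', 'T' }, 32 };
    memcpy(mem + 64, &h, sizeof(h));

    const char *found = scan_for_table(mem, sizeof(mem), "iBFT");
    printf("table %sfound at offset %td\n",
           found ? "" : "not ", found ? found - mem : -1);
    return 0;
}
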
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13d..f8ce29ef9f88 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
@@ -43,6 +42,9 @@
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
+#define DA9052_IRQ_GPI0 16
+#define DA9052_GPIO_ODD_SHIFT 7
+#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
- unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
- if (value) {
- register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
- }
} else {
- if (value) {
- register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
- }
}
}
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1;
- .ngpio = 16;
- .base = -1;
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
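
The da9052 fix replaces the "write only when value is 1" branches with one unconditional masked update, shifting the 0-or-1 value onto the port-mode bit so that clearing a pin works as well as setting it. The reg_update() semantics, sketched against a hypothetical register file:

#include <stdio.h>

/* Hypothetical 8-bit register file. */
static unsigned char regs[4];

/* Read-modify-write: clear the masked bits, then OR in the new
 * value. This models the reg_update() call used above. */
static void reg_update(int reg, unsigned char mask, unsigned char val)
{
    regs[reg] = (regs[reg] & ~mask) | (val & mask);
}

#define ODD_PORT_MODE  0x80   /* bit 7, hence the shift by 7 */
#define ODD_SHIFT      7

int main(void)
{
    reg_update(0, ODD_PORT_MODE, 1 << ODD_SHIFT); /* set the pin */
    printf("reg0 after set:   0x%02x\n", regs[0]);

    reg_update(0, ODD_PORT_MODE, 0 << ODD_SHIFT); /* and clear it */
    printf("reg0 after clear: 0x%02x\n", regs[0]);
    return 0;
}
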
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e73869250..461958fc2264 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}
+static void ioh_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien &= ~(1 << (d->irq - chip->irq_base));
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien |= 1 << (d->irq - chip->irq_base);
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
+ ct->chip.irq_disable = ioh_irq_disable;
+ ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
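
The new ioh_irq_enable()/ioh_irq_disable() callbacks do a locked read-modify-write of the shared interrupt-enable register; without the lock, two concurrent updates could each load the old value and silently drop the other's bit. The same pattern in miniature, with a mutex standing in for the spinlock:

#include <stdio.h>
#include <pthread.h>

static unsigned int ien;                /* shared enable register */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Locked read-modify-write, as in ioh_irq_enable()/disable():
 * the RMW of a shared register is not atomic on its own. */
static void irq_set_enabled(int irq, int enable)
{
    pthread_mutex_lock(&lock);
    if (enable)
        ien |= 1u << irq;
    else
        ien &= ~(1u << irq);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    irq_set_enabled(3, 1);
    irq_set_enabled(5, 1);
    irq_set_enabled(3, 0);
    printf("ien = 0x%x (only bit 5 should remain)\n", ien);
    return 0;
}
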
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e12..5cd04b65c556 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 28..31 are input only on MPC5121 */
+ if (gpio >= 28)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = mpc8xxx_gpio_dir_out;
- if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
- gc->get = mpc8572_gpio_get;
- else
- gc->get = mpc8xxx_gpio_get;
+ gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+ mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+ gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+ mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;
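
mpc5121_gpio_dir_out() simply guards the generic helper, rejecting GPIOs 28..31 that are input-only on MPC5121, and the right callback is chosen once at probe time from the compatible string. A sketch of that probe-time dispatch (hypothetical helpers):

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int generic_dir_out(unsigned gpio, int val)
{
    printf("gpio %u -> output, value %d\n", gpio, val);
    return 0;
}

/* MPC5121 variant: pins 28..31 are input-only, so refuse them
 * before delegating to the generic helper. */
static int mpc5121_dir_out(unsigned gpio, int val)
{
    if (gpio >= 28)
        return -EINVAL;
    return generic_dir_out(gpio, val);
}

int main(void)
{
    const char *compatible = "fsl,mpc5121-gpio"; /* from device tree */
    int (*dir_out)(unsigned, int) =
        strcmp(compatible, "fsl,mpc5121-gpio") == 0 ?
            mpc5121_dir_out : generic_dir_out;

    printf("pin 10: %d\n", dir_out(10, 1));
    printf("pin 30: %d (expect -EINVAL)\n", dir_out(30, 1));
    return 0;
}
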
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1d..4102f63230fd 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);
- pdata = dev->dev.platform_data;
- if (pdata == NULL)
- return -ENODEV;
-
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e02dc95..004b048c5192 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93c..a9ae374861e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
diff1 = now - dev_priv->last_time1;
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
+ dev_priv->chipset_power = ret;
+
return ret;
}
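
i915_chipset_val() now refuses to recompute when polled within 10 ms, returning the cached chipset_power instead; that both avoids dividing by a zero (or tiny) time delta and keeps the result meaningful. The cached-sample pattern, reduced to its essentials:

#include <stdio.h>

/* Cached-sample pattern: if called again within the minimum
 * interval, hand back the last computed value instead of
 * dividing by a (near-)zero time delta. */
static unsigned long last_time, cached_value;

static unsigned long sample(unsigned long now_ms, unsigned long counter)
{
    static unsigned long last_counter;
    unsigned long diff = now_ms - last_time;

    if (diff <= 10)                    /* polling too fast */
        return cached_value;

    cached_value = (counter - last_counter) * 1000 / diff;
    last_counter = counter;
    last_time = now_ms;
    return cached_value;
}

int main(void)
{
    printf("t=100ms: %lu\n", sample(100, 5000));
    printf("t=105ms: %lu (cached)\n", sample(105, 5400));
    printf("t=200ms: %lu\n", sample(200, 15000));
    return 0;
}
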
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15bfa9145d2b..a1103fc6597d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: false)");
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: true)");
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ udelay(10);
+}
+
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
- __gen6_gt_force_wake_get(dev_priv);
+ dev_priv->display.force_wake_get(dev_priv);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ POSTING_READ(FORCEWAKE_MT);
+}
+
/*
* see gen6_gt_force_wake_get()
*/
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_put(dev_priv);
+ dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a9c1b979804..554bef7a3b9c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
+struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {
u64 last_count1;
unsigned long last_time1;
+ unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 60ff1b63b568..8359dc777041 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2026,13 +2026,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
* to handle this, the waiter on a request often wants an associated
* buffer to have made it to the inactive list, and we would need
* a separate wait queue to handle that.
- *
- * To avoid a recursion with the ilk VT-d workaround (that calls
- * gpu_idle when unbinding objects with interruptible==false) don't
- * retire requests in that case (because it might call unbind if the
- * active list holds the last reference to the object).
*/
- if (ret == 0 && dev_priv->mm.interruptible)
+ if (ret == 0)
i915_gem_retire_requests_ring(ring);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f3..b9da8900ae4e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Disable semaphores on SNB */
+ if (INTEL_INFO(dev)->gen == 6)
+ return 0;
+
+ return 1;
+}
+
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
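
Both i915_semaphores and i915_enable_rc6 become tri-state ints: -1 selects the per-chip default at the point of use (semaphores stay off on gen 6 SNB), while 0 and 1 are explicit user overrides. A sketch of that resolution order:

#include <stdio.h>

/* Tri-state tunable: -1 = auto (per-chip default), 0 = force off,
 * 1 = force on. Mirrors the intel_enable_semaphores() logic above. */
static int semaphores_param = -1;

static int enable_semaphores(int gen)
{
    if (gen < 6)
        return 0;                   /* hardware has none */
    if (semaphores_param >= 0)
        return semaphores_param;    /* user override wins */
    return gen == 6 ? 0 : 1;        /* auto: off on gen 6 only */
}

int main(void)
{
    for (int gen = 5; gen <= 7; gen++)
        printf("gen %d, auto: %d\n", gen, enable_semaphores(gen));

    semaphores_param = 1;           /* user forces them on */
    printf("gen 6, forced: %d\n", enable_semaphores(6));
    return 0;
}
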
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b080cc824001..a26d5b0a3690 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3303,10 +3303,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3447,8 +3447,30 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e77a863a3833..daa5743ccbd6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
-
#include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
+ * @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp)
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
+ drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
- ret = drm_vblank_get(dev, intel_crtc->pipe);
- if (ret)
- goto cleanup_objs;
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
kfree(work);
return ret;
@@ -7887,6 +7907,31 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Disable rc6 on Sandybridge
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+ return 0;
+ }
+ DRM_DEBUG_DRIVER("RC6 enabled\n");
+ return 1;
+}
+
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7968,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (i915_enable_rc6)
+ if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
@@ -8372,7 +8417,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!i915_enable_rc6)
+ if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8536,28 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
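
On Ivybridge the driver probes ECOBUS once, under forcewake, and if the multi-threaded enable bit is set it swaps the MT handlers into display.force_wake_get/put; every later caller dispatches through the function pointers without re-checking the hardware. The probe-once-then-dispatch shape, in a self-contained sketch:

#include <stdio.h>

struct ops { void (*get)(void); void (*put)(void); };

static void legacy_get(void) { puts("legacy forcewake get"); }
static void legacy_put(void) { puts("legacy forcewake put"); }
static void mt_get(void)     { puts("MT forcewake get"); }
static void mt_put(void)     { puts("MT forcewake put"); }

/* Probe once, then dispatch through function pointers so the
 * capability check never runs in the hot path again. */
static void probe(struct ops *ops, int hw_has_mt)
{
    ops->get = legacy_get;
    ops->put = legacy_put;
    if (hw_has_mt) {               /* modeled on the ECOBUS check */
        ops->get = mt_get;
        ops->put = mt_put;
    }
}

int main(void)
{
    struct ops ops;

    probe(&ops, 1);
    ops.get();                     /* callers stay oblivious */
    ops.put();
    return 0;
}
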
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4d0358fad937..92b041b66e49 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
- if (intel_crtc)
+ if (check_bpp)
+ bpp = check_bpp;
+ else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (intel_dp_link_required(intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes))
- return MODE_CLOCK_HIGH;
+ mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(intel_dp,
+ mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+ mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+ }
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
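
intel_dp_link_required() computes pixel_clock (kHz) times bits per pixel, divided by 10; mode_valid first costs the mode at the full bpp and, only if that exceeds the link budget, re-costs it at 18 bpp (6 bits per channel) and tags the mode for 6bpc dithering. Worked through with hypothetical link numbers:

#include <stdio.h>

/* Required link rate in the driver's units: pixel clock in kHz
 * times bits per pixel, divided by 10 (rounded up). */
static int link_required(int pixel_clock_khz, int bpp)
{
    return (pixel_clock_khz * bpp + 9) / 10;
}

int main(void)
{
    int max_rate = 270000;  /* hypothetical link budget */
    int clock = 148500;     /* 1080p60 pixel clock, in kHz */

    int need24 = link_required(clock, 24);
    printf("24bpp needs %d, link gives %d\n", need24, max_rate);

    if (need24 > max_rate) {
        int need18 = link_required(clock, 18); /* 6bpc fallback */
        printf("18bpp needs %d -> %s\n", need18,
               need18 > max_rate ? "reject mode" : "dither to 6bpc");
    }
    return 0;
}
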
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev))
- aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
/*
- * There are three kinds of DP registers:
+ * There are four kinds of DP registers:
*
* IBX PCH
- * CPU
+ * SNB CPU
+ * IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Split out the IBX/CPU vs CPT settings */
- if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+ /* don't miss out required setting for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
}
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
- int voltage_max;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- voltage_max = I830_DP_VOLTAGE_MAX_CPT;
- else
- voltage_max = I830_DP_VOLTAGE_MAX;
+ voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604b73da..a1b4343814e8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520de..e44191132ac4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21f60b7d69a3..04d79fd1dc9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (IS_PINEVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
- } else {
+ else
max >>= 16;
- if (INTEL_INFO(dev)->gen < 4)
- max &= ~1;
- }
if (is_backlight_combination_mode(dev))
max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (IS_PINEVIEW(dev))
+ if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (IS_PINEVIEW(dev)) {
- tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
- } else
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aefd..f7b9268df266 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
return status;
}
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5e00d1670aa9..92c9628c572d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3276,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
}
+
+ /* Don't start up if the MC ucode is missing on BTC parts.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d24baf30efcb..5082d17d14dc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2560,7 +2560,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ if (rdev->pm.default_power_state_index >= 0)
+ rdev->pm.current_vddc =
+ rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ else
+ rdev->pm.current_vddc = 0;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a93bde..dc279706ca70 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a6f9a7..a0c2f12b1e1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ hwversion = ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
+
if (hwversion == 0)
return false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5ff561d4e0b4..66917c6c3813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
- param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ param->value =
+ ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
default:
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_no_fb;
}
-
vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
- ret = -EINVAL;
- goto out_no_fb;
- }
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 37d40545ed77..f94b33ae2215 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects rects in @rects against @clip, storing the
+ * results in @out_rects and the number of rects that passed in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+ int num_rects,
+ struct vmw_clip_rect clip,
+ SVGASignedRect *out_rects,
+ int *out_num)
+{
+ int i, k;
+
+ for (i = 0, k = 0; i < num_rects; i++) {
+ int x1 = max_t(int, clip.x1, rects[i].x1);
+ int y1 = max_t(int, clip.y1, rects[i].y1);
+ int x2 = min_t(int, clip.x2, rects[i].x2);
+ int y2 = min_t(int, clip.y2, rects[i].y2);
+
+ if (x1 >= x2)
+ continue;
+ if (y1 >= y2)
+ continue;
+
+ out_rects[k].left = x1;
+ out_rects[k].top = y1;
+ out_rects[k].right = x2;
+ out_rects[k].bottom = y2;
+ k++;
+ }
+
+ *out_num = k;
+}
+
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
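
A standalone sketch of the intersect-and-reject step vmw_clip_cliprects() performs above — plain C, with a hypothetical rect type and sample values rather than the kernel structs:

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Intersect r with clip; returns 0 when nothing is left. */
static int clip_rect(struct rect r, struct rect clip, struct rect *out)
{
	out->x1 = r.x1 > clip.x1 ? r.x1 : clip.x1;
	out->y1 = r.y1 > clip.y1 ? r.y1 : clip.y1;
	out->x2 = r.x2 < clip.x2 ? r.x2 : clip.x2;
	out->y2 = r.y2 < clip.y2 ? r.y2 : clip.y2;
	return out->x1 < out->x2 && out->y1 < out->y2;
}

int main(void)
{
	struct rect clip = { 0, 0, 1024, 768 };
	struct rect r = { 900, 700, 1200, 900 };	/* straddles the corner */
	struct rect out;

	if (clip_rect(r, clip, &out))
		printf("kept: %d,%d-%d,%d\n", out.x1, out.y1, out.x2, out.y2);
	else
		printf("dropped\n");
	return 0;
}

This prints "kept: 900,700-1024,768"; a rect entirely outside the clip is dropped, just as the kernel loop skips it rather than emitting a degenerate blit.
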
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+ int ret;
+
+ kmap_offset = 0;
+ kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+ hotspotX, hotspotY);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ return ret;
+}
+
+
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -EINVAL;
if (handle) {
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- handle, &surface);
- if (!ret) {
- if (!surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- vmw_surface_unreference(&surface);
- return -EINVAL;
- }
- } else {
- ret = vmw_user_dmabuf_lookup(tfile,
- handle, &dmabuf);
- if (ret) {
- DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
- }
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ handle, &surface, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
}
}
+ /* need to do this before taking down the old image */
+ if (surface && !surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ vmw_surface_unreference(&surface);
+ return -EINVAL;
+ }
+
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- void *virtual;
- bool dummy;
-
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
- kmap_offset = 0;
- kmap_num = (64*64*4) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return -EINVAL;
- }
-
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- vmw_cursor_update_image(dev_priv, virtual, 64, 64,
- du->hotspot_x, du->hotspot_y);
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
-
+ ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+ du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
- struct drm_clip_rect *clips_ptr;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *clips_ptr;
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
} *cmd;
SVGASignedRect *blits;
-
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
}
+ /* setup blits pointer */
+ blits = (SVGASignedRect *)&cmd[1];
+
+ /* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
cmd->body.srcRect.bottom = bottom;
clips_ptr = clips;
- blits = (SVGASignedRect *)&cmd[1];
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- blits[i].left = clips_ptr->x1 - left;
- blits[i].right = clips_ptr->x2 - left;
- blits[i].top = clips_ptr->y1 - top;
- blits[i].bottom = clips_ptr->y2 - top;
+ tmp[i].x1 = clips_ptr->x1 - left;
+ tmp[i].x2 = clips_ptr->x2 - left;
+ tmp[i].y1 = clips_ptr->y1 - top;
+ tmp[i].y2 = clips_ptr->y2 - top;
}
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
- int clip_x1 = left - unit->crtc.x;
- int clip_y1 = top - unit->crtc.y;
- int clip_x2 = right - unit->crtc.x;
- int clip_y2 = bottom - unit->crtc.y;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left - unit->crtc.x;
+ clip.y1 = top - unit->crtc.y;
+ clip.x2 = right - unit->crtc.x;
+ clip.y2 = bottom - unit->crtc.y;
/* skip any crtcs that miss the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit, skip this unit */
+ if (num == 0)
+ continue;
+ /* recalculate packet length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
break;
}
+
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/
+ /* Surface must be marked as a scanout. */
+ if (unlikely(!surface->scanout))
+ return -EINVAL;
+
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+ int move_x, move_y;
/* skip any crtcs that miss the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
clip_x2 <= 0 || clip_y2 <= 0)
continue;
+ /* clip size to crtc size */
+ clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+ clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+ /* translate both src and dest to bring clip into screen */
+ move_x = min_t(int, clip_x1, 0);
+ move_y = min_t(int, clip_y1, 0);
+
+ /* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
- blits[hit_num].body.destRect.left = clip_x1;
- blits[hit_num].body.destRect.top = clip_y1;
+ blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+ blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+ blits[hit_num].body.destRect.left = clip_x1 - move_x;
+ blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
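
The move_x/move_y idiom above shifts the source origin and the dest rect by the same negative offset, bringing a rect that starts off-screen to the screen edge without changing which source pixels it samples. A minimal sketch with made-up coordinates:

#include <stdio.h>

int main(void)
{
	int crtc_x = 100;		/* hypothetical crtc origin */
	int src_x = 80, clip_x1;
	int move_x;

	clip_x1 = src_x - crtc_x;	/* -20: starts left of the screen */
	move_x = clip_x1 < 0 ? clip_x1 : 0;

	/* subtracting the negative offset shifts both by the same amount */
	printf("src %d dest %d\n", src_x - move_x, clip_x1 - move_x);
	/* prints "src 100 dest 0" */
	return 0;
}
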
@@ -1003,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- u64 required_size;
int ret;
/**
@@ -1012,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* requested framebuffer.
*/
- required_size = mode_cmd->pitch * mode_cmd->height;
- if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ mode_cmd->pitch,
+ mode_cmd->height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
- /**
- * End conditioned code.
- */
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- mode_cmd->handle, &surface);
+ /* returns either a dmabuf or surface */
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ mode_cmd->handle,
+ &surface, &bo);
if (ret)
- goto try_dmabuf;
-
- if (!surface->scanout)
- goto err_not_scanout;
-
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
- &vfb, mode_cmd);
-
- /* vmw_user_surface_lookup takes one ref so does new_fb */
- vmw_surface_unreference(&surface);
-
- if (ret) {
- DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
- return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
- return &vfb->base;
-
-try_dmabuf:
- DRM_INFO("%s: trying buffer\n", __func__);
-
- ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
- if (ret) {
- DRM_ERROR("failed to find buffer: %i\n", ret);
- return ERR_PTR(-ENOENT);
- }
-
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd);
+ goto err_out;
+
+ /* Create the new framebuffer depending on what we got back */
+ if (bo)
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ mode_cmd);
+ else if (surface)
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+ surface, &vfb, mode_cmd);
+ else
+ BUG();
- /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
- vmw_dmabuf_unreference(&bo);
+err_out:
+ /* vmw_user_lookup_handle takes one ref; so does new_fb */
+ if (bo)
+ vmw_dmabuf_unreference(&bo);
+ if (surface)
+ vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
vfb->user_obj = user_obj;
return &vfb->base;
-
-err_not_scanout:
- DRM_ERROR("surface not marked as scanout\n");
- /* vmw_user_surface_lookup takes one ref */
- vmw_surface_unreference(&surface);
- ttm_base_object_unref(&user_obj);
-
- return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
+ int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
+ }
+
+ left = clips->x;
+ right = clips->x + clips->w;
+ top = clips->y;
+ bottom = clips->y + clips->h;
+
+ for (i = 1; i < num_clips; i++) {
+ left = min_t(int, left, (int)clips[i].x);
+ right = max_t(int, right, (int)clips[i].x + clips[i].w);
+ top = min_t(int, top, (int)clips[i].y);
+ bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = 0;
- cmd->body.srcRect.right = surface->sizes[0].width;
- cmd->body.srcRect.top = 0;
- cmd->body.srcRect.bottom = surface->sizes[0].height;
blits = (SVGASignedRect *)&cmd[1];
+
+ cmd->body.srcRect.left = left;
+ cmd->body.srcRect.right = right;
+ cmd->body.srcRect.top = top;
+ cmd->body.srcRect.bottom = bottom;
+
for (i = 0; i < num_clips; i++) {
- blits[i].left = clips[i].x;
- blits[i].right = clips[i].x + clips[i].w;
- blits[i].top = clips[i].y;
- blits[i].bottom = clips[i].y + clips[i].h;
+ tmp[i].x1 = clips[i].x - left;
+ tmp[i].x2 = clips[i].x + clips[i].w - left;
+ tmp[i].y1 = clips[i].y - top;
+ tmp[i].y2 = clips[i].y + clips[i].h - top;
}
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
- int clip_x1 = destX - unit->crtc.x;
- int clip_y1 = destY - unit->crtc.y;
- int clip_x2 = clip_x1 + surface->sizes[0].width;
- int clip_y2 = clip_y1 + surface->sizes[0].height;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left + destX - unit->crtc.x;
+ clip.y1 = top + destY - unit->crtc.y;
+ clip.x2 = right + destX - unit->crtc.x;
+ clip.y2 = bottom + destY - unit->crtc.y;
/* skip any crtcs that miss the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit, skip this unit */
+ if (num == 0)
+ continue;
+ /* recalculate packet length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
}
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
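
vmw_kms_present() now derives srcRect from the union bounding box of all clip rects, accumulated with running min/max before any per-unit work. The same step in standalone form — struct clip here is only a stand-in for the x/y/w/h layout of the incoming rects, and the values are made up:

#include <stdio.h>

struct clip { int x, y, w, h; };

int main(void)
{
	struct clip clips[] = { { 10, 10, 20, 20 }, { 5, 40, 10, 10 } };
	int n = 2, i;
	int left = clips[0].x, right = clips[0].x + clips[0].w;
	int top = clips[0].y, bottom = clips[0].y + clips[0].h;

	for (i = 1; i < n; i++) {
		if (clips[i].x < left)
			left = clips[i].x;
		if (clips[i].x + clips[i].w > right)
			right = clips[i].x + clips[i].w;
		if (clips[i].y < top)
			top = clips[i].y;
		if (clips[i].y + clips[i].h > bottom)
			bottom = clips[i].y + clips[i].h;
	}
	printf("bbox %d,%d-%d,%d\n", left, top, right, bottom);
	/* prints "bbox 5,10-30,50" */
	return 0;
}
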
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index af8e6e5bd964..e1cb8556355f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
+
/**
* Base class display unit.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90c5e3928491..8f8dbd43c33d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
+ struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0;
+ int i = 0, ret;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
+
+ /* Find the first du with a cursor. */
+ list_for_each_entry(entry, &lds->active, active) {
+ du = &entry->base;
+
+ if (!du->cursor_dmabuf)
+ continue;
+
+ ret = vmw_cursor_update_dmabuf(dev_priv,
+ du->cursor_dmabuf,
+ 64, 64,
+ du->hotspot_x,
+ du->hotspot_y);
+ if (ret == 0)
+ break;
+
+ DRM_ERROR("Could not update cursor image\n");
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4cceb31..1c7f09e26819 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
write_unlock(lock);
}
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
+{
+ int ret;
+
+ BUG_ON(*out_surf || *out_buf);
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+ if (!ret)
+ return 0;
+
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ return ret;
+}
+
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef49aeac..18936ac9d51c 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instances */
adap_info->ch_num = id->driver_data;
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, adap_info);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
+ pch_i2c_init(&adap_info->pch_data[i]);
ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
- goto err_i2c_add_adapter;
+ goto err_add_adapter;
}
-
- pch_i2c_init(&adap_info->pch_data[i]);
- }
- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, adap_info);
- if (ret) {
- pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_i2c_add_adapter:
+err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+ free_irq(pdev->irq, adap_info);
+err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d0023446a..fa23faa20f0e 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on
* interrupt callback latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
- dev->fifo_size = 0;
+
+ dev->fifo_size = (dev->fifo_size / 2);
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
- } else {
- dev->fifo_size = (dev->fifo_size / 2);
+ else
dev->b_hw = 1; /* Enable hardware fixes */
- }
+
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef86a06..4c1718081685 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* first, try busy waiting briefly */
do {
+ cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
- return -EINVAL;
+ return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 75ff821c0af0..d0d4aa9f4802 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.private_data_len = sizeof(struct cma_hdr) +
conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!req.private_data)
return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv->id.ps);
req.private_data_len = offset + conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!private_data)
return -ENOMEM;
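
Both cma checks above rely on the same unsigned-wraparound idiom: after adding a header size to a user-supplied length, a total that compares smaller than one of its addends must have overflowed the field. A standalone sketch — the unsigned char width is an assumption chosen to make the wrap easy to show, not necessarily the real field type:

#include <stdio.h>

/* Returns 0 on overflow, mirroring the -EINVAL paths above. */
static int total_len(unsigned char hdr_len, unsigned char data_len,
		     unsigned char *total)
{
	*total = hdr_len + data_len;	/* may wrap modulo 256 */
	return *total >= data_len;	/* wrapped if the sum fell below an addend */
}

int main(void)
{
	unsigned char t;

	printf("ok=%d\n", total_len(16, 100, &t));	/* ok=1, t=116 */
	printf("ok=%d\n", total_len(16, 250, &t));	/* ok=0: 266 wraps to 10 */
	return 0;
}
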
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77f3dbc0aaa1..18836cdf1e10 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1244,7 +1244,8 @@ err_reg:
err_counter:
for (; i; --i)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+ if (ibdev->counters[i - 1] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->uar_map);
@@ -1275,7 +1276,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 574600ef5b42..a7403248d83d 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
- dd->freectxts++;
+ dd->freectxts--;
ret = 0;
goto bail;
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
- dd->freectxts--;
+ dd->freectxts++;
}
mutex_unlock(&qib_mutex);
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 80793f1608eb..06517e60e50c 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
struct cma3000_accl_data *data = dev_id;
- int datax, datay, dataz;
- u8 ctrl, mode, range, intr_status;
+ int datax, datay, dataz, intr_status;
+ u8 ctrl, mode, range;
intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
if (intr_status < 0)
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index c5b12d2e955a..86d6f39178b0 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
/* inversion is required */
@@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
};
int val;
- if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+ if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
return -EIO;
*btn = buttons[(val & 0x30) >> 4];
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index ed1395ac7b8b..2e4af24f8c15 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
/* Finger-sensing Pad control registers */
#define FSP_REG_SYSCTL1 0x10
#define FSP_BIT_EN_REG_CLK BIT(5)
+#define FSP_REG_TMOD_STATUS 0x20
#define FSP_REG_OPC_QDOWN 0x31
#define FSP_BIT_EN_OPC_TAG BIT(7)
#define FSP_REG_OPTZ_XLO 0x34
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c080b828e5dc..a6dcd18e9adf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
do {
psmouse_reset(psmouse);
+ if (retry) {
+ /*
+ * On some boxes, right after resuming, the touchpad
+ * needs some time to finish initializing (I assume
+ * it needs time to calibrate) and start responding
+ * to Synaptics-specific queries, so let's wait a
+ * bit.
+ */
+ ssleep(1);
+ }
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index da0d8761e778..2ee47d01a3b4 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+ { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
+ 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE6) },
+ { USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a004c3945c67..31053a951c34 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,6 +41,7 @@
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
+#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -405,6 +406,9 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@@ -2185,18 +2189,6 @@ static inline void iommu_prepare_isa(void)
static int md_domain_init(struct dmar_domain *domain, int guest_width);
-static int __init si_domain_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- int *ret = datax;
-
- *ret = iommu_domain_identity_map(si_domain,
- (uint64_t)start_pfn << PAGE_SHIFT,
- (uint64_t)end_pfn << PAGE_SHIFT);
- return *ret;
-
-}
-
static int __init si_domain_init(int hw)
{
struct dmar_drhd_unit *drhd;
@@ -2228,9 +2220,15 @@ static int __init si_domain_init(int hw)
return 0;
for_each_online_node(nid) {
- work_with_active_regions(nid, si_domain_work_fn, &ret);
- if (ret)
- return ret;
+ unsigned long start_pfn, end_pfn;
+ int i;
+
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ ret = iommu_domain_identity_map(si_domain,
+ PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -3647,6 +3645,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
+ intel_iommu_enabled = 1;
+
return 0;
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df553..5b5fa5cdaa31 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b6907118283a..6d03774b176e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1393,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
- if (bitmap->mddev->degraded)
- /* Never clear bits or update events_cleared when degraded */
- success = 0;
while (sectors) {
sector_t blocks;
@@ -1409,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
return;
}
- if (success &&
+ if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index c3273efd08cb..627456542fb3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
newconf = linear_conf(mddev,mddev->raid_disks+1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ee981737edfc..f47f1f8ac44b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7360,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- } else
- break;
+ }
}
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31670f8d6b65..858fdbb7eb07 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3065,11 +3065,17 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else {
+ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
/* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
+ set_bit(R5_Insync, &dev->flags);
+ else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Expanded, &dev->flags))
+ /* If we've reshaped into here, we assume it is Insync.
+ * We will shortly update recovery_offset to make
+ * it official.
+ */
+ set_bit(R5_Insync, &dev->flags);
+
if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c
index 7eb1bf75cd07..5d02221e99dd 100644
--- a/drivers/media/common/tuners/mxl5007t.c
+++ b/drivers/media/common/tuners/mxl5007t.c
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
{
+ u8 buf[2] = { 0xfb, reg };
struct i2c_msg msg[] = {
{ .addr = state->i2c_props.addr, .flags = 0,
- .buf = &reg, .len = 1 },
+ .buf = buf, .len = 2 },
{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
.buf = val, .len = 1 },
};
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
index aacfe2387e28..4fc29730a12c 100644
--- a/drivers/media/common/tuners/tda18218.c
+++ b/drivers/media/common/tuners/tda18218.c
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
switch (params->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
LP_Fc = 0;
- LO_Frac = params->frequency + 4000000;
+ LO_Frac = params->frequency + 3000000;
break;
case BANDWIDTH_7_MHZ:
LP_Fc = 1;
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 303f22ea04c0..01bb8daf4b09 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -189,7 +189,7 @@ struct ati_remote {
dma_addr_t inbuf_dma;
dma_addr_t outbuf_dma;
- unsigned char old_data[2]; /* Detect duplicate events */
+ unsigned char old_data; /* Detect duplicate events */
unsigned long old_jiffies;
unsigned long acc_jiffies; /* handle acceleration */
unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
/* Translation table from hardware messages to input events. */
static const struct {
short kind;
- unsigned char data1, data2;
+ unsigned char data;
int type;
unsigned int code;
int value;
} ati_remote_tbl[] = {
/* Directional control pad axes */
- {KIND_ACCEL, 0x35, 0x70, EV_REL, REL_X, -1}, /* left */
- {KIND_ACCEL, 0x36, 0x71, EV_REL, REL_X, 1}, /* right */
- {KIND_ACCEL, 0x37, 0x72, EV_REL, REL_Y, -1}, /* up */
- {KIND_ACCEL, 0x38, 0x73, EV_REL, REL_Y, 1}, /* down */
+ {KIND_ACCEL, 0x70, EV_REL, REL_X, -1}, /* left */
+ {KIND_ACCEL, 0x71, EV_REL, REL_X, 1}, /* right */
+ {KIND_ACCEL, 0x72, EV_REL, REL_Y, -1}, /* up */
+ {KIND_ACCEL, 0x73, EV_REL, REL_Y, 1}, /* down */
/* Directional control pad diagonals */
- {KIND_LU, 0x39, 0x74, EV_REL, 0, 0}, /* left up */
- {KIND_RU, 0x3a, 0x75, EV_REL, 0, 0}, /* right up */
- {KIND_LD, 0x3c, 0x77, EV_REL, 0, 0}, /* left down */
- {KIND_RD, 0x3b, 0x76, EV_REL, 0, 0}, /* right down */
+ {KIND_LU, 0x74, EV_REL, 0, 0}, /* left up */
+ {KIND_RU, 0x75, EV_REL, 0, 0}, /* right up */
+ {KIND_LD, 0x77, EV_REL, 0, 0}, /* left down */
+ {KIND_RD, 0x76, EV_REL, 0, 0}, /* right down */
/* "Mouse button" buttons */
- {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
- {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
- {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
- {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+ {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+ {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+ {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+ {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
/* Artificial "doubleclick" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
- {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
- {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+ {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+ {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
/* Non-mouse events are handled by rc-core */
- {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+ {KIND_END, 0x00, EV_MAX + 1, 0, 0}
};
/* Local function prototypes */
@@ -397,25 +397,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
}
/*
- * ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
- int i;
-
- for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
- /*
- * Decide if the table entry matches the remote input.
- */
- if (ati_remote_tbl[i].data1 == d1 &&
- ati_remote_tbl[i].data2 == d2)
- return i;
-
- }
- return -1;
-}
-
-/*
* ati_remote_compute_accel
*
* Implements acceleration curve for directional control pad
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
int index = -1;
int acc;
int remote_num;
- unsigned char scancode[2];
+ unsigned char scancode;
+ int i;
+
+ /*
+ * data[0] = 0x14
+ * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+ * data[2] = the key code (with toggle bit in MSB with some models)
+ * data[3] = channel << 4 (the low 4 bits must be zero)
+ */
/* Deal with strange looking inputs */
if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
+ if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+ dbginfo(&ati_remote->interface->dev,
+ "wrong checksum in input: %02x %02x %02x %02x\n",
+ data[0], data[1], data[2], data[3]);
+ return;
+ }
+
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
- scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
/*
- * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
- * so we have to clear them. The first bit is a bit tricky as the
- * "non-toggled" state depends on remote_num, so we xor it with the
- * second bit which is only used for toggle.
+ * MSB is a toggle code, though only used by some devices
+ * (e.g. SnapStream Firefly)
*/
- scancode[0] ^= (data[2] & 0x80);
-
- scancode[1] = data[2] & ~0x80;
+ scancode = data[2] & 0x7f;
- /* Look up event code index in mouse translation table. */
- index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+ /* Look up event code index in the mouse translation table. */
+ for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+ if (scancode == ati_remote_tbl[i].data) {
+ index = i;
+ break;
+ }
+ }
if (index >= 0) {
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
- remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+ "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+ remote_num, data[2], index, ati_remote_tbl[index].code);
if (!dev)
return; /* no mouse device */
} else
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
- remote_num, data[1], data[2], scancode[0], scancode[1]);
+ "channel 0x%02x; key data %02x, scancode %02x\n",
+ remote_num, data[2], scancode);
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
- if (ati_remote->old_data[0] == data[1] &&
- ati_remote->old_data[1] == data[2] &&
+ if (ati_remote->old_data == data[2] &&
time_before(now, ati_remote->old_jiffies +
msecs_to_jiffies(repeat_filter))) {
ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
ati_remote->first_jiffies = now;
}
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
ati_remote->old_jiffies = now;
/* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
- u32 rc_code = (scancode[0] << 8) | scancode[1];
/*
* We don't use the rc-core repeat handling yet as
* it would cause ghost repeats which would be a
* regression for this driver.
*/
- rc_keydown_notimeout(ati_remote->rdev, rc_code,
+ rc_keydown_notimeout(ati_remote->rdev, scancode,
data[2]);
rc_keyup(ati_remote->rdev);
return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
input_sync(dev);
ati_remote->old_jiffies = jiffies;
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
}
}
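
The intent of the new validation above, restated as a standalone, testable C sketch (names are illustrative, not the driver's; the packet layout and the 0xd5 checksum constant come from the comment at the top of the hunk):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool ati_packet_ok(const uint8_t data[4])
{
	if (data[0] != 0x14)
		return false;
	if (data[3] & 0x0f)	/* data[3] is channel << 4 */
		return false;
	/* data[1] is a checksum over the key and channel bytes */
	return data[1] == ((data[2] + data[3] + 0xd5) & 0xff);
}

static uint8_t ati_scancode(const uint8_t data[4])
{
	return data[2] & 0x7f;	/* drop the per-press toggle bit (MSB) */
}

int main(void)
{
	/* channel 1, key 0x0d (KEY_1), toggle clear: 0x0d + 0x10 + 0xd5 = 0xf2 */
	const uint8_t pkt[4] = { 0x14, 0xf2, 0x0d, 0x10 };

	assert(ati_packet_ok(pkt));
	assert(ati_scancode(pkt) == 0x0d);
	return 0;
}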
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index e1b8b2605c48..81506440eded 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -27,55 +27,55 @@
#include <media/rc-map.h>
static struct rc_map_table ati_x10[] = {
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xc500, KEY_A },
- { 0xc601, KEY_B },
- { 0xde19, KEY_C },
- { 0xe01b, KEY_D },
- { 0xe621, KEY_E },
- { 0xe823, KEY_F },
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x00, KEY_A },
+ { 0x01, KEY_B },
+ { 0x19, KEY_C },
+ { 0x1b, KEY_D },
+ { 0x21, KEY_E },
+ { 0x23, KEY_F },
- { 0xdd18, KEY_KPENTER }, /* "check" */
- { 0xdb16, KEY_MENU }, /* "menu" */
- { 0xc702, KEY_POWER }, /* Power */
- { 0xc803, KEY_TV }, /* TV */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xca05, KEY_WWW }, /* WEB */
- { 0xcb06, KEY_BOOKMARKS }, /* "book" */
- { 0xcc07, KEY_EDIT }, /* "hand" */
- { 0xe11c, KEY_COFFEE }, /* "timer" */
- { 0xe520, KEY_FRONT }, /* "max" */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe31e, KEY_OK }, /* "OK" */
- { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
- { 0xcd08, KEY_VOLUMEUP }, /* VOL - */
- { 0xcf0a, KEY_MUTE }, /* MUTE */
- { 0xd00b, KEY_CHANNELUP }, /* CH + */
- { 0xd10c, KEY_CHANNELDOWN },/* CH - */
- { 0xec27, KEY_RECORD }, /* ( o) red */
- { 0xea25, KEY_PLAY }, /* ( >) */
- { 0xe924, KEY_REWIND }, /* (<<) */
- { 0xeb26, KEY_FORWARD }, /* (>>) */
- { 0xed28, KEY_STOP }, /* ([]) */
- { 0xee29, KEY_PAUSE }, /* ('') */
- { 0xf02b, KEY_PREVIOUS }, /* (<-) */
- { 0xef2a, KEY_NEXT }, /* (>+) */
- { 0xf22d, KEY_INFO }, /* PLAYING */
- { 0xf32e, KEY_HOME }, /* TOP */
- { 0xf42f, KEY_END }, /* END */
- { 0xf530, KEY_SELECT }, /* SELECT */
+ { 0x18, KEY_KPENTER }, /* "check" */
+ { 0x16, KEY_MENU }, /* "menu" */
+ { 0x02, KEY_POWER }, /* Power */
+ { 0x03, KEY_TV }, /* TV */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x05, KEY_WWW }, /* WEB */
+ { 0x06, KEY_BOOKMARKS }, /* "book" */
+ { 0x07, KEY_EDIT }, /* "hand" */
+ { 0x1c, KEY_COFFEE }, /* "timer" */
+ { 0x20, KEY_FRONT }, /* "max" */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1a, KEY_UP }, /* up */
+ { 0x1e, KEY_OK }, /* "OK" */
+ { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+ { 0x08, KEY_VOLUMEUP }, /* VOL - */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+ { 0x0b, KEY_CHANNELUP }, /* CH + */
+ { 0x0c, KEY_CHANNELDOWN },/* CH - */
+ { 0x27, KEY_RECORD }, /* ( o) red */
+ { 0x25, KEY_PLAY }, /* ( >) */
+ { 0x24, KEY_REWIND }, /* (<<) */
+ { 0x26, KEY_FORWARD }, /* (>>) */
+ { 0x28, KEY_STOP }, /* ([]) */
+ { 0x29, KEY_PAUSE }, /* ('') */
+ { 0x2b, KEY_PREVIOUS }, /* (<-) */
+ { 0x2a, KEY_NEXT }, /* (>+) */
+ { 0x2d, KEY_INFO }, /* PLAYING */
+ { 0x2e, KEY_HOME }, /* TOP */
+ { 0x2f, KEY_END }, /* END */
+ { 0x30, KEY_SELECT }, /* SELECT */
};
static struct rc_map_list ati_x10_map = {
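
The conversion of this keymap (and of the two that follow) appears to obey one rule: the old 16-bit entry carried a checksum-derived byte in the high half and the raw key code in the low half, with high = (low + 0xc5) & 0xff; the new tables key on the bare 7-bit code alone. A throwaway userspace check of that inference (illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Pattern inferred from the table above: 0xd20d -> 0x0d, 0xc500 -> 0x00, ... */
static uint16_t old_from_new(uint8_t code)
{
	return (uint16_t)(((code + 0xc5) & 0xff) << 8) | code;
}

int main(void)
{
	assert(old_from_new(0x0d) == 0xd20d);	/* KEY_1 */
	assert(old_from_new(0x30) == 0xf530);	/* KEY_SELECT */
	assert(old_from_new(0x2c) == 0xf12c);	/* medion KEY_TV below */
	return 0;
}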
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 09e2cc01d110..479cdb897810 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -25,70 +25,70 @@
#include <media/rc-map.h>
static struct rc_map_table medion_x10[] = {
- { 0xf12c, KEY_TV }, /* TV */
- { 0xf22d, KEY_VCR }, /* VCR */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
- { 0xf32e, KEY_RADIO }, /* RADIO */
- { 0xca05, KEY_DIRECTORY }, /* PHOTO */
- { 0xf42f, KEY_INFO }, /* TV-PREVIEW */
- { 0xf530, KEY_LIST }, /* CHANNEL-LST */
-
- { 0xe01b, KEY_SETUP }, /* SETUP */
- { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
- { 0xcd08, KEY_VOLUMEDOWN }, /* VOL - */
- { 0xce09, KEY_VOLUMEUP }, /* VOL + */
- { 0xd00b, KEY_CHANNELUP }, /* CHAN + */
- { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
- { 0xc500, KEY_MUTE }, /* MUTE */
-
- { 0xf732, KEY_RED }, /* red */
- { 0xf833, KEY_GREEN }, /* green */
- { 0xf934, KEY_YELLOW }, /* yellow */
- { 0xfa35, KEY_BLUE }, /* blue */
- { 0xdb16, KEY_TEXT }, /* TXT */
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
- { 0xe520, KEY_DELETE }, /* DELETE */
-
- { 0xfb36, KEY_KEYBOARD }, /* RENAME */
- { 0xdd18, KEY_SCREEN }, /* SNAPSHOT */
-
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe31e, KEY_OK }, /* OK */
-
- { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
- { 0xfd38, KEY_EDIT }, /* EDIT IMAGE */
-
- { 0xe924, KEY_REWIND }, /* rewind (<<) */
- { 0xea25, KEY_PLAY }, /* play ( >) */
- { 0xeb26, KEY_FORWARD }, /* forward (>>) */
- { 0xec27, KEY_RECORD }, /* record ( o) */
- { 0xed28, KEY_STOP }, /* stop ([]) */
- { 0xee29, KEY_PAUSE }, /* pause ('') */
-
- { 0xe621, KEY_PREVIOUS }, /* prev */
- { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
- { 0xe823, KEY_NEXT }, /* next */
- { 0xde19, KEY_MENU }, /* MENU */
- { 0xff3a, KEY_LANGUAGE }, /* AUDIO */
-
- { 0xc702, KEY_POWER }, /* POWER */
+ { 0x2c, KEY_TV }, /* TV */
+ { 0x2d, KEY_VCR }, /* VCR */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x06, KEY_AUDIO }, /* MUSIC */
+
+ { 0x2e, KEY_RADIO }, /* RADIO */
+ { 0x05, KEY_DIRECTORY }, /* PHOTO */
+ { 0x2f, KEY_INFO }, /* TV-PREVIEW */
+ { 0x30, KEY_LIST }, /* CHANNEL-LST */
+
+ { 0x1b, KEY_SETUP }, /* SETUP */
+ { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+ { 0x08, KEY_VOLUMEDOWN }, /* VOL - */
+ { 0x09, KEY_VOLUMEUP }, /* VOL + */
+ { 0x0b, KEY_CHANNELUP }, /* CHAN + */
+ { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+ { 0x00, KEY_MUTE }, /* MUTE */
+
+ { 0x32, KEY_RED }, /* red */
+ { 0x33, KEY_GREEN }, /* green */
+ { 0x34, KEY_YELLOW }, /* yellow */
+ { 0x35, KEY_BLUE }, /* blue */
+ { 0x16, KEY_TEXT }, /* TXT */
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+ { 0x20, KEY_DELETE }, /* DELETE */
+
+ { 0x36, KEY_KEYBOARD }, /* RENAME */
+ { 0x18, KEY_SCREEN }, /* SNAPSHOT */
+
+ { 0x1a, KEY_UP }, /* up */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x1e, KEY_OK }, /* OK */
+
+ { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+ { 0x38, KEY_EDIT }, /* EDIT IMAGE */
+
+ { 0x24, KEY_REWIND }, /* rewind (<<) */
+ { 0x25, KEY_PLAY }, /* play ( >) */
+ { 0x26, KEY_FORWARD }, /* forward (>>) */
+ { 0x27, KEY_RECORD }, /* record ( o) */
+ { 0x28, KEY_STOP }, /* stop ([]) */
+ { 0x29, KEY_PAUSE }, /* pause ('') */
+
+ { 0x21, KEY_PREVIOUS }, /* prev */
+ { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+ { 0x23, KEY_NEXT }, /* next */
+ { 0x19, KEY_MENU }, /* MENU */
+ { 0x3a, KEY_LANGUAGE }, /* AUDIO */
+
+ { 0x02, KEY_POWER }, /* POWER */
};
static struct rc_map_list medion_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index ef146520931c..c7f33ec719b4 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -22,63 +22,63 @@
#include <media/rc-map.h>
static struct rc_map_table snapstream_firefly[] = {
- { 0xf12c, KEY_ZOOM }, /* Maximize */
- { 0xc702, KEY_CLOSE },
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xdb16, KEY_BACK },
- { 0xdd18, KEY_KPENTER }, /* ent */
-
- { 0xce09, KEY_VOLUMEUP },
- { 0xcd08, KEY_VOLUMEDOWN },
- { 0xcf0a, KEY_MUTE },
- { 0xd00b, KEY_CHANNELUP },
- { 0xd10c, KEY_CHANNELDOWN },
- { 0xc500, KEY_VENDOR }, /* firefly */
-
- { 0xf32e, KEY_INFO },
- { 0xf42f, KEY_OPTION },
-
- { 0xe21d, KEY_LEFT },
- { 0xe41f, KEY_RIGHT },
- { 0xe722, KEY_DOWN },
- { 0xdf1a, KEY_UP },
- { 0xe31e, KEY_OK },
-
- { 0xe11c, KEY_MENU },
- { 0xe520, KEY_EXIT },
-
- { 0xec27, KEY_RECORD },
- { 0xea25, KEY_PLAY },
- { 0xed28, KEY_STOP },
- { 0xe924, KEY_REWIND },
- { 0xeb26, KEY_FORWARD },
- { 0xee29, KEY_PAUSE },
- { 0xf02b, KEY_PREVIOUS },
- { 0xef2a, KEY_NEXT },
-
- { 0xcb06, KEY_AUDIO }, /* Music */
- { 0xca05, KEY_IMAGES }, /* Photos */
- { 0xc904, KEY_DVD },
- { 0xc803, KEY_TV },
- { 0xcc07, KEY_VIDEO },
-
- { 0xc601, KEY_HELP },
- { 0xf22d, KEY_MODE }, /* Mouse */
-
- { 0xde19, KEY_A },
- { 0xe01b, KEY_B },
- { 0xe621, KEY_C },
- { 0xe823, KEY_D },
+ { 0x2c, KEY_ZOOM }, /* Maximize */
+ { 0x02, KEY_CLOSE },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x16, KEY_BACK },
+ { 0x18, KEY_KPENTER }, /* ent */
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0a, KEY_MUTE },
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x00, KEY_VENDOR }, /* firefly */
+
+ { 0x2e, KEY_INFO },
+ { 0x2f, KEY_OPTION },
+
+ { 0x1d, KEY_LEFT },
+ { 0x1f, KEY_RIGHT },
+ { 0x22, KEY_DOWN },
+ { 0x1a, KEY_UP },
+ { 0x1e, KEY_OK },
+
+ { 0x1c, KEY_MENU },
+ { 0x20, KEY_EXIT },
+
+ { 0x27, KEY_RECORD },
+ { 0x25, KEY_PLAY },
+ { 0x28, KEY_STOP },
+ { 0x24, KEY_REWIND },
+ { 0x26, KEY_FORWARD },
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_PREVIOUS },
+ { 0x2a, KEY_NEXT },
+
+ { 0x06, KEY_AUDIO }, /* Music */
+ { 0x05, KEY_IMAGES }, /* Photos */
+ { 0x04, KEY_DVD },
+ { 0x03, KEY_TV },
+ { 0x07, KEY_VIDEO },
+
+ { 0x01, KEY_HELP },
+ { 0x2d, KEY_MODE }, /* Mouse */
+
+ { 0x19, KEY_A },
+ { 0x1b, KEY_B },
+ { 0x21, KEY_C },
+ { 0x23, KEY_D },
};
static struct rc_map_list snapstream_firefly_map = {
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 39fc923fc46b..1c6015a04f96 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
switch (tv.model) {
case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
{ USB_DEVICE(0x2040, 0x8200),
.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+ { USB_DEVICE(0x2040, 0x7260),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+ { USB_DEVICE(0x2040, 0x7213),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ },
};
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 881e04c7ffe6..2ca10dfec91f 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
gspca_dev->usb_err = 0;
/* do the specific subdriver stuff before endpoint selection */
- gspca_dev->alt = 0;
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
goto unlock;
}
- intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
: USB_ENDPOINT_XFER_ISOC;
@@ -957,7 +957,7 @@ retry:
ret = -EIO;
goto out;
}
- alt = ep_tb[--alt_idx].alt;
+ gspca_dev->alt = ep_tb[--alt_idx].alt;
}
}
out:
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 89d09a8914f8..82c8817bd32d 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -162,7 +162,6 @@ struct m5mols_version {
* @pad: media pad
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
- * @code: current code
* @irq_waitq: waitqueue for the capture
* @work_irq: workqueue for the IRQ
* @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
struct media_pad pad;
struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
int res_type;
- enum v4l2_mbus_pixelcode code;
wait_queue_head_t irq_waitq;
struct work_struct work_irq;
unsigned long flags;
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 05ab3700647e..e0f09e531800 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
int ret = -EINVAL;
u8 reg;
- if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+ if (mode < REG_PARAMETER || mode > REG_CAPTURE)
return ret;
ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct m5mols_info *info = to_m5mols(sd);
struct v4l2_mbus_framefmt *format;
- if (fmt->pad != 0)
- return -EINVAL;
-
format = __find_format(info, fh, fmt->which, info->res_type);
if (!format)
return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
u32 resolution = 0;
int ret;
- if (fmt->pad != 0)
- return -EINVAL;
-
ret = __find_resolution(sd, format, &type, &resolution);
if (ret < 0)
return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (!sfmt)
return 0;
- *sfmt = m5mols_default_ffmt[type];
- sfmt->width = format->width;
- sfmt->height = format->height;
+
+ format->code = m5mols_default_ffmt[type].code;
+ format->colorspace = V4L2_COLORSPACE_JPEG;
+ format->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ *sfmt = *format;
info->resolution = resolution;
- info->code = format->code;
info->res_type = type;
}
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
{
struct m5mols_info *info = to_m5mols(sd);
+ u32 code = info->ffmt[info->res_type].code;
if (enable) {
int ret = -EINVAL;
- if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+ if (is_code(code, M5MOLS_RESTYPE_MONITOR))
ret = m5mols_start_monitor(info);
- if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+ if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
ret = m5mols_start_capture(info);
return ret;
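
The one-character m5mols_mode() fix above matters because, with &&, the condition "mode < REG_PARAMETER && mode > REG_CAPTURE" can never be true, so out-of-range modes were silently accepted; with || anything outside [REG_PARAMETER, REG_CAPTURE] is rejected. A sketch with stand-in register values (the real REG_* constants are the driver's, not shown here):

#include <assert.h>

enum { REG_PARAMETER = 1, REG_MONITOR = 2, REG_CAPTURE = 3 };	/* stand-ins */

static int mode_valid(int mode)
{
	return !(mode < REG_PARAMETER || mode > REG_CAPTURE);
}

int main(void)
{
	assert(!mode_valid(0));			/* rejected only after the fix */
	assert(mode_valid(REG_MONITOR));
	assert(!mode_valid(REG_CAPTURE + 1));
	return 0;
}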
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cf2c0fb95f2f..398f96ffd35e 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
+ mutex_init(&mt9m111->power_lock);
ret = mt9m111_video_probe(client);
if (ret) {
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 32114a3c0ca7..7b34b11daf24 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
ret = mt9t112_camera_probe(client);
- if (ret)
+ if (ret) {
kfree(priv);
+ return ret;
+ }
/* Cannot fail: using the default supported pixel code */
mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
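
The mt9t112 hunk adds the missing return so probe() cannot fall through and keep using priv after kfree(). A minimal sketch of the safe shape of such an error path (hypothetical names; not the driver's code):

#include <linux/i2c.h>
#include <linux/slab.h>

struct example_priv { int id; };		/* stand-in private state */

static int example_camera_probe(struct i2c_client *client)
{
	return 0;				/* stub for illustration */
}

static int example_probe(struct i2c_client *client)
{
	struct example_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = example_camera_probe(client);
	if (ret) {
		kfree(priv);
		return ret;	/* the return the original code was missing */
	}

	return 0;
}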
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 9c5c19f142de..ee0d0b39cd17 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -38,6 +38,7 @@
#include <linux/irq.h>
#include <linux/videodev2.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+
+ if (!dssdev->driver) {
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
+ }
+
vid_dev->displays[vid_dev->num_displays++] = dssdev;
}
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e87ae2f634b2..6a6cf388bae4 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 1d54b86c936b..3ea38a8def8e 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
unsigned long flags;
struct sgdma_state *sg_state;
- if ((sglen < 0) || ((sglen > 0) & !sglist))
+ if ((sglen < 0) || ((sglen > 0) && !sglist))
return -EINVAL;
spin_lock_irqsave(&sgdma->lock, flags);
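
The omap24xxcam fix swaps bitwise & for logical &&. Here the operands happen to be 0/1 so the value was accidentally right, but bitwise & never short-circuits and breaks as soon as one side is a raw integer rather than a comparison; a tiny userspace demonstration of the hazard:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	int sglen = 2;
	void *sglist = NULL;

	/* Intended test: positive length but missing list -> reject. */
	assert((sglen > 0) && !sglist);

	/* Bitwise form on a raw value: 2 & 1 == 0, the test silently fails. */
	assert((sglen & !sglist) == 0);
	return 0;
}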
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index b0b0fa5a3572..54a4a3f22e2e 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
struct isp_pipeline *pipe =
to_isp_pipeline(&ccdc->video_out.video.entity);
- struct video_device *vdev = &ccdc->subdev.devnode;
+ struct video_device *vdev = ccdc->subdev.devnode;
struct v4l2_event event;
memset(&event, 0, sizeof(event));
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 68d539456c55..bc0b2c7349b9 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
- struct video_device *vdev = &stat->subdev.devnode;
+ struct video_device *vdev = stat->subdev.devnode;
struct v4l2_event event;
struct omap3isp_stat_event_status *status = (void *)event.u.data;
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index d1000723c5ae..f2290578448c 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -26,6 +26,7 @@
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index 9f2d26b1d4cb..6806345ec2f0 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index c8d91b0cd9bd..2cc3b9166724 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ fimc_hw_reset(fimc);
+ cap->buf_index = 0;
+
spin_unlock_irqrestore(&fimc->slock, flags);
if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
- if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
return 0;
spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
fimc_hw_set_rotation(ctx);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
fimc_hw_set_out_dma(ctx);
- set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
spin_unlock(&ctx->slock);
return ret;
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
int min_bufs;
int ret;
- fimc_hw_reset(fimc);
vid_cap->frame_count = 0;
ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
- if (fimc->id == 1 && var->pix_hoff)
+ if (var->min_vsize_align == 1 && !rotation)
align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
set_frame_bounds(ff, mf->width, mf->height);
+ fimc->vid_cap.mf = *mf;
ff->fmt = ffmt;
/* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
media_entity_cleanup(&sd->entity);
v4l2_device_unregister_subdev(sd);
kfree(sd);
- sd = NULL;
+ fimc->vid_cap.subdev = NULL;
}
/* Set default format at the sensor and host interface */
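
The fimc_capture_config_update() hunks invert a flag that had consumer and producer backwards: ST_CAPT_APPLY_CFG means "a new configuration is pending", so the updater must bail when the bit is clear and clear it after programming the hardware. A sketch of that produce/consume pattern (EX_APPLY_CFG is a stand-in bit number, not the driver's):

#include <linux/bitops.h>

#define EX_APPLY_CFG	0	/* stand-in for ST_CAPT_APPLY_CFG */

static void example_format_changed(unsigned long *state)
{
	set_bit(EX_APPLY_CFG, state);		/* producer: mark pending */
}

static void example_config_update(unsigned long *state)
{
	if (!test_bit(EX_APPLY_CFG, state))
		return;				/* nothing pending */

	/* ... reprogram rotation, DMA offsets, output DMA ... */

	clear_bit(EX_APPLY_CFG, state);		/* consumer: acknowledge */
}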
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 19ca6db38b2f..07c6254faee3 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
static struct fimc_fmt fimc_formats[] = {
{
.name = "RGB565",
- .fourcc = V4L2_PIX_FMT_RGB565X,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
.color = S5P_FIMC_RGB565,
.memplanes = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
mod_x = 6; /* 64 x 32 pixels tile */
mod_y = 5;
} else {
- if (fimc->id == 1 && variant->pix_hoff)
+ if (variant->min_vsize_align == 1)
mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
else
- mod_y = mod_x;
+ mod_y = ffs(variant->min_vsize_align) - 1;
}
- dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
/* Get pixel alignment constraints. */
- if (fimc->id == 1 && fimc->variant->pix_hoff)
+ if (fimc->variant->min_vsize_align == 1)
halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
else
- halign = ffs(min_size) - 1;
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
for (i = 0; i < f->fmt->colplanes; i++)
depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
fimc->pdata = pdata;
- set_bit(ST_LPM, &fimc->state);
init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
	/* Enable clocks and perform basic initialization */
clk_enable(fimc->clock[CLK_GATE]);
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
/* Resume the capture or mem-to-mem device */
if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
return 0;
}
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
spin_unlock_irqrestore(&fimc->slock, flags);
if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- fimc_runtime_suspend(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[0],
};
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
+ .min_vsize_align = 1,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[3],
};
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a6936dad5b10..c7f01c47b20f 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
 * @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
* @out_buf_count: the number of buffers in output DMA sequence
*/
struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
u16 min_inp_pixsize;
u16 min_out_pixsize;
u16 hor_offs_align;
+ u16 min_vsize_align;
u16 out_buf_count;
};
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index cc337b1de913..615c862f0360 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
s_info->pdata->board_info, NULL);
if (IS_ERR_OR_NULL(sd)) {
+ i2c_put_adapter(adapter);
v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
return NULL;
}
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_adapter *adapter;
if (!client)
return;
v4l2_device_unregister_subdev(sd);
+ adapter = client->adapter;
i2c_unregister_device(client);
- i2c_put_adapter(client->adapter);
+ if (adapter)
+ i2c_put_adapter(adapter);
}
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
static int fimc_md_register_video_nodes(struct fimc_md *fmd)
{
+ struct video_device *vdev;
int i, ret = 0;
for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
if (!fmd->fimc[i])
continue;
- if (fmd->fimc[i]->m2m.vfd)
- ret = video_register_device(fmd->fimc[i]->m2m.vfd,
- VFL_TYPE_GRABBER, -1);
- if (ret)
- break;
- if (fmd->fimc[i]->vid_cap.vfd)
- ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
- VFL_TYPE_GRABBER, -1);
+ vdev = fmd->fimc[i]->m2m.vfd;
+ if (vdev) {
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ break;
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+ }
+
+ vdev = fmd->fimc[i]->vid_cap.vfd;
+ if (vdev == NULL)
+ continue;
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
}
return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (WARN(csis == NULL,
"MIPI-CSI interface specified "
"but s5p-csis module is not loaded!\n"))
- continue;
+ return -EINVAL;
ret = media_entity_create_link(&sensor->entity, 0,
&csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
struct fimc_md *fmd;
int ret;
- if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
- return -EINVAL;
-
fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
if (!fmd)
return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = fimc_md_register_sensor_entities(fmd);
- if (ret)
- goto err3;
+ if (pdev->dev.platform_data) {
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret)
+ goto err3;
+ }
ret = fimc_md_create_links(fmd);
if (ret)
goto err3;
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 20e664e34163..44f5c2d1920b 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
writel(cfg, dev->regs + S5P_CIGCTRL);
+
+ if (dev->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(dev, 0xF);
}
static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *src_frame = &ctx->s_frame;
struct fimc_frame *dst_frame = &ctx->d_frame;
- u32 cfg = 0;
+
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+ cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+ S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+ S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+ S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+ S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
fimc_hw_set_scaler(ctx);
cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
if (variant->has_mainscaler_ext) {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CIEXTEN);
} else {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 1e8cdb77d4b8..dff9dc798795 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
.num_planes = 1,
},
{
- .name = "H264 Encoded Stream",
+ .name = "H263 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H263,
.codec_mode = S5P_FIMV_CODEC_H263_ENC,
.type = MFC_FMT_ENC,
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index e16d3a4bc1dc..b47d0c06ecf5 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -16,6 +16,7 @@
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index f390682629cf..c51decfcae19 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
ret = sh_mobile_ceu_soft_reset(pcdev);
csi2_sd = find_csi2(pcdev);
- if (csi2_sd)
- csi2_sd->grp_id = (long)icd;
+ if (csi2_sd) {
+ csi2_sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(csi2_sd, icd);
+ }
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
{
if (pcdev->csi2_pdev) {
struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
- if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+ if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
return csi2_sd;
}
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
/* Try 2560x1920, 1280x960, 640x480, 320x240 */
mf.width = 2560 >> shift;
mf.height = 1920 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
bool ceu_1to1;
int ret;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
s_mbus_fmt, mf);
if (ret < 0)
return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
tmp_h = min(2 * tmp_h, max_height);
mf->width = tmp_w;
mf->height = tmp_h;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, mf);
dev_geo(dev, "Camera scaled to %ux%u\n",
mf->width, mf->height);
if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
}
if (interm_width < icd->user_width || interm_height < icd->user_height) {
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
mf.code = xlate->code;
mf.colorspace = pix->colorspace;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ video, try_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
*/
mf.width = 2560;
mf.height = 1920;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
dev_err(icd->parent,
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index ea4f0473ed3b..8a652b53ff7e 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
.flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct device *dev = v4l2_get_subdevdata(&priv->subdev);
struct v4l2_mbus_config cfg;
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b72580c38957..62e4312515cb 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
}
sd = soc_camera_to_subdev(icd);
- sd->grp_id = (long)icd;
+ sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(sd, icd);
if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
goto ectrl;
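
The ov6650, sh_mobile_ceu/csi2 and soc_camera hunks above all stop smuggling the soc_camera_device pointer through sd->grp_id casts (grp_id is a u32, so the cast truncates pointers on 64-bit) and use the subdev host-private field instead. A hedged usage sketch of those accessors:

#include <media/soc_camera.h>
#include <media/v4l2-subdev.h>

static void example_attach_host(struct v4l2_subdev *sd,
				struct soc_camera_device *icd)
{
	v4l2_set_subdev_hostdata(sd, icd);	/* stash the host pointer */
}

static struct soc_camera_device *example_host_of(struct v4l2_subdev *sd)
{
	return v4l2_get_subdev_hostdata(sd);	/* retrieval without casts */
}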
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 43c0ebb81956..b7b2d3483fd4 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,7 +4,7 @@
* Debugfs support for the AB5500 MFD driver
*/
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1e9173804ede..d3d572b2317b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+#ifdef CONFIG_DEBUG_FS
static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
.flags = IORESOURCE_IRQ,
},
};
+#endif
static struct resource __devinitdata ab8500_usb_resources[] = {
{
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f1d88483112c..8d816cce8322 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
ret = __adp5520_read(chip->client, reg, &reg_val);
- if (!ret && ((reg_val & bit_mask) == 0)) {
+ if (!ret && ((reg_val & bit_mask) != bit_mask)) {
reg_val |= bit_mask;
ret = __adp5520_write(chip->client, reg, reg_val);
}
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 1b79c37fd599..1924b857a0fb 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __da903x_write(chip->client, reg, reg_val);
}
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ free_irq(client->irq, chip);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 1e9ee533eacb..ef39528088f2 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -16,6 +16,7 @@
*/
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index bba26d96c240..a5ddf31b60ca 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 6f5b8cf2f652..c1da84bc1573 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
goto out;
}
- data &= mask;
+ data &= ~mask;
err = tps65910_i2c_write(tps65910, reg, 1, &data);
if (err)
dev_err(tps65910->dev, "write to reg %x failed\n", reg);
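
Three drivers above (adp5520, da903x, tps6586x) share one read-modify-write bug: testing "(reg_val & bit_mask) == 0" only detects "all bits clear", so the write was skipped whenever any bit of a multi-bit mask was already set; "!= bit_mask" writes unless every bit is set. tps65910_clear_bits had the complementary bug, keeping only the masked bits instead of clearing them. A standalone sketch of the corrected logic:

#include <assert.h>
#include <stdint.h>

static uint8_t set_bits(uint8_t val, uint8_t mask)
{
	if ((val & mask) != mask)	/* not already fully set */
		val |= mask;
	return val;
}

static uint8_t clear_bits(uint8_t val, uint8_t mask)
{
	return val & ~mask;		/* 'val & mask' would do the opposite */
}

int main(void)
{
	assert(set_bits(0x01, 0x03) == 0x03);	/* the old '== 0' test skipped this */
	assert(clear_bits(0x07, 0x03) == 0x04);
	return 0;
}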
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bfbd66021afd..61e70cfaa774 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/*
* [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/* [MSG1] fill the register address data */
msg = &twl->xfer_msg[0];
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index f062c8cc6c38..29f11e0765fe 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -432,6 +432,7 @@ struct sih_agent {
u32 edge_change;
struct mutex irq_lock;
+ char *irq_name;
};
/*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
* Generic handler for SIH interrupts ... we "know" this is called
* in task context, with IRQs enabled.
*/
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
{
struct sih_agent *agent = irq_get_handler_data(irq);
const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, read ISR error %d\n",
sih->name, isr);
/* REVISIT: recover; eventually mask it all, etc */
- return;
+ return IRQ_HANDLED;
}
while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
sih->name, irq);
}
+ return IRQ_HANDLED;
}
static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
activate_irq(irq);
}
- status = irq_base;
twl4030_irq_next += i;
/* replace generic PIH handler (handle_simple_irq) */
irq = sih_mod + twl4030_irq_base;
irq_set_handler_data(irq, agent);
- irq_set_chained_handler(irq, handle_twl4030_sih);
+ agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+ status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+ agent->irq_name ?: sih->name, NULL);
pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
irq, irq_base, twl4030_irq_next - 1);
- return status;
+ return status < 0 ? status : irq_base;
}
/* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
}
/* install an irq handler to demultiplex the TWL4030 interrupt */
- status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
- "TWL4030-PIH", NULL);
+ status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+ IRQF_ONESHOT,
+ "TWL4030-PIH", NULL);
if (status < 0) {
pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
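
The twl4030-irq conversion above replaces irq_set_chained_handler(), whose handler must run in hard-IRQ context, with request_threaded_irq(), so the demultiplexer may sleep while reading ISR registers over I2C. A hedged usage sketch of the pattern (the patch passes flags 0 for the SIH case and IRQF_ONESHOT for the PIH case; IRQF_ONESHOT is shown here):

#include <linux/interrupt.h>

static irqreturn_t example_demux_thread(int irq, void *data)
{
	/* runs in a kernel thread: sleeping bus I/O is allowed here */
	return IRQ_HANDLED;
}

static int example_demux_setup(int irq, const char *name, void *data)
{
	/* NULL primary handler: the core wakes the thread directly.
	 * IRQF_ONESHOT keeps the line masked until the thread returns. */
	return request_threaded_irq(irq, NULL, example_demux_thread,
				    IRQF_ONESHOT, name, data);
}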
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 5d6ba132837e..61894fced8ea 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
switch (wm8994->type) {
case WM8958:
+ case WM1811:
ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
if (ret < 0) {
dev_err(dev, "Failed to read power status: %d\n", ret);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e8a5eb38748b..d31c78b72b0f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
- /*
- * Enable runtime power management by default. This flag was added due
- * to runtime power management causing disruption for some users, but
- * the power on/off code has been improved since then.
- *
- * We'll enable this flag by default as an experiment, and if no
- * problems are reported, we will follow up later and remove the flag
- * altogether.
- */
- host->caps = MMC_CAP_POWER_OFF_CARD;
-
return host;
free:
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 50b5f9926f64..0726e59fd418 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -675,7 +675,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -754,8 +755,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
}
if (!cmd->data || cmd->error) {
- if (host->data)
+ if (host->data) {
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
mmci_stop_data(host);
+ }
mmci_request_end(host, cmd->mrq);
} else if (!(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
@@ -955,8 +960,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
data = host->data;
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+ MCI_DATABLOCKEND) && data)
mmci_data_irq(host, data, status);
cmd = host->cmd;
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 87b6f079b6e0..b4257e700617 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -109,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_cns3xxx_probe,
.remove = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_cns3xxx_init(void)
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index f2d29dca4420..a81312c91f70 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_dove_probe,
.remove = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_dove_init(void)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 4b976f00ea85..38ebc4ea259f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
.remove = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_imx_init(void)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 59e9d003e589..01e5f627e0f0 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.owner = THIS_MODULE,
.of_match_table = sdhci_esdhc_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_esdhc_probe,
.remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_init(void)
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 9b0d794a4f69..3619adc7d9fc 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
.name = "sdhci-hlwd",
.owner = THIS_MODULE,
.of_match_table = sdhci_hlwd_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_hlwd_probe,
.remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_hlwd_init(void)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d833d9c2f7e3..6878a94626bc 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
int (*probe_slot) (struct sdhci_pci_slot *);
void (*remove_slot) (struct sdhci_pci_slot *, int);
- int (*suspend) (struct sdhci_pci_chip *,
- pm_message_t);
+ int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
};
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
jmicron_enable_mmc(slot->host, 0);
}
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i;
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
#ifdef CONFIG_PM
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (!slot)
continue;
- ret = sdhci_suspend_host(slot->host, state);
+ ret = sdhci_suspend_host(slot->host);
if (ret) {
for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
pci_set_power_state(pdev, PCI_D3hot);
} else {
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
}
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
- pm_message_t state = { .event = PM_EVENT_SUSPEND };
int i, ret;
chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
+ .suspend = sdhci_pci_suspend,
+ .resume = sdhci_pci_resume,
.runtime_suspend = sdhci_pci_runtime_suspend,
.runtime_resume = sdhci_pci_runtime_resume,
.runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
.id_table = pci_ids,
.probe = sdhci_pci_probe,
.remove = __devexit_p(sdhci_pci_remove),
- .suspend = sdhci_pci_suspend,
- .resume = sdhci_pci_resume,
.driver = {
.pm = &sdhci_pci_pm_ops
},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index a9e12ea05583..03970bcb3495 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, state);
+ return sdhci_suspend_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
#endif /* CONFIG_PM */
static int __init sdhci_pltfm_drv_init(void)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3a9fc3f40840..37e0e184a0bb 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
#ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
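
The sdhci-pltfm change above (and the per-driver hunks that follow) migrate from platform_driver .suspend/.resume hooks to a shared dev_pm_ops wired in via .driver.pm, with a macro collapsing to NULL when CONFIG_PM is off. A minimal sketch of the pattern with illustrative names:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",		/* illustrative driver name */
		.pm   = FOO_PM_OPS,	/* replaces .suspend/.resume hooks */
	},
};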
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index d4bf6d30c7ba..7a039c3cb1f1 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
.remove = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav2_init(void)
{
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index cff4ad3e7a59..15673a7ee6a5 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav3_probe,
.remove = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav3_init(void)
{
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index cb60c4197e0a..0d33ff0d67fb 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -622,23 +622,29 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, pm);
+ return sdhci_suspend_host(host);
}
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+ .suspend = sdhci_s3c_suspend,
+ .resume = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
#else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
#endif
static struct platform_driver sdhci_s3c_driver = {
@@ -647,6 +653,7 @@ static struct platform_driver sdhci_s3c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
+ .pm = SDHCI_S3C_PMOPS,
},
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 89699e861fc1..e2e18d3f949c 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
.name = "sdhci-tegra",
.owner = THIS_MODULE,
.of_match_table = sdhci_tegra_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_tegra_probe,
.remove = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_tegra_init(void)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6d8eea323541..19ed580f2cab 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2327,7 +2327,7 @@ out:
#ifdef CONFIG_PM
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0a5b65460d8a..a04d4d0c6fd2 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
#ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
extern int sdhci_resume_host(struct sdhci_host *host);
extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e8f6e65183d7..2ec978bc32ba 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
static int firmware_rom_wait_states = 0x1C;
#endif
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
MODULE_PARM_DESC(firmware_rom_wait_states,
"ROM wait states byte=RRRIIEEE (Reserved Internal External)");
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 94f553489725..45876d0e5b8e 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
- /* add the whole device. */
- err = mtd_device_register(info->mtd, NULL, 0);
- if (err)
- dev_err(&pdev->dev, "failed to register the entire device\n");
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ }
+ }
return err;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 411a17df9fc1..2a25b6789af4 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+ mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 071b63420f0e..493ec2fcf97f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -21,9 +21,9 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
-
#include "gpmi-nand.h"
/* add our owner bbt descriptor */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ee1713907b92..f8aacf48ecdd 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (!flash_np)
return -ENODEV;
- ppdata->of_node = flash_np;
+ ppdata.of_node = flash_np;
ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
dev_name(&ndfc->ofdev->dev), flash_np->name);
if (!ndfc->mtd.name) {
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 5272f9d4dda9..9de37642f09f 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -23,8 +23,8 @@ if NET_VENDOR_FREESCALE
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || ARCH_MXS)
- default ARCH_MXC || ARCH_MXS if ARM
+ ARCH_MXC || SOC_IMX28)
+ default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a1594..c136230d50bb 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -232,6 +232,7 @@ struct fec_enet_private {
struct platform_device *pdev;
int opened;
+ int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
@@ -837,7 +838,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */
@@ -953,7 +954,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
- int dev_id = fep->pdev->id;
+ int dev_id = fep->dev_id;
fep->phy_dev = NULL;
@@ -1031,7 +1032,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
fep->mii_bus = fec0_mii_bus;
return 0;
@@ -1063,7 +1064,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1521,6 +1522,7 @@ fec_probe(struct platform_device *pdev)
int i, irq, ret = 0;
struct resource *r;
const struct of_device_id *of_id;
+ static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1548,6 +1550,7 @@ fec_probe(struct platform_device *pdev)
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
+ fep->dev_id = dev_id++;
if (!fep->hwp) {
ret = -ENOMEM;
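The fec hunks replace uses of pdev->id with a driver-private instance number because devices probed from the device tree have pdev->id == -1, which broke the MAC-address adjustment and MII bus naming above. A sketch of the counting scheme (foo_* names are hypothetical):

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo_priv {
            int dev_id;
    };

    static int foo_probe(struct platform_device *pdev)
    {
            static int dev_id;      /* monotonic across all probed instances */
            struct foo_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* pdev->id is -1 for DT-probed devices; keep our own id */
            priv->dev_id = dev_id++;
            platform_set_drvdata(pdev, priv);
            return 0;
    }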
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e7..4d9f84b8ab97 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
- int i;
-
- for (i = PHY_MAX_ADDR; i > 0; i--) {
- u32 phy_id;
-
- if (get_phy_id(new_bus, i, &phy_id))
- return -1;
-
- if (phy_id == 0xffffffff)
- break;
- }
-
- return i;
-}
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
- } else
- return NULL;
-}
+ }
#endif
+ return NULL;
+}
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
struct device_node *np = NULL;
int err = 0;
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
return err;
else
return -EINVAL;
-}
+#else
+ return -ENODEV;
#endif
-
+}
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else {
err = -ENODEV;
goto err_free_irqs;
@@ -386,16 +359,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
}
if (tbiaddr == -1) {
- out_be32(tbipa, 0);
-
- tbiaddr = fsl_pq_mdio_find_free(new_bus);
- }
-
- /*
- * We define TBIPA at 0 to be illegal, opting to fail for boards that
- * have PHYs at 1-31, rather than change tbipa and rescan.
- */
- if (tbiaddr == 0) {
err = -EBUSY;
goto err_free_irqs;
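The fsl_pq_mdio rework moves the CONFIG_GIANFAR/CONFIG_UCC_GETH #if blocks from the call sites into the helpers themselves, which simply return NULL or -ENODEV when the support is compiled out; the probe path is then left with one unconditional call per case. The idiom, reduced to a sketch (CONFIG_FOO and the function are hypothetical):

    #include <linux/errno.h>
    #include <linux/types.h>

    static int get_widget_id(u64 start, u64 end, u32 *id)
    {
    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
            /* real lookup would go here */
            *id = 1;
            return 0;
    #else
            return -ENODEV;         /* feature compiled out */
    #endif
    }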
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c7b60839ac99..dea0cb4400e2 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2606,6 +2606,9 @@ static int skge_up(struct net_device *dev)
spin_unlock_irq(&hw->hw_lock);
napi_enable(&skge->napi);
+
+ skge_set_multicast(dev);
+
return 0;
free_tx_ring:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 227997d775e8..5829e0b47e7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -147,6 +147,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector)
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 67bf07819992..c8f47f17186f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -477,7 +477,6 @@ enum rtl_register_content {
/* Config1 register p.24 */
LEDS1 = (1 << 7),
LEDS0 = (1 << 6),
- MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
Speed_down = (1 << 4),
MEMMAP = (1 << 3),
IOMAP = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -3426,22 +3426,24 @@ static const struct rtl_cfg_info {
};
/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
const struct rtl_cfg_info *cfg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
unsigned msi = 0;
u8 cfg2;
cfg2 = RTL_R8(Config2) & ~MSIEnable;
if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(pdev)) {
- dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
} else {
cfg2 |= MSIEnable;
msi = RTL_FEATURE_MSI;
}
}
- RTL_W8(Config2, cfg2);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
return msi;
}
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ tp->features |= rtl_try_msi(tp, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
if (rtl_tbi_enabled(tp)) {
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d3369cdd..c97d2f590855 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
/* handle completed packets */
+ spin_unlock_irqrestore(&chan->lock, flags);
do {
ret = __cpdma_chan_process(chan);
if (ret < 0)
break;
} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+ spin_lock_irqsave(&chan->lock, flags);
/* remaining packets haven't been tx/rx'ed, clean them up */
while (chan->head) {
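The davinci_cpdma change drops chan->lock around the completion-drain loop: __cpdma_chan_process() takes the channel lock itself, so calling it with the lock already held would deadlock. The locking shape, as a sketch (foo_* names are hypothetical):

    #include <linux/spinlock.h>

    struct foo_chan {
            spinlock_t lock;
    };

    static int foo_chan_process_one(struct foo_chan *chan); /* takes chan->lock */

    static void foo_chan_stop(struct foo_chan *chan)
    {
            unsigned long flags;

            spin_lock_irqsave(&chan->lock, flags);
            /* ... request hardware teardown ... */

            spin_unlock_irqrestore(&chan->lock, flags);
            while (foo_chan_process_one(chan) >= 0)
                    ;               /* drain completions without the lock */
            spin_lock_irqsave(&chan->lock, flags);

            /* ... clean up never-completed descriptors under the lock ... */
            spin_unlock_irqrestore(&chan->lock, flags);
    }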
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f725..f8a6853b692e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po)) {
- release_sock(sk);
+ if (add_chan(po))
error = -EBUSY;
- }
release_sock(sk);
return error;
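The pptp_bind fix removes a double unlock: on the add_chan() failure path the socket was released inside the branch and then again at the end of the function. The corrected shape keeps exactly one release_sock() on every path (foo_* names are hypothetical):

    #include <linux/errno.h>
    #include <net/sock.h>

    static int foo_add_chan(struct sock *sk);       /* hypothetical helper */

    static int foo_bind(struct sock *sk)
    {
            int error = 0;

            lock_sock(sk);
            if (foo_add_chan(sk))
                    error = -EBUSY; /* no unlock here ... */
            release_sock(sk);       /* ... the single unlock covers all paths */
            return error;
    }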
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e6fed4d4cb77..e95f0e60a9bc 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1655,6 +1655,10 @@ static const struct usb_device_id products [] = {
// ASIX 88772a
USB_DEVICE(0x0db0, 0xa877),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ // Asus USB Ethernet Adapter
+ USB_DEVICE(0x0b95, 0x7e2b),
+ .driver_info = (unsigned long) &ax88772_info,
},
{ }, // END
};
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f40898..a9c5ae75277e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_start_ani(common);
}
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
@@ -1843,6 +1843,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ if (!(sc->sc_flags & SC_OP_TXAGGR))
+ return;
+
switch (cmd) {
case STA_NOTIFY_SLEEP:
an->sleeping = true;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 888abc2be3a5..528d5f3e868c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->max_valid_rate = k;
ath_rc_sort_validrates(rate_table, ath_rc_priv);
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+ ath_rc_priv->rate_max_phy = (k > 4) ?
+ ath_rc_priv->valid_rate_index[k-4] :
+ ath_rc_priv->valid_rate_index[k-1];
ath_rc_priv->rate_table = rate_table;
ath_dbg(common, ATH_DBG_CONFIG,
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index fcff923b3c18..279a53eae4c5 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -617,9 +617,19 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
const char *err_msg = NULL;
struct b43_rxhdr_fw4 *rxhdr =
(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
+ size_t rxhdr_size = sizeof(*rxhdr);
BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
- memset(rxhdr, 0, sizeof(*rxhdr));
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_410:
+ case B43_FW_HDR_351:
+ rxhdr_size -= sizeof(rxhdr->format_598) -
+ sizeof(rxhdr->format_351);
+ break;
+ case B43_FW_HDR_598:
+ break;
+ }
+ memset(rxhdr, 0, rxhdr_size);
/* Check if we have data and wait for it to get ready. */
if (q->rev >= 8) {
@@ -657,11 +667,11 @@ data_ready:
/* Get the preamble (RX header) */
if (q->rev >= 8) {
- b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+ b43_block_read(dev, rxhdr, rxhdr_size,
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
} else {
- b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+ b43_block_read(dev, rxhdr, rxhdr_size,
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index a7a6def40d05..5c7c17c7166a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
if (ctx->ht.enabled) {
/* if HT40 is used, it should not change
* after associated except channel switch */
- if (iwl_is_associated_ctx(ctx) &&
- !ctx->ht.is_40mhz)
+ if (!ctx->ht.is_40mhz ||
+ !iwl_is_associated_ctx(ctx))
iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 35a6b71f358c..df1540ca6102 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
}
iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bacc06c95e7a..e0e9a3dfbc00 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2850,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
int ret;
u8 sta_id;
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return 0;
+
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
@@ -2898,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *ctx = vif_priv->ctx;
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return;
+
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index ce918980e977..5f17ab8e76ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
- if (is_agg)
- iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
- le16_to_cpu(tx_cmd->len));
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac278156d390..6e0a3eaecf70 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
- unsigned long cmd_pending_q_flags;
unsigned long scan_pending_q_flags;
uint16_t cancel_scan_cmd = false;
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
cmd_node = adapter->curr_cmd;
cmd_node->wait_q_enabled = false;
cmd_node->cmd_flag |= CMD_F_CANCELED;
- spin_lock_irqsave(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/*
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index ea4a29b7e331..1679c2593b7b 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -55,9 +55,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
bool cancel_flag = false;
int status = adapter->cmd_wait_q.status;
- struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+ struct cmd_ctrl_node *cmd_queued;
+ if (!adapter->cmd_queued)
+ return 0;
+
+ cmd_queued = adapter->cmd_queued;
adapter->cmd_queued = NULL;
+
dev_dbg(adapter->dev, "cmd pending\n");
atomic_inc(&adapter->cmd_pending);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac5929..3b585aadabfc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df84..e49cf2244c75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c5509..0883349e1c83 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979c..f10ac1ad9087 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index cbd5d701c7e0..63b3ec48c203 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
if (!lookup)
return NULL;
- for(; lookup->name != NULL; lookup++) {
+ for(; lookup->compatible != NULL; lookup++) {
if (!of_device_is_compatible(np, lookup->compatible))
continue;
if (of_address_to_resource(np, 0, &res))
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
new file mode 100644
index 000000000000..76f1c9357f39
--- /dev/null
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -0,0 +1,173 @@
+/**
+ * @file nmi_timer_int.c
+ *
+ * @remark Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
+static int ctr_running;
+
+static struct perf_event_attr nmi_timer_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+static void nmi_timer_callback(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ event->hw.interrupts = 0; /* don't throttle interrupts */
+ oprofile_add_sample(regs, 0);
+}
+
+static int nmi_timer_start_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (!event) {
+ event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
+ nmi_timer_callback, NULL);
+ if (IS_ERR(event))
+ return PTR_ERR(event);
+ per_cpu(nmi_timer_events, cpu) = event;
+ }
+
+ if (event && ctr_running)
+ perf_event_enable(event);
+
+ return 0;
+}
+
+static void nmi_timer_stop_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (event && ctr_running)
+ perf_event_disable(event);
+}
+
+static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
+ void *data)
+{
+ int cpu = (unsigned long)data;
+ switch (action) {
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ nmi_timer_start_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ nmi_timer_stop_cpu(cpu);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nmi_timer_cpu_nb = {
+ .notifier_call = nmi_timer_cpu_notifier
+};
+
+static int nmi_timer_start(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ ctr_running = 1;
+ for_each_online_cpu(cpu)
+ nmi_timer_start_cpu(cpu);
+ put_online_cpus();
+
+ return 0;
+}
+
+static void nmi_timer_stop(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ nmi_timer_stop_cpu(cpu);
+ ctr_running = 0;
+ put_online_cpus();
+}
+
+static void nmi_timer_shutdown(void)
+{
+ struct perf_event *event;
+ int cpu;
+
+ get_online_cpus();
+ unregister_cpu_notifier(&nmi_timer_cpu_nb);
+ for_each_possible_cpu(cpu) {
+ event = per_cpu(nmi_timer_events, cpu);
+ if (!event)
+ continue;
+ perf_event_disable(event);
+ per_cpu(nmi_timer_events, cpu) = NULL;
+ perf_event_release_kernel(event);
+ }
+
+ put_online_cpus();
+}
+
+static int nmi_timer_setup(void)
+{
+ int cpu, err;
+ u64 period;
+
+ /* clock cycles per tick: */
+ period = (u64)cpu_khz * 1000;
+ do_div(period, HZ);
+ nmi_timer_attr.sample_period = period;
+
+ get_online_cpus();
+ err = register_cpu_notifier(&nmi_timer_cpu_nb);
+ if (err)
+ goto out;
+ /* can't attach events to offline cpus: */
+ for_each_online_cpu(cpu) {
+ err = nmi_timer_start_cpu(cpu);
+ if (err)
+ break;
+ }
+ if (err)
+ nmi_timer_shutdown();
+out:
+ put_online_cpus();
+ return err;
+}
+
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ int err = 0;
+
+ err = nmi_timer_setup();
+ if (err)
+ return err;
+ nmi_timer_shutdown(); /* only check, don't alloc */
+
+ ops->create_files = NULL;
+ ops->setup = nmi_timer_setup;
+ ops->shutdown = nmi_timer_shutdown;
+ ops->start = nmi_timer_start;
+ ops->stop = nmi_timer_stop;
+ ops->cpu_type = "timer";
+
+ printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f8c752e408a6..ed2c3ec07024 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -246,37 +246,31 @@ static int __init oprofile_init(void)
int err;
/* always init architecture to setup backtrace support */
+ timer_mode = 0;
err = oprofile_arch_init(&oprofile_ops);
+ if (!err) {
+ if (!timer && !oprofilefs_register())
+ return 0;
+ oprofile_arch_exit();
+ }
- timer_mode = err || timer; /* fall back to timer mode on errors */
- if (timer_mode) {
- if (!err)
- oprofile_arch_exit();
+ /* setup timer mode: */
+ timer_mode = 1;
+ /* no nmi timer mode if oprofile.timer is set */
+ if (timer || op_nmi_timer_init(&oprofile_ops)) {
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
- err = oprofilefs_register();
- if (!err)
- return 0;
-
- /* failed */
- if (timer_mode)
- oprofile_timer_exit();
- else
- oprofile_arch_exit();
-
- return err;
+ return oprofilefs_register();
}
static void __exit oprofile_exit(void)
{
oprofilefs_unregister();
- if (timer_mode)
- oprofile_timer_exit();
- else
+ if (!timer_mode)
oprofile_arch_exit();
}
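Because the +/- lines interleave, the resulting oprofile_init() fallback chain is easier to read straight through: arch init first, then the NMI timer (skipped when oprofile.timer=1), then the plain hrtimer. A condensed restatement of the new logic from the hunk above, with nothing added:

    static int __init oprofile_init(void)
    {
            int err;

            timer_mode = 0;
            err = oprofile_arch_init(&oprofile_ops);
            if (!err) {
                    if (!timer && !oprofilefs_register())
                            return 0;       /* arch mode is up */
                    oprofile_arch_exit();
            }

            /* fall back to a timer: NMI timer first, hrtimer last */
            timer_mode = 1;
            if (timer || op_nmi_timer_init(&oprofile_ops)) {
                    err = oprofile_timer_init(&oprofile_ops);
                    if (err)
                            return err;
            }
            return oprofilefs_register();
    }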
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 177b73de5e5f..d32ef816337c 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -35,7 +35,15 @@ struct dentry;
void oprofile_create_files(struct super_block *sb, struct dentry *root);
int oprofile_timer_init(struct oprofile_operations *ops);
-void oprofile_timer_exit(void);
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+int op_nmi_timer_init(struct oprofile_operations *ops);
+#else
+static inline int op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ return -ENODEV;
+}
+#endif
+
int oprofile_set_ulong(unsigned long *addr, unsigned long val);
int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 89f63456646f..84a208dbed93 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
+ retval = 0;
if (val)
retval = oprofile_start();
else
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index d0de6cc2d7a5..2f0aa0f700e6 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
}
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
raw_spin_lock_irqsave(&oprofilefs_lock, flags);
*val = simple_strtoul(tmpbuf, NULL, 0);
raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return 0;
+ return count;
}
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
return -EINVAL;
retval = oprofilefs_ulong_from_user(&value, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(file->private_data, value);
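With oprofilefs_ulong_from_user() now returning the number of bytes consumed (0 for a zero-length write, negative on error), callers must treat only negative values as failure and must not read *val when 0 is returned; hence the retval <= 0 checks above. A sketch of a conforming caller (some_setting is a hypothetical target; oprofile_set_ulong() is declared in the driver-internal oprof.h):

    #include <linux/fs.h>
    #include <linux/oprofile.h>

    static unsigned long some_setting;      /* hypothetical */

    static ssize_t foo_write(struct file *file, char const __user *buf,
                             size_t count, loff_t *offset)
    {
            unsigned long val;
            int retval;

            if (*offset)
                    return -EINVAL;

            retval = oprofilefs_ulong_from_user(&val, buf, count);
            if (retval <= 0)        /* <0: error; 0: empty write, val untouched */
                    return retval;

            retval = oprofile_set_ulong(&some_setting, val);
            if (retval)
                    return retval;
            return count;
    }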
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 878fba126582..93404f72dfa8 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -97,24 +97,24 @@ static struct notifier_block __refdata oprofile_cpu_notifier = {
.notifier_call = oprofile_cpu_notify,
};
-int oprofile_timer_init(struct oprofile_operations *ops)
+static int oprofile_hrtimer_setup(void)
{
- int rc;
-
- rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
- if (rc)
- return rc;
- ops->create_files = NULL;
- ops->setup = NULL;
- ops->shutdown = NULL;
- ops->start = oprofile_hrtimer_start;
- ops->stop = oprofile_hrtimer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
- return 0;
+ return register_hotcpu_notifier(&oprofile_cpu_notifier);
}
-void oprofile_timer_exit(void)
+static void oprofile_hrtimer_shutdown(void)
{
unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
+
+int oprofile_timer_init(struct oprofile_operations *ops)
+{
+ ops->create_files = NULL;
+ ops->setup = oprofile_hrtimer_setup;
+ ops->shutdown = oprofile_hrtimer_shutdown;
+ ops->start = oprofile_hrtimer_start;
+ ops->stop = oprofile_hrtimer_stop;
+ ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
+ return 0;
+}
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb0bd78..b0dd08e6a9da 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "pci.h"
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fce1c54a0c8d..9ddf69e3bbef 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (tmp) {
+ struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+ if (root && (root->osc_control_set &
+ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return AE_OK;
+ }
+ }
+
acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
- pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
- struct acpi_pci_root *root;
acpi_handle dummy_handle;
- /*
- * We shouldn't use this bridge if PCIe native hotplug control has been
- * granted by the BIOS for it.
- */
- root = acpi_pci_find_root(handle);
- if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- return -ENODEV;
-
/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
static acpi_status
find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- struct acpi_pci_root *root;
int *count = (int *)context;
if (!acpi_is_root_bridge(handle))
return AE_OK;
- root = acpi_pci_find_root(handle);
- if (!root)
- return AE_OK;
-
- if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
- return AE_OK;
-
(*count)++;
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155d7b37..1969a3ee3058 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
if (!nr_virtfn)
return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
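The sriov_enable() hunk builds a bitmask of the SR-IOV resource indices and has the PCI core enable just those BARs, since __pci_enable_device_flags() (next hunk) now deliberately skips them. The mask construction, isolated as a sketch (pci_enable_resources() is internal to drivers/pci, so this only compiles inside the PCI core):

    #include <linux/pci.h>

    static int enable_iov_bars(struct pci_dev *dev)
    {
            int i, bars = 0;

            for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
                    bars |= 1 << (i + PCI_IOV_RESOURCES);  /* one bit per IOV BAR */

            return pci_enable_resources(dev, bars);
    }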
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73c6e9f..6d4a5319148d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
+ /* Fall back to PCI_D0 if native PM is not supported */
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ /* only skip sriov related */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index fa4d9f324189..8e286259a007 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -319,20 +319,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
-static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
-{
- int err;
-
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
- return err;
-}
-
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
@@ -356,7 +342,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
* over right here, before we set the alarm.
*/
- return ___rtc_set_alarm(rtc, alarm);
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ return err;
}
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -770,20 +763,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
return 0;
}
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
- struct rtc_wkalrm alarm;
- struct rtc_time tm;
-
- __rtc_read_time(rtc, &tm);
-
- alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
- ktime_set(300, 0)));
- alarm.enabled = 0;
-
- ___rtc_set_alarm(rtc, &alarm);
-}
-
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
@@ -805,10 +784,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
struct rtc_wkalrm alarm;
int err;
next = timerqueue_getnext(&rtc->timerqueue);
- if (!next) {
- rtc_alarm_disable(rtc);
+ if (!next)
return;
- }
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
@@ -870,8 +847,7 @@ again:
err = __rtc_set_alarm(rtc, &alarm);
if (err == -ETIME)
goto again;
- } else
- rtc_alarm_disable(rtc);
+ }
mutex_unlock(&rtc->ops_lock);
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index eda128fc1d38..64aedd8cc095 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
+ /*
+ * XXX - m41t80 alarm functionality is reported broken.
+ * Until it is fixed, don't register alarm functions.
+ *
.read_alarm = m41t80_rtc_read_alarm,
.set_alarm = m41t80_rtc_set_alarm,
+ */
.proc = m41t80_rtc_proc,
+ /*
+ * See above comment on broken alarm
+ *
.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+ */
};
#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f888223..b79576b64f45 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ /* if previous slave_alloc returned early, there is nothing to do */
+ if (!zfcp_sdev->port)
+ return;
+
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491e..542668292900 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_writeb(client, *buf, off);
-
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_writeb(client, *buf, off);
+ if (ret < 0)
break;
- }
-
len--;
buf++;
off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_readb(client, buf, off);
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_readb(client, buf, off);
+ if (ret < 0)
break;
- }
len--;
buf++;
off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
.remove = __devexit_p(bbc_i2c_remove),
};
-static int __init bbc_i2c_init(void)
-{
- return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
- platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
MODULE_LICENSE("GPL");
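This and the following four sbus hunks replace hand-rolled init/exit pairs with module_platform_driver(). For reference, the macro generates roughly the code it deletes here (a sketch of the 3.2-era definition in linux/platform_device.h, not verbatim):

    /* module_platform_driver(foo_driver) expands approximately to: */
    static int __init foo_driver_init(void)
    {
            return platform_driver_register(&foo_driver);
    }
    module_init(foo_driver_init);

    static void __exit foo_driver_exit(void)
    {
            platform_driver_unregister(&foo_driver);
    }
    module_exit(foo_driver_exit);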
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66a..4b9939726c34 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
.remove = __devexit_p(d7s_remove),
};
-static int __init d7s_init(void)
-{
- return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
- platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154f..339fd6f65eda 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
.remove = __devexit_p(envctrl_remove),
};
-static int __init envctrl_init(void)
-{
- return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
- platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
-module_init(envctrl_init);
-module_exit(envctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaaa..826157f38694 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
.remove = __devexit_p(flash_remove),
};
-static int __init flash_init(void)
-{
- return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
- platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
-module_init(flash_init);
-module_exit(flash_cleanup);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26a..0b31658ccde5 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
};
-static int __init uctrl_init(void)
-{
- return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
- platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
-module_init(uctrl_init);
-module_exit(uctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4e6a1c..1ad0b8225560 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_lock(&session->lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
- if (!task) {
+ if (!task || !task->sc) {
spin_unlock(&session->lock);
return -EINVAL;
}
sc = task->sc;
- spin_unlock(&session->lock);
if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;
+ spin_unlock(&session->lock);
+
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44bb84a..8d67467dd9ce 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+ .notifier_call = fcoe_dcb_app_notification,
+};
+
static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
+ skb->priority = port->priority;
+
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ put_cpu();
return -EINVAL;
}
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
*/
static void fcoe_dev_setup(void)
{
+ register_dcbevent_notifier(&dcb_notifier);
register_netdevice_notifier(&fcoe_notifier);
}
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
*/
static void fcoe_dev_cleanup(void)
{
+ unregister_dcbevent_notifier(&dcb_notifier);
unregister_netdevice_notifier(&fcoe_notifier);
}
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *real_dev;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ real_dev = fcoe->netdev;
+
+ if (netdev == real_dev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct dcb_app_type *entry = ptr;
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ struct fcoe_port *port;
+ int prio;
+
+ if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+ return NOTIFY_OK;
+
+ netdev = dev_get_by_index(&init_net, entry->ifindex);
+ if (!netdev)
+ return NOTIFY_OK;
+
+ fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+ dev_put(netdev);
+ if (!fcoe)
+ return NOTIFY_OK;
+
+ if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+ prio = ffs(entry->app.priority) - 1;
+ else
+ prio = entry->app.priority;
+
+ if (prio < 0)
+ return NOTIFY_OK;
+
+ if (entry->app.protocol == ETH_P_FIP ||
+ entry->app.protocol == ETH_P_FCOE)
+ fcoe->ctlr.priority = prio;
+
+ if (entry->app.protocol == ETH_P_FCOE) {
+ port = lport_priv(fcoe->ctlr.lp);
+ port->priority = prio;
+ }
+
+ return NOTIFY_OK;
+}
+
/**
* fcoe_device_notification() - Handler for net device events
* @notifier: The context of the notification
@@ -1965,6 +2038,46 @@ static bool fcoe_match(struct net_device *netdev)
}
/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @fcoe: The fcoe_interface whose netdev and lport get DCB APP priorities
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+ int dcbx;
+ u8 fup, up;
+ struct net_device *netdev = fcoe->realdev;
+ struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct dcb_app app = {
+ .priority = 0,
+ .protocol = ETH_P_FCOE
+ };
+
+ /* setup DCB priority attributes. */
+ if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+ if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ app.selector = DCB_APP_IDTYPE_ETHTYPE;
+ up = dcb_getapp(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_getapp(netdev, &app);
+ }
+
+ port->priority = ffs(up) ? ffs(up) - 1 : 0;
+ fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ }
+#endif
+}
+
+/**
* fcoe_create() - Create a fcoe interface
* @netdev : The net_device object the Ethernet interface to create on
* @fip_mode: The FIP mode for this creation
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;
+ /* setup DCB priority attributes. */
+ fcoe_dcb_create(fcoe);
+
/* add to lports list */
fcoe_hostlist_add(lport);
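In the DCB code above, CEE-mode APP entries carry the priority as a bitmask of user priorities while IEEE mode carries a plain number, which is why the notifier and fcoe_dcb_create() convert with ffs(mask) - 1 and reject negative results. A tiny worked example:

    #include <linux/bitops.h>       /* kernel ffs() */

    static int cee_app_prio(u8 mask)
    {
            /* ffs() is 1-based and returns 0 for an empty mask, so:
             * mask 0x08 -> ffs() == 4 -> priority 3
             * mask 0x00 -> ffs() == 0 -> -1, rejected by the callers */
            return ffs(mask) - 1;
    }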
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71ef..e7522dcc296e 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 4e041f6d808c..d570573b7963 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
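The mpt2sas one-liner switches the event-log allocation to GFP_ATOMIC because _scsih_smart_predicted_fault() runs in atomic context, where a GFP_KERNEL allocation may sleep. The general shape of the rule, as a sketch (foo_* names are hypothetical):

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(foo_lock);

    static void foo_log_event(size_t sz)
    {
            unsigned long flags;
            void *buf;

            spin_lock_irqsave(&foo_lock, flags);
            /* may not sleep here: GFP_ATOMIC, and failure must be handled */
            buf = kzalloc(sz, GFP_ATOMIC);
            if (buf) {
                    /* ... fill and hand off the event ... */
                    kfree(buf);     /* placeholder for the hand-off */
            }
            spin_unlock_irqrestore(&foo_lock, flags);
    }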
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c41e931..6465dae5883a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
- if (!base_vha->flags.online)
+ if (!base_vha->flags.online) {
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
- else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
- fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
- else
+ return;
+ }
+
+ switch (atomic_read(&base_vha->loop_state)) {
+ case LOOP_UPDATE:
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ break;
+ case LOOP_DOWN:
+ if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_DEAD:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787715c0..f3cddd5800c3 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,17 +12,17 @@
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0116 | |
- * | Mailbox commands | 0x1129 | |
+ * | Mailbox commands | 0x112b | |
* | Device Discovery | 0x2083 | |
* | Queue Command and IO tracing | 0x302e | 0x3008 |
* | DPC Thread | 0x401c | |
* | Async Events | 0x5059 | |
- * | Timer Routines | 0x600d | |
+ * | Timer Routines | 0x6010 | 0x600e,0x600f |
* | User Space Interactions | 0x709d | |
- * | Task Management | 0x8041 | |
+ * | Task Management | 0x8041 | 0x800b |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb051 | |
+ * | ISP82XX Specific | 0xb052 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d8135c9e..c0c11afb685c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f1877..54ea68cec4c5 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
+ if (!fw_major_version && ql2xallocfwdump
+ && !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
}
} else {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec89622a0f..a4b267e60a35 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
- struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
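The qla_iocb.c changes above are a refactor: qla2x00_prep_cont_type1_iocb() now takes the request queue as an explicit parameter instead of reaching for vha->req, so the CT IOCB callers can pass req_q_map[0]. A minimal sketch, with simplified stand-in types, of the ring-advance logic being parameterised:

/*
 * Minimal sketch (simplified stand-in types) of the ring-advance logic
 * being parameterised: the queue is now an argument, so multi-queue
 * callers choose which ring the continuation IOCB lands on.
 */
#include <stdio.h>

struct req_que {
	unsigned int ring_index;	/* next slot to use */
	unsigned int length;		/* total slots in the ring */
	int *ring;			/* stand-in for the IOCB array */
	int *ring_ptr;			/* current slot */
};

static int *prep_cont_iocb(struct req_que *req)
{
	/* Adjust ring index, wrapping back to slot 0 at the end. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return req->ring_ptr;
}

int main(void)
{
	int slots[4] = { 0 };
	struct req_que q = { .ring_index = 3, .length = 4,
			     .ring = slots, .ring_ptr = &slots[3] };

	/* From the last slot, the next continuation wraps to slot 0. */
	printf("wrapped to slot %td\n", prep_cont_iocb(&q) - slots);
	return 0;
}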
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf1aeea..7b91b290ffd6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
- break;
+ goto check_scsi_status;
}
if (!lscsi_status &&
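The one-line qla_isr.c change swaps a break for goto check_scsi_status, so a residual-underrun error no longer skips the SCSI-status inspection that follows. A toy control-flow sketch of the difference (names and values are illustrative, not the driver's):

/*
 * Toy control-flow sketch (illustrative names and values, not the
 * driver's) of why the goto matters: the error result is recorded, but
 * the SCSI-status check after the switch still gets to run.
 */
#include <stdio.h>

static int handle_status(int underrun_error, int lscsi_status)
{
	int result = 0;

	switch (underrun_error) {
	case 1:				/* bad residual */
		result = -1;		/* DID_ERROR analogue */
		goto check_scsi_status;	/* was: break, skipping the check */
	default:
		break;
	}

check_scsi_status:
	if (lscsi_status)
		result = -2;		/* e.g. check condition wins */
	return result;
}

int main(void)
{
	printf("%d\n", handle_status(1, 0));	/* -1: underrun error kept */
	printf("%d\n", handle_status(1, 2));	/* -2: status still checked */
	return 0;
}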
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9f6ac2..82a33533ed26 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, base_vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
+ return QLA_FUNCTION_TIMEOUT;
}
/*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112a,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101c,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112b,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101e,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort.\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5ddce4..03554934b0a5 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3817,6 +3817,20 @@ exit:
return rval;
}
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ ha->flags.mbox_busy = 0;
+ ql_log(ql_log_warn, vha, 0x6010,
+ "Doing premature completion of mbx command.\n");
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
+ ql_dbg(ql_dbg_timer, vha, 0x6011,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- ql_dbg(ql_dbg_timer, vha, 0x6005,
+ ql_log(ql_log_info, vha, 0x6005,
"dumping hw/fw registers:.\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql_log(ql_log_warn, vha, 0xb052,
+ "Firmware aborted with "
+ "error code 0x00006700. Device is "
+ "being reset.\n");
if (halt_status & HALT_STATUS_UNRECOVERABLE) {
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_log(ql_log_warn, vha, 0x6007,
- "Due to FW hung, doing "
- "premature completion of mbx "
- "command.\n");
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
}
}
}
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
msleep(1000);
if (qla82xx_check_fw_alive(vha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- complete(&ha->mbx_intr_comp);
- }
+ qla82xx_clear_pending_mbx(vha);
break;
}
}
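qla82xx_clear_pending_mbx() centralises the "premature completion" idiom that used to be open-coded in the watchdog, chip-reset cleanup, and AER paths: if a mailbox command is outstanding when the firmware is declared hung, mark it done and wake the waiter so it can unwind with a timeout instead of blocking forever. A userspace sketch of the same shape, with a pthread condition variable standing in for the kernel's struct completion:

/*
 * Userspace sketch (not driver code) of the premature-completion idiom.
 * A pthread condition variable stands in for struct completion.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct mbx {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool busy;	/* a mailbox command is outstanding */
	bool done;	/* completed (by IRQ, or prematurely by watchdog) */
};

/* Watchdog side: the analogue of qla82xx_clear_pending_mbx(). */
static void clear_pending_mbx(struct mbx *m)
{
	pthread_mutex_lock(&m->lock);
	if (m->busy) {
		m->busy = false;
		m->done = true;
		puts("doing premature completion of mbx command");
		pthread_cond_signal(&m->cond);
	}
	pthread_mutex_unlock(&m->lock);
}

/* Mailbox-caller side: blocks until the command is completed. */
static void *mbx_waiter(void *arg)
{
	struct mbx *m = arg;

	pthread_mutex_lock(&m->lock);
	while (!m->done)
		pthread_cond_wait(&m->cond, &m->lock);
	pthread_mutex_unlock(&m->lock);
	puts("mailbox caller unblocked");
	return NULL;
}

int main(void)
{
	struct mbx m = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			 true /* command issued */, false };
	pthread_t t;

	pthread_create(&t, NULL, mbx_waiter, &m);
	sleep(1);		/* let the waiter block, then detect "hang" */
	clear_pending_mbx(&m);
	pthread_join(t, NULL);
	return 0;
}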
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c199bc2..57a226be339a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7bfc626..f9e5b85e84d8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
"Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
-int ql2xmdenable;
+int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
- "0 (Default) - MiniDump disabled. "
- "1 - MiniDump enabled.");
+ "0 - MiniDump disabled. "
+ "1 (Default) - MiniDump enabled.");
/*
* SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
+ vha->req = ha->req_q_map[0];
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-/*
- * qla2x00_wait_for_loop_ready
- * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- * to be in LOOP_READY state.
- * Input:
- * ha - pointer to host adapter structure
- *
- * Note:
- * Does context switching-Release SPIN_LOCK
- * (if any) before calling this routine.
- *
- *
- * Return:
- * Success (LOOP_READY) : 0
- * Failed (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
- int return_status = QLA_SUCCESS;
- unsigned long loop_timeout ;
- struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
- /* wait for 5 min at the max for loop to be ready */
- loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
- while ((!atomic_read(&base_vha->loop_down_timer) &&
- atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
- atomic_read(&base_vha->loop_state) != LOOP_READY) {
- if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- msleep(1000);
- if (time_after_eq(jiffies, loop_timeout)) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- }
- return (return_status);
-}
-
static void
sp_get(struct srb *sp)
{
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
- err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x800b,
- "Wait for loop ready failed for cmd=%p.\n", cmd);
- goto eh_reset_failed;
- }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
!= QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
}
- if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
- if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
- ret = SUCCESS;
- }
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+ ret = SUCCESS;
+
if (ret == FAILED)
goto eh_bus_reset_done;
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
- /*
- * Fixme-may be dpc thread is active and processing
- * loop_resync,so wait a while for it to
- * be completed and then issue big hammer.Otherwise
- * it may cause I/O failure as big hammer marks the
- * devices as lost kicking of the port_down_timer
- * while dpc is stuck for the mailbox to complete.
- */
- qla2x00_wait_for_loop_ready(vha);
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha, 0);
- qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
- if (ret != QLA_SUCCESS) {
+ if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
- } else
- qla2x00_wait_for_loop_ready(vha);
}
/* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_dbg(ql_dbg_aer, vha, 0x9001,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command.\n");
- complete(&ha->mbx_intr_comp);
- }
+ ql_dbg(ql_dbg_aer, vha, 0x9001, "PCI channel IO frozen.\n");
+ qla82xx_clear_pending_mbx(vha);
}
qla2x00_free_irqs(vha);
pci_disable_device(pdev);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357c1fa2..23f33a6d52d7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.07-k"
+#define QLA2XXX_VERSION "8.03.07.12-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637bf254e..fd5edc6e166d 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,7 +147,7 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
-#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
#define LSDW(x) ((u32)((u64)(x)))
@@ -173,6 +173,8 @@
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define DISABLE_ACB_TOV 30
+#define IP_CONFIG_TOV 30
+#define LOGIN_TOV 12
#define MAX_RESET_HA_RETRIES 2
@@ -240,6 +242,45 @@ struct ddb_entry {
uint16_t fw_ddb_index; /* DDB firmware index */
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
+ uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+ struct dev_db_entry fw_ddb_entry;
+ int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+ int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+
+ /* Driver Re-login */
+ unsigned long flags; /* DDB Flags */
+ uint16_t default_relogin_timeout; /* Max time to wait for
+ * relogin to complete */
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+ atomic_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+ struct list_head list;
+ uint16_t fw_ddb_idx;
+ struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+ int port;
+ int tpgt;
+ char ip_addr[DDB_IPADDR_LEN];
+ char iscsi_name[ISCSI_NAME_SIZE];
+ uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
};
/*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
+ uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
#define QLFLASH_WAITING 0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
uint16_t iscsi_pci_func_cnt;
uint8_t model_name[16];
struct completion disable_acb_comp;
+ struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+ uint16_t pri_ddb_idx;
+ uint16_t sec_ddb_idx;
+ int is_reset;
};
struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER 0
+#define RESET_ADAPTER 1
+
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
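The new unblock_sess/ddb_change members give each DDB its own handlers, so the shared AEN path can stay type-agnostic while flash-resident and user-created DDBs diverge (qla4xxx_unblock_flash_ddb vs qla4xxx_unblock_ddb later in this patch). A trimmed standalone sketch of that dispatch:

/*
 * Trimmed standalone sketch of the per-DDB dispatch: flash-resident and
 * user-created DDBs install different handlers, and shared code calls
 * through the pointers without switching on the type.
 */
#include <stdio.h>

struct ddb_entry {
	int type;	/* FLASH_DDB vs user-created; informational here */
	int (*unblock_sess)(struct ddb_entry *ddb);
};

static int unblock_user(struct ddb_entry *ddb)
{
	(void)ddb;
	puts("user DDB: iscsi_conn_start + LOGGED_IN event");
	return 0;
}

static int unblock_flash(struct ddb_entry *ddb)
{
	(void)ddb;
	puts("flash DDB: iscsi_unblock_session + scan target");
	return 0;
}

/* Shared AEN path: stays type-agnostic. */
static void on_session_active(struct ddb_entry *ddb)
{
	ddb->unblock_sess(ddb);
}

int main(void)
{
	struct ddb_entry user  = { 0, unblock_user };
	struct ddb_entry flash = { 1, unblock_flash };

	on_session_active(&user);
	on_session_active(&flash);
	return 0;
}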
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20dbbd1..4ac07f882521 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
+#define MAX_DEV_DB_ENTRIES_40XX 256
/*************************************************************************
*
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
uint8_t res14[140]; /* 274-2FF */
};
+#define IP_ADDR_COUNT 4 /* Total of 4 IP addresses supported per interface:
+ * one IPv4, one IPv6 link-local and two IPv6
+ */
+
+#define IP_STATE_MASK 0x0F000000
+#define IP_STATE_SHIFT 24
+
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
/* struct addr_ctrl_blk sec;*/
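IP_STATE_MASK and IP_STATE_SHIFT isolate the address-state nibble of the get-IP-state mailbox status word; qla4xxx_wait_for_ip_configuration() later in this patch consumes them. A minimal sketch of the intended extraction:

/*
 * Minimal sketch: the address state occupies bits 24-27 of the
 * get-IP-state mailbox status word.
 */
#include <stdio.h>

#define IP_STATE_MASK	0x0F000000
#define IP_STATE_SHIFT	24

int main(void)
{
	unsigned int sts = 0x03000000;	/* sample status word */
	unsigned int ip_state = (sts & IP_STATE_MASK) >> IP_STATE_SHIFT;

	printf("ip_state = 0x%x\n", ip_state);	/* 0x3 */
	return 0;
}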
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d5ea21..d0dd4b330206 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fbaef553..1bdfa8120ac8 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
- if (ret == QLA_ERROR)
+ if (ret == QLA_ERROR) {
+ next_idx++;
continue;
+ }
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
}
}
-
/**
* qla4xxx_initialize_adapter - initiailizes hba
* @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
* This routine parforms all of the steps necessary to initialize the adapter.
*
**/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
if (status == QLA_ERROR)
goto exit_init_hba;
- qla4xxx_free_ddb_index(ha);
+ if (is_reset == RESET_ADAPTER)
+ qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
return status;
}
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
- uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
{
- struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
- /* check for out of range index */
- if (fw_ddb_index >= MAX_DDB_ENTRIES)
- goto exit_ddb_event;
-
- /* Get the corresponging ddb entry */
- ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
- /* Device does not currently exist in our database. */
- if (ddb_entry == NULL) {
- ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
- __func__, fw_ddb_index);
-
- if (state == DDB_DS_NO_CONNECTION_ACTIVE)
- clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
- goto exit_ddb_event;
- }
-
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
__func__));
break;
}
+ return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+ /*
+ * This triggers a relogin. After the relogin_timer
+ * expires, the relogin gets scheduled. We must wait a
+ * minimum amount of time after receiving a 0x8014 AEN
+ * with a failed device_state, or a logout response, before
+ * we can issue another relogin.
+ *
+ * Firmware pads this timeout: (time2wait + 1).
+ * The driver's relogin retry interval must be longer than
+ * the firmware's; otherwise the firmware will fail the
+ * set_ddb() mbx cmd with 0x4005 while it is still
+ * counting down its time2wait.
+ */
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @state: Device state
+ *
+ * This routine processes a Device Database Changed AEN event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
+{
+ struct ddb_entry *ddb_entry;
+ int status = QLA_ERROR;
+
+ /* check for out of range index */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES)
+ goto exit_ddb_event;
+
+ /* Get the corresponding ddb entry */
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+ /* Device does not currently exist in our database. */
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
+ }
+
+ ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
return status;
}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues setddb and conn open mbx
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t mbx_sts = 0;
+ int ret;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags))
+ return;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Skipping login to non FLASH DB"));
+ goto exit_login;
+ }
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_login;
+ }
+
+ if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+ ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login;
+
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+ }
+
+ memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+ ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+ ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_dma, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+ goto exit_login;
+ }
+
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+ ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ goto exit_login;
+ }
+
+exit_login:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
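qla4xxx_login_flash_ddb() follows the single-exit cleanup style used throughout this patch: every failure path jumps to one label that releases whatever was allocated, and dma_pool_free() is only reached with a non-NULL buffer. A standalone rendering of the shape, with malloc()/free() standing in for the DMA pool:

/*
 * Standalone rendering of the single-exit cleanup shape; malloc()/free()
 * stand in for dma_pool_alloc()/dma_pool_free(), and free(NULL) being a
 * no-op is what makes the one exit label safe in this sketch.
 */
#include <stdio.h>
#include <stdlib.h>

static int login_sketch(int link_up)
{
	char *buf = NULL;
	int ret = -1;

	if (!link_up)
		return -1;		/* nothing allocated yet */

	buf = malloc(512);		/* cf. dma_pool_alloc() */
	if (!buf)
		goto exit_login;

	/* ... set_ddb and conn open steps would go here ... */
	ret = 0;

exit_login:
	free(buf);			/* single cleanup point */
	return ret;
}

int main(void)
{
	printf("%d\n", login_sketch(1));	/* 0 on the happy path */
	return 0;
}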
+
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b84870392..c2593782fbbe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}
+ if (is_qla40XX(ha)) {
+ if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as "
+ "adapter removal detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ }
+
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
+ ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b127f33..4169c8baa112 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+ "Set to disable exporting boot targets to sysfs\n"
+ " 0 - Export boot targets\n"
+ " 1 - Do not export boot targets (Default)");
+
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 30 sec.");
+ " Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
- if (adapter_up(ha))
+ if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+ uint32_t mbx_sts = 0;
+ uint16_t tmp_ddb_index;
+ int ret;
+
+get_ddb_index:
+ tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ ret = QLA_ERROR;
+ goto exit_get_ddb_index;
+ }
+
+ if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", tmp_ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+ "DDB index = %d not available trying next\n",
+ tmp_ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ }
+
+ *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+ return ret;
+}
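qla4xxx_get_ddb_index() pairs find_first_zero_bit() with test_and_set_bit() and retries on a lost race, so two paths allocating DDB indices concurrently cannot claim the same slot. A userspace sketch of that claim loop, with a 64-bit C11 atomic bitmap standing in for ha->ddb_idx_map:

/*
 * Userspace sketch of the claim loop; a 64-bit atomic bitmap stands in
 * for ha->ddb_idx_map and the find_first_zero_bit()/test_and_set_bit()
 * pair.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long long ddb_map;	/* bit set = index in use */

static int claim_index(void)
{
	for (;;) {
		unsigned long long map = atomic_load(&ddb_map);
		int idx;

		/* find_first_zero_bit() analogue */
		for (idx = 0; idx < 64; idx++)
			if (!(map & (1ULL << idx)))
				break;
		if (idx == 64)
			return -1;		/* no free index */

		/* test_and_set_bit() analogue: rescan if we lost a race */
		unsigned long long old =
			atomic_fetch_or(&ddb_map, 1ULL << idx);
		if (!(old & (1ULL << idx)))
			return idx;
	}
}

int main(void)
{
	printf("claimed %d\n", claim_index());	/* 0 */
	printf("claimed %d\n", claim_index());	/* 1 */
	return 0;
}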
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ char *existing_ipaddr,
+ char *user_ipaddr)
+{
+ uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+ char formatted_ipaddr[DDB_IPADDR_LEN];
+ int status = QLA_SUCCESS, ret = 0;
+
+ if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+ ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+ } else {
+ ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+ }
+
+ if (strcmp(existing_ipaddr, formatted_ipaddr))
+ status = QLA_ERROR;
+
+out_match:
+ return status;
+}
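A userspace analogue of the comparison above: parse the user's string with inet_pton() (standing in for in4_pton()/in6_pton()), re-format it canonically with inet_ntop() (standing in for the kernel's %pI4/%pI6 printing), and only then strcmp() against the stored form, so textual variants of the same address compare equal:

/*
 * Userspace analogue of qla4xxx_match_ipaddress(): canonicalise before
 * comparing, so case and IPv6 zero-compression differences do not
 * defeat the match.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static int ip_matches(int af, const char *existing, const char *user)
{
	unsigned char raw[16];			/* fits IPv4 and IPv6 */
	char canon[INET6_ADDRSTRLEN];

	if (inet_pton(af, user, raw) != 1)
		return 0;			/* unparseable: no match */
	if (!inet_ntop(af, raw, canon, sizeof(canon)))
		return 0;
	return strcmp(existing, canon) == 0;
}

int main(void)
{
	/* Both print 1: case and zero-compression are normalised away. */
	printf("%d\n", ip_matches(AF_INET, "10.0.0.1", "10.0.0.1"));
	printf("%d\n", ip_matches(AF_INET6, "fe80::1", "FE80:0:0:0:0:0:0:1"));
	return 0;
}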
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int idx = 0, max_ddbs, rval;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess, *existing_sess;
+ struct iscsi_conn *conn, *existing_conn;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ if (sess->targetname == NULL ||
+ conn->persistent_address == NULL ||
+ conn->persistent_port == 0)
+ return QLA_ERROR;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ existing_sess = ddb_entry->sess->dd_data;
+ existing_conn = ddb_entry->conn->dd_data;
+
+ if (existing_sess->targetname == NULL ||
+ existing_conn->persistent_address == NULL ||
+ existing_conn->persistent_port == 0)
+ continue;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IQN = %s User IQN = %s\n",
+ existing_sess->targetname,
+ sess->targetname));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IP = %s User IP = %s\n",
+ existing_conn->persistent_address,
+ conn->persistent_address));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port = %d User Port = %d\n",
+ existing_conn->persistent_port,
+ conn->persistent_port));
+
+ if (strcmp(existing_sess->targetname, sess->targetname))
+ continue;
+ rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+ existing_conn->persistent_address,
+ conn->persistent_address);
+ if (rval == QLA_ERROR)
+ continue;
+ if (existing_conn->persistent_port != conn->persistent_port)
+ continue;
+ break;
+ }
+
+ if (idx == max_ddbs)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match found in fwdb sessions\n"));
+ return QLA_SUCCESS;
+}
+
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct scsi_qla_host *ha;
struct qla_endpoint *qla_ep;
struct ddb_entry *ddb_entry;
- uint32_t ddb_index;
- uint32_t mbx_sts = 0;
+ uint16_t ddb_index;
struct iscsi_session *sess;
struct sockaddr *dst_addr;
int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
-get_ddb_index:
- ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
- if (ddb_index >= MAX_DDB_ENTRIES) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free DDB index not available\n"));
- return NULL;
- }
-
- if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
- goto get_ddb_index;
-
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Found a free DDB index at %d\n", ddb_index));
- ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
- if (ret == QLA_ERROR) {
- if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
- ql4_printk(KERN_INFO, ha,
- "DDB index = %d not available trying next\n",
- ddb_index);
- goto get_ddb_index;
- }
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free FW DDB not available\n"));
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
return NULL;
- }
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->sess = cls_sess;
+ ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+ ddb_entry->ddb_change = qla4xxx_ddb_change;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
+ if (!cls_conn)
+ return NULL;
+
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- struct dev_db_entry *fw_ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t mbx_sts = 0;
int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ /* Check if we have a matching FW DDB; if so, do not log in to
+ * this target, since it could cause the target to log out the
+ * previous connection.
+ */
+ ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+ if (ret == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "Session already exist in FW.\n");
+ ret = -EEXIST;
+ goto exit_conn_start;
+ }
+
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto exit_conn_start;
}
ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
if (mbx_sts)
if (ddb_entry->fw_ddb_device_state ==
DDB_DS_SESSION_ACTIVE) {
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
goto exit_set_param;
}
@@ -1167,8 +1311,9 @@ exit_set_param:
ret = 0;
exit_conn_start:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- fw_ddb_entry, fw_ddb_entry_dma);
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
return -ENOSYS;
}
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ struct iscsi_cls_session *cls_sess,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int buflen = 0;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ char ip_addr[DDB_IPADDR_LEN];
+ uint16_t options = 0;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+ sess->initial_r2t_en =
+ (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+ sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+ conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+ (char *)fw_ddb_entry->iscsi_name, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+ (char *)ha->name_string, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_fwddb_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_fwddb_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+
+ cls_conn = ddb_entry->conn;
+
+ /* Update params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return;
+ goto exit_session_conn_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
- return;
+ goto exit_session_conn_param;
}
cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
+ /* Update timers after login */
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
/* Update params */
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
/*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
vfree(ha->chap_list);
ha->chap_list = NULL;
+ if (ha->fw_ddb_dma_pool)
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
+
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
goto mem_alloc_error_exit;
}
+ ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+ DDB_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->fw_ddb_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: fw_ddb_dma_pool allocation failed..\n",
+ __func__);
+ goto mem_alloc_error_exit;
+ }
+
return QLA_SUCCESS;
mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+ INVALID_ENTRY) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+ 0) {
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ INVALID_ENTRY);
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index [%d] login device\n",
+ __func__, ddb_entry->fw_ddb_index));
+ } else
+ atomic_dec(&ddb_entry->retry_relogin_timer);
+ }
+ }
+
+ /* Wait for relogin to timeout */
+ if (atomic_read(&ddb_entry->relogin_timer) &&
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ /*
+ * If the relogin times out and the device is
+ * still NOT ONLINE then try to relogin again.
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+ atomic_inc(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+ }
+ }
+}
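The timer hook above runs once per second per session and drives the relogin countdowns. A sketch of the retry countdown, with C11 atomics standing in for the kernel's atomic_t (the values and the print are illustrative):

/*
 * Sketch of the per-second retry countdown: a counter armed to
 * time2wait + 4 ticks down once per second; when it reaches zero it is
 * parked at INVALID_ENTRY and a relogin is scheduled.
 */
#include <stdatomic.h>
#include <stdio.h>

#define INVALID_ENTRY -1

static atomic_int retry_relogin_timer;	/* seconds until next relogin */

static void timer_tick(void)
{
	int v = atomic_load(&retry_relogin_timer);

	if (v == INVALID_ENTRY)
		return;				/* no relogin armed */
	if (v == 0) {
		atomic_store(&retry_relogin_timer, INVALID_ENTRY);
		puts("set DPC_RELOGIN_DEVICE");	/* schedule the relogin */
	} else {
		atomic_fetch_sub(&retry_relogin_timer, 1);
	}
}

int main(void)
{
	atomic_store(&retry_relogin_timer, 2);	/* time2wait-style padding */
	for (int sec = 0; sec < 4; sec++)
		timer_tick();			/* fires on the third tick */
	return 0;
}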
+
/**
* qla4xxx_timer - checks every second for work to do.
* @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
int start_dpc = 0;
uint16_t w;
+ iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+ if (ddb_entry->ddb_type == FLASH_DDB)
+ iscsi_block_session(ddb_entry->sess);
+ else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
/**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
}
/* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
iscsi_unblock_session(ddb_entry->sess);
} else {
/* Trigger relogin */
- iscsi_session_failure(cls_session->dd_data,
- ISCSI_ERR_CONN_FAILED);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ } else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
}
}
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+
+ /* Start scan target */
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " start scan\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock user space session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+
+ return QLA_SUCCESS;
+}
+
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ uint16_t relogin_timer;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ relogin_timer = max(ddb_entry->default_relogin_timeout,
+ (uint16_t)RELOGIN_TOV);
+ atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+ ddb_entry->fw_ddb_index, relogin_timer));
+
+ qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "relogin issued\n"));
+ qla4xxx_relogin_flash_ddb(cls_sess);
+ }
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- relogin device? --- */
+ if (adapter_up(ha) &&
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+ }
+
/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
* fatal error recovery. Therefore, the driver must
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
-
- qla4xxx_relogin_all_devices(ha);
+ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+ qla4xxx_build_ddb_list(ha, ha->is_reset);
+ iscsi_host_for_each_session(ha->host,
+ qla4xxx_login_flash_ddb);
+ } else
+ qla4xxx_relogin_all_devices(ha);
}
}
}
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
+
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
return ret;
}
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+
if (ddb_index[0] == 0xffff)
goto sec_target;
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
struct iscsi_boot_kobj *boot_kobj;
if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
- return 0;
+ return QLA_ERROR;
+
+ if (ql4xdisablesysfsboot) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: syfsboot disabled - driver will trigger login"
+ "and publish session for discovery .\n", __func__);
+ return QLA_SUCCESS;
+ }
+
ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
if (!boot_kobj)
goto put_host;
- return 0;
+ return QLA_SUCCESS;
put_host:
scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
exit_chap_list:
dma_free_coherent(&ha->pdev->dev, chap_size,
chap_flash_data, chap_dma);
- return;
}
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = ddb_entry->ha;
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ tddb->tpgt = sess->tpgt;
+ tddb->port = conn->persistent_port;
+ strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ uint16_t options = 0;
+
+ tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+ min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ return QLA_ERROR;
+
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+ return QLA_ERROR;
+
+ if (old_tddb->port != new_tddb->port)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+ old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+ old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+ new_tddb->ip_addr, new_tddb->iscsi_name));
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct ddb_entry *ddb_entry;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int idx;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+ /* Free up the normal-targets list */
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ list_del_init(&nt_ddb_idx->list);
+ vfree(nt_ddb_idx);
+ }
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr *dst_addr;
+ char *ip;
+
+ /* TODO: the iscsi_endpoint needs to be destroyed on unload */
+ dst_addr = vmalloc(sizeof(*dst_addr));
+ if (!dst_addr)
+ return NULL;
+
+ if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ dst_addr->sa_family = AF_INET6;
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+ } else {
+ dst_addr->sa_family = AF_INET;
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+ addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+ }
+
+ ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ vfree(dst_addr);
+ return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+ if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+ return QLA_ERROR;
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ ddb_entry->ddb_type = FLASH_DDB;
+ ddb_entry->fw_ddb_index = INVALID_ENTRY;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+ ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+ uint32_t idx = 0;
+ uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+ uint32_t sts[MBOX_REG_COUNT];
+ uint32_t ip_state;
+ unsigned long wtime;
+ int ret;
+
+ wtime = jiffies + (HZ * IP_CONFIG_TOV);
+ do {
+ for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+ if (ip_idx[idx] == -1)
+ continue;
+
+ ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+ if (ret == QLA_ERROR) {
+ ip_idx[idx] = -1;
+ continue;
+ }
+
+ ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Waiting for IP state for idx = %d, state = 0x%x\n",
+ ip_idx[idx], ip_state));
+ if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+ ip_state == IP_ADDRSTATE_INVALID ||
+ ip_state == IP_ADDRSTATE_PREFERRED ||
+ ip_state == IP_ADDRSTATE_DEPRICATED ||
+ ip_state == IP_ADDRSTATE_DISABLING)
+ ip_idx[idx] = -1;
+
+ }
+
+ /* Break if all IP states checked */
+ if ((ip_idx[0] == -1) &&
+ (ip_idx[1] == -1) &&
+ (ip_idx[2] == -1) &&
+ (ip_idx[3] == -1))
+ break;
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+}
+
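The function above is the standard jiffies-based poll-with-timeout idiom: compute the deadline once, poll at a fixed interval, and exit early once every interface reaches a terminal state. A minimal sketch, with done() as a hypothetical stand-in for the per-interface check:

    unsigned long deadline = jiffies + HZ * IP_CONFIG_TOV;

    do {
        if (done())                             /* hypothetical helper */
            break;
        schedule_timeout_uninterruptible(HZ);   /* poll once a second */
    } while (time_after(deadline, jiffies));
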
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ int max_ddbs;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id;
+ struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32, tmo = 0;
+ uint32_t initial_cmdsn = 0;
+ struct list_head list_st, list_nt; /* List of sendtargets */
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ int fw_idx_size;
+ unsigned long wtime;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_ddb_list;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_st;
+
+ /* Check if ST; if so, add it to list_st */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+ goto continue_next_st;
+
+ st_ddb_idx = vzalloc(fw_idx_size);
+ if (!st_ddb_idx)
+ break;
+
+ st_ddb_idx->fw_ddb_idx = idx;
+
+ list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+ if (next_idx == 0)
+ break;
+ }
+
+ /* Before issuing the conn open mailbox command, ensure all IP states
+ * are configured; conn open fails if the IPs are not configured.
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Walk the STs and fire each sendtarget by issuing a conn open mailbox command */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait for all the sendtargets to complete, for a minimum of 12 seconds */
+ tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+ list) {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ st_ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx,
+ &state, &conn_err, NULL,
+ NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+ }
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_nt;
+
+ /* Check if NT; if so, add it to the list */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_nt;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n",
+ idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
+ }
+ list_add_tail(&nt_ddb_idx->list, &list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha,
+ fw_ddb_entry) == QLA_SUCCESS)
+ goto continue_next_nt;
+ }
+
+ /* Create the session object with INVALID_ENTRY;
+ * the target_id gets set when we issue the login.
+ */
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+ ha->host, cmds_max,
+ sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess)
+ goto exit_ddb_list;
+
+ /*
+ * iscsi_session_setup increments the driver reference
+ * count, which would prevent the driver from being
+ * unloaded. Call module_put to decrement the
+ * reference count.
+ */
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess,
+ sizeof(struct qla_conn),
+ conn_id);
+ if (!cls_conn)
+ goto exit_ddb_list;
+
+ ddb_entry->conn = cls_conn;
+
+ /* Setup ep, for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "Unable to get ep\n"));
+ }
+
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+ cls_conn);
+
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices by
+ * short-circuiting the relogin-timer logic: instead of
+ * arming the timer, set the flags to initiate login
+ * right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ }
+ }
+continue_next_nt:
+ if (next_idx == 0)
+ break;
+ }
+exit_ddb_list:
+ qla4xxx_free_nt_list(&list_nt);
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+ qla4xxx_free_ddb_index(ha);
+}
+
+
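Both scans in qla4xxx_build_ddb_list() walk the firmware DDB table the same way: the firmware returns the next valid index through next_idx, and next_idx == 0 terminates the chain. A sketch of the walk, with get_entry() and handle() as hypothetical stand-ins for the mailbox query and the per-entry work:

    uint32_t idx, next_idx = 0;

    for (idx = 0; idx < max_ddbs; idx = next_idx) {
        if (get_entry(idx, &next_idx) == QLA_ERROR)
            break;
        handle(idx);
        if (next_idx == 0)      /* end of the firmware chain */
            break;
    }
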
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* firmware
* NOTE: interrupts enabled upon successful completion
*/
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
- qla4xxx_create_chap_list(ha);
-
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
__func__);
+ /* Perform the build ddb list and login to each */
+ qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+ iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+ qla4xxx_create_chap_list(ha);
+
qla4xxx_create_ifaces(ha);
return 0;
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ int options;
+ int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if ((ddb_entry != NULL) &&
+ (ddb_entry->ddb_type == FLASH_DDB)) {
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+ == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+ __func__);
+
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ /*
+ * We decremented the driver reference count when we
+ * set up the session so that the driver could unload
+ * seamlessly without destroying the session; take the
+ * reference back before tearing the session down.
+ */
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ iscsi_session_teardown(ddb_entry->sess);
+ }
+ }
+}
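The try_module_get() above balances the module_put() issued in qla4xxx_build_ddb_list() when the session was created, leaving the module reference count net-unchanged over a flash-DDB session's lifetime. The pairing, in sketch form:

    /* session setup: drop the reference so the driver can unload */
    module_put(qla4xxx_iscsi_transport.owner);

    /* session teardown: take the reference back before destroying it */
    try_module_get(qla4xxx_iscsi_transport.owner);
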
/**
* qla4xxx_remove_adapter - calback function to remove adapter.
* @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
/* destroy iface from sysfs */
qla4xxx_destroy_ifaces(ha);
- if (ha->boot_kset)
+ if ((!ql4xdisablesysfsboot) && ha->boot_kset)
iscsi_boot_destroy_kset(ha->boot_kset);
+ qla4xxx_destroy_fw_ddb_session(ha);
+
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d3f532..5254e57968f5 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c0a545..520e8286db28 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
- ssb_pcicore_fix_sprom_core_index(pc);
+ struct ssb_device *pdev = pc->dev;
+ struct ssb_bus *bus = pdev->bus;
+
+ if (bus->bustype == SSB_BUSTYPE_PCI)
+ ssb_pcicore_fix_sprom_core_index(pc);
/* Disable PCI interrupts. */
- ssb_write32(pc->dev, SSB_INTVEC, 0);
+ ssb_write32(pdev, SSB_INTVEC, 0);
/* Additional PCIe always once-executed workarounds */
if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056c..5385da2e9cdb 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a8..7eb56178fb64 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS 4
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
*/
void dsp_clk_exit(void)
{
+ int i;
+
dsp_clock_disable_all(dsp_clocks);
+ for (i = 0; i < DM_TIMER_CLOCKS; i++)
+ omap_dm_timer_free(timer[i]);
+
clk_put(iva2_clk);
clk_put(ssi.sst_fck);
clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
void dsp_clk_init(void)
{
static struct platform_device dspbridge_device;
+ int i, id;
dspbridge_device.dev.bus = &platform_bus_type;
+ for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+ timer[i] = omap_dm_timer_request_specific(id);
+
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
clk_enable(iva2_clk);
break;
case GPT_CLK:
- timer[clk_id - 1] =
- omap_dm_timer_request_specific(DMT_ID(clk_id));
+ status = omap_dm_timer_start(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
clk_disable(iva2_clk);
break;
case GPT_CLK:
- omap_dm_timer_free(timer[clk_id - 1]);
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c8..76cfc6edecd9 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-
-#ifdef MODULE
#include <linux/module.h>
-#endif
-
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e8c564a53346..a8078d0638fa 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1458,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
},
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 717ebc9ff941..600d82348511 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
ret = -ENODEV;
goto err0;
}
- dwc->revision = reg & DWC3_GSNPSREV_MASK;
+ dwc->revision = reg;
dwc3_core_soft_reset(dwc);
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 596a0b464e61..4dff83d2f265 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -130,9 +130,6 @@ ep_matches (
num_req_streams = ep_comp->bmAttributes & 0x1f;
if (num_req_streams > ep->max_streams)
return 0;
- /* Update the ep_comp descriptor if needed */
- if (num_req_streams != ep->max_streams)
- ep_comp->bmAttributes = ep->max_streams;
}
}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index c39d58860fa0..1a6f415c0d02 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2975,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
fsg_common_put(common);
usb_free_descriptors(fsg->function.descriptors);
usb_free_descriptors(fsg->function.hs_descriptors);
+ usb_free_descriptors(fsg->function.ss_descriptors);
kfree(fsg);
}
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index a7dc1e1d45f2..2ac4ac2e4ef9 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -18,7 +18,7 @@
#include "isp1760-hcd.h"
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -31,7 +31,7 @@
#include <linux/pci.h>
#endif
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct isp1760 {
struct usb_hcd *hcd;
int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 60ddba8066ea..79cb0af779fa 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
+ else if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
else
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
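The new bulk-split branch above packs a packet count into TXMAXP: the low bits hold the packet size, and the field starting at bit 11 (per the shift) holds (FIFO size / packet size) - 1, letting the controller split a large transfer across several USB packets. A worked example, assuming a 4096-byte TX FIFO and 512-byte bulk packets:

    u16 txmaxp = 512 | ((4096 / 512) - 1) << 11;    /* 0x0200 | 0x3800 = 0x3a00 */
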
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d70009..ad96a3896729 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_attch)
intenb1 |= ATTCHE;
- if (mod->irq_attch)
+ if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index bade761a1e52..7955de589951 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1267,6 +1267,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
+ hcd->has_tt = 1; /* for low/full speed */
pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
if (!pipe_info) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e3426602dc82..6dd64534fad0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -663,7 +663,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 03f449a430d2..5b89f7d6cd0f 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -76,8 +76,6 @@ static int irq;
static void __iomem *virtbase;
static unsigned long coh901327_users;
static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
static struct device *parent;
/*
@@ -461,6 +459,10 @@ out:
}
#ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
{
irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 3774c9b8dac9..8464ea1c36a1 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -231,6 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
asminline_call(&cmn_regs, bios32_entrypoint);
if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@ static int __devinit cru_detect(unsigned long map_entry,
if ((physical_bios_base + physical_bios_offset)) {
cru_rom_addr =
ioremap(cru_physical_address, cru_length);
- if (cru_rom_addr)
+ if (cru_rom_addr) {
+ set_memory_x((unsigned long)cru_rom_addr, cru_length);
retval = 0;
+ }
}
printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index ba6ad662635a..99796c5d913d 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
module_param(turn_SMI_watchdog_clear_off, int, 0);
MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
- "Turn off SMI clearing watchdog (default=0)");
+ "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
/*
* Some TCO specific functions
@@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
ret = -EIO;
goto out_unmap;
}
- if (turn_SMI_watchdog_clear_off) {
+ if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
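With this change the parameter acts as a version threshold rather than a boolean: the default of 1 disables the SMI# clearing logic only on TCO v1 hardware, while passing 2 on the command line would also cover v2. For example:

    /* param = 1 (default), iTCO_version = 1 -> SMI clearing disabled */
    /* param = 1,           iTCO_version = 2 -> SMI clearing kept     */
    if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version)
        val32 &= 0xffffdfff;    /* clear TCO_EN (bit 13) */
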
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index cc2cfbe33b30..bfaf9bb1ee0d 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
return 0;
}
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
{
.id = 0x00141805,
.mask = 0x00ffffff,
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e964b91c447..284798aaf8b1 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -166,7 +166,7 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- xen_io_tlb_start = alloc_bootmem(bytes);
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
if (!xen_io_tlb_start) {
m = "Cannot allocate Xen-SWIOTLB buffer!\n";
goto error;
@@ -179,7 +179,7 @@ retry:
bytes,
xen_io_tlb_nslabs);
if (rc) {
- free_bootmem(__pa(xen_io_tlb_start), bytes);
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
m = "Failed to get contiguous memory for DMA from Xen!\n"\
"You either: don't have the permissions, do not have"\
" enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index b3b8f2f3ad10..ede860f921df 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -621,15 +621,6 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
-static void xs_reset_watches(void)
-{
- int err;
-
- err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
- if (err && err != -EEXIST)
- printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -906,9 +897,5 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
- /* shutdown watches for kexec boot */
- if (xen_hvm_domain())
- xs_reset_watches();
-
return 0;
}
diff --git a/firmware/README.AddingFirmware b/firmware/README.AddingFirmware
index e24cd8986d8b..ea78c3a17eec 100644
--- a/firmware/README.AddingFirmware
+++ b/firmware/README.AddingFirmware
@@ -12,7 +12,7 @@ here.
This directory is _NOT_ for adding arbitrary new firmware images. The
place to add those is the separate linux-firmware repository:
- git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
That repository contains all these firmware images which have been
extracted from older drivers, as well various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
To submit firmware to that repository, please send either a git binary
diff or preferably a git pull request to:
David Woodhouse <dwmw2@infradead.org>
+ Ben Hutchings <ben@decadent.org.uk>
Your commit should include an update to the WHENCE file clearly
identifying the licence under which the firmware is available, and
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec14097fef1..0b394580d860 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
int idle;
};
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
/*
* btrfs_start_workers uses kthread_run, which can block waiting for memory
* for a very long time. It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
{
struct worker_start *start;
start = container_of(work, struct worker_start, work);
- btrfs_start_workers(start->queue, 1);
+ __btrfs_start_workers(start->queue);
kfree(start);
}
-static int start_new_worker(struct btrfs_workers *queue)
-{
- struct worker_start *start;
- int ret;
-
- start = kzalloc(sizeof(*start), GFP_NOFS);
- if (!start)
- return -ENOMEM;
-
- start->work.func = start_new_worker_func;
- start->queue = queue;
- ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
- if (ret)
- kfree(start);
- return ret;
-}
-
/*
* helper function to move a thread onto the idle list after it
* has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
struct btrfs_workers *workers = worker->workers;
+ struct worker_start *start;
unsigned long flags;
rmb();
if (!workers->atomic_start_pending)
return;
+ start = kzalloc(sizeof(*start), GFP_NOFS);
+ if (!start)
+ return;
+
+ start->work.func = start_new_worker_func;
+ start->queue = workers;
+
spin_lock_irqsave(&workers->lock, flags);
if (!workers->atomic_start_pending)
goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
workers->num_workers_starting += 1;
spin_unlock_irqrestore(&workers->lock, flags);
- start_new_worker(workers);
+ btrfs_queue_worker(workers->atomic_worker_start, &start->work);
return;
out:
+ kfree(start);
spin_unlock_irqrestore(&workers->lock, flags);
}
@@ -331,7 +325,7 @@ again:
run_ordered_completions(worker->workers, work);
check_pending_worker_creates(worker);
-
+ cond_resched();
}
spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
-static int __btrfs_start_workers(struct btrfs_workers *workers,
- int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
- int i;
- for (i = 0; i < num_workers; i++) {
- worker = kzalloc(sizeof(*worker), GFP_NOFS);
- if (!worker) {
- ret = -ENOMEM;
- goto fail;
- }
+ worker = kzalloc(sizeof(*worker), GFP_NOFS);
+ if (!worker) {
+ ret = -ENOMEM;
+ goto fail;
+ }
- INIT_LIST_HEAD(&worker->pending);
- INIT_LIST_HEAD(&worker->prio_pending);
- INIT_LIST_HEAD(&worker->worker_list);
- spin_lock_init(&worker->lock);
-
- atomic_set(&worker->num_pending, 0);
- atomic_set(&worker->refs, 1);
- worker->workers = workers;
- worker->task = kthread_run(worker_loop, worker,
- "btrfs-%s-%d", workers->name,
- workers->num_workers + i);
- if (IS_ERR(worker->task)) {
- ret = PTR_ERR(worker->task);
- kfree(worker);
- goto fail;
- }
- spin_lock_irq(&workers->lock);
- list_add_tail(&worker->worker_list, &workers->idle_list);
- worker->idle = 1;
- workers->num_workers++;
- workers->num_workers_starting--;
- WARN_ON(workers->num_workers_starting < 0);
- spin_unlock_irq(&workers->lock);
+ INIT_LIST_HEAD(&worker->pending);
+ INIT_LIST_HEAD(&worker->prio_pending);
+ INIT_LIST_HEAD(&worker->worker_list);
+ spin_lock_init(&worker->lock);
+
+ atomic_set(&worker->num_pending, 0);
+ atomic_set(&worker->refs, 1);
+ worker->workers = workers;
+ worker->task = kthread_run(worker_loop, worker,
+ "btrfs-%s-%d", workers->name,
+ workers->num_workers + 1);
+ if (IS_ERR(worker->task)) {
+ ret = PTR_ERR(worker->task);
+ kfree(worker);
+ goto fail;
}
+ spin_lock_irq(&workers->lock);
+ list_add_tail(&worker->worker_list, &workers->idle_list);
+ worker->idle = 1;
+ workers->num_workers++;
+ workers->num_workers_starting--;
+ WARN_ON(workers->num_workers_starting < 0);
+ spin_unlock_irq(&workers->lock);
+
return 0;
fail:
- btrfs_stop_workers(workers);
+ spin_lock_irq(&workers->lock);
+ workers->num_workers_starting--;
+ spin_unlock_irq(&workers->lock);
return ret;
}
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
{
spin_lock_irq(&workers->lock);
- workers->num_workers_starting += num_workers;
+ workers->num_workers_starting++;
spin_unlock_irq(&workers->lock);
- return __btrfs_start_workers(workers, num_workers);
+ return __btrfs_start_workers(workers);
}
/*
@@ -568,9 +561,10 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
struct btrfs_worker_thread *worker;
unsigned long flags;
struct list_head *fallback;
+ int ret;
-again:
spin_lock_irqsave(&workers->lock, flags);
+again:
worker = next_worker(workers);
if (!worker) {
@@ -584,7 +578,10 @@ again:
workers->num_workers_starting++;
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
- __btrfs_start_workers(workers, 1);
+ ret = __btrfs_start_workers(workers);
+ spin_lock_irqsave(&workers->lock, flags);
+ if (ret)
+ goto fallback;
goto again;
}
}
@@ -665,7 +662,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
struct btrfs_worker_thread *worker;
unsigned long flags;
@@ -673,7 +670,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
/* don't requeue something already on a list */
if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
- goto out;
+ return;
worker = find_worker(workers);
if (workers->ordered) {
@@ -712,7 +709,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
if (wake)
wake_up_process(worker->task);
spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
- return 0;
}
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 5077746cf85e..f34cc31fa3c9 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -109,8 +109,8 @@ struct btrfs_workers {
char *name;
};
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
struct btrfs_workers *async_starter);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 50634abef9b4..67385033323d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2692,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5b163572e0ca..9c1eccc2c503 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
* Now if src_rsv == delalloc_block_rsv we'll let it just steal since
* we're accounted for.
*/
- if (!trans->bytes_reserved &&
- src_rsv != &root->fs_info->delalloc_block_rsv) {
+ if (!src_rsv || (!trans->bytes_reserved &&
+ src_rsv != &root->fs_info->delalloc_block_rsv)) {
ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
/*
* Since we're under a transaction reserve_metadata_bytes could
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 632f8f3cc9db..f44b3928dc2d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2194,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->endio_meta_write_workers.idle_thresh = 2;
fs_info->readahead_workers.idle_thresh = 2;
- btrfs_start_workers(&fs_info->workers, 1);
- btrfs_start_workers(&fs_info->generic_worker, 1);
- btrfs_start_workers(&fs_info->submit_workers, 1);
- btrfs_start_workers(&fs_info->delalloc_workers, 1);
- btrfs_start_workers(&fs_info->fixup_workers, 1);
- btrfs_start_workers(&fs_info->endio_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
- btrfs_start_workers(&fs_info->delayed_workers, 1);
- btrfs_start_workers(&fs_info->caching_workers, 1);
- btrfs_start_workers(&fs_info->readahead_workers, 1);
+ /*
+ * btrfs_start_workers can really only fail because of ENOMEM so just
+ * return -ENOMEM if any of these fail.
+ */
+ ret = btrfs_start_workers(&fs_info->workers);
+ ret |= btrfs_start_workers(&fs_info->generic_worker);
+ ret |= btrfs_start_workers(&fs_info->submit_workers);
+ ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+ ret |= btrfs_start_workers(&fs_info->fixup_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+ ret |= btrfs_start_workers(&fs_info->delayed_workers);
+ ret |= btrfs_start_workers(&fs_info->caching_workers);
+ ret |= btrfs_start_workers(&fs_info->readahead_workers);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_sb_buffer;
+ }
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2ad813674d77..f5fbe576d2ba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2822,7 +2822,7 @@ out_free:
btrfs_release_path(path);
out:
spin_lock(&block_group->lock);
- if (!ret)
+ if (!ret && dcs == BTRFS_DC_SETUP)
block_group->cache_generation = trans->transid;
block_group->disk_cache_state = dcs;
spin_unlock(&block_group->lock);
@@ -4204,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
u64 to_reserve = 0;
+ u64 csum_bytes;
unsigned nr_extents = 0;
+ int extra_reserve = 0;
int flush = 1;
int ret;
+ /* We need to hold the i_mutex here unless this is the free space cache inode */
if (btrfs_is_free_space_inode(root, inode))
flush = 0;
+ else
+ WARN_ON(!mutex_is_locked(&inode->i_mutex));
if (flush && btrfs_transaction_in_commit(root->fs_info))
schedule_timeout(1);
@@ -4220,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
BTRFS_I(inode)->outstanding_extents++;
if (BTRFS_I(inode)->outstanding_extents >
- BTRFS_I(inode)->reserved_extents) {
+ BTRFS_I(inode)->reserved_extents)
nr_extents = BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
- BTRFS_I(inode)->reserved_extents += nr_extents;
- }
/*
* Add an item to reserve for updating the inode when we complete the
@@ -4232,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
*/
if (!BTRFS_I(inode)->delalloc_meta_reserved) {
nr_extents++;
- BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ extra_reserve = 1;
}
to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+ csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4246,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
- spin_unlock(&BTRFS_I(inode)->lock);
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
/*
- * Somebody could have come in and twiddled with the
- * reservation, so if we have to free more than we would have
- * reserved from this reservation go ahead and release those
- * bytes.
+ * If the inode's csum_bytes is the same as the original
+ * csum_bytes then we know we haven't raced with any free()ers
+ * so we can just reduce the inode's csum_bytes and carry on.
+ * Otherwise we have to do the normal free thing to account for
+ * the case that the free side didn't free up its reserve
+ * because of this outstanding reservation.
*/
- to_free -= to_reserve;
+ if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+ calc_csum_metadata_size(inode, num_bytes, 0);
+ else
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ spin_unlock(&BTRFS_I(inode)->lock);
+ if (dropped)
+ to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
if (to_free)
btrfs_block_rsv_release(root, block_rsv, to_free);
return ret;
}
+ spin_lock(&BTRFS_I(inode)->lock);
+ if (extra_reserve) {
+ BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ nr_extents--;
+ }
+ BTRFS_I(inode)->reserved_extents += nr_extents;
+ spin_unlock(&BTRFS_I(inode)->lock);
+
block_rsv_add_bytes(block_rsv, to_reserve, 1);
return 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index dafdfa059bf6..97fbe939c050 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
+ nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+ nrptrs = max(nrptrs, 8);
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
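The clamp above keeps each buffered-write batch within the writer's remaining dirty-page budget while still guaranteeing forward progress. For example, with nr_dirtied_pause = 32 and nr_dirtied = 30, only 2 pages remain in the budget, and the max() raises the batch back to the 8-page floor:

    nrptrs = min(nrptrs, 32 - 30);  /* dirty budget leaves 2 pages */
    nrptrs = max(nrptrs, 8);        /* floor keeps the write moving */
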
@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
goto out;
}
- file_update_time(file);
+ err = btrfs_update_time(file);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
BTRFS_I(inode)->sequence++;
start_pos = round_down(pos, root->sectorsize);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2c984f7d4c2a..fd1a06df5bc6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -38,6 +38,7 @@
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
- BUG_ON(ret);
+ BUG_ON(ret && ret != -EEXIST);
}
/* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
if (ret && ret != -ESTALE)
goto out;
+ if (ret == -ESTALE && root == root->fs_info->tree_root) {
+ struct btrfs_root *dead_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int is_dead_root = 0;
+
+ /*
+ * this is an orphan in the tree root. Currently these
+ * could come from 2 sources:
+ * a) a snapshot deletion in progress
+ * b) a free space cache inode
+ * We need to distinguish those two, as the snapshot
+ * orphan must not get deleted.
+ * find_dead_roots already ran before us, so if this
+ * is a snapshot deletion, we should find the root
+ * in the dead_roots list
+ */
+ spin_lock(&fs_info->trans_lock);
+ list_for_each_entry(dead_root, &fs_info->dead_roots,
+ root_list) {
+ if (dead_root->root_key.objectid ==
+ found_key.objectid) {
+ is_dead_root = 1;
+ break;
+ }
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (is_dead_root) {
+ /* prevent this orphan from being found again */
+ key.offset = found_key.objectid - 1;
+ continue;
+ }
+ }
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
continue;
}
nr_truncate++;
+ /*
+ * We need to hold the i_mutex for reservation purposes; not
+ * a huge deal here, but there is a WARN_ON in
+ * btrfs_delalloc_reserve_space to catch offenders.
+ */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_truncate(inode);
+ mutex_unlock(&inode->i_mutex);
} else {
nr_unlink++;
}
@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
- trans = btrfs_start_transaction(root, 2);
+ trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
break;
@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
free_extent_map(em);
@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
int ret;
@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
return 0;
if (newsize > oldsize) {
- i_size_write(inode, newsize);
- btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
truncate_pagecache(inode, oldsize, newsize);
ret = btrfs_cont_expand(inode, oldsize, newsize);
- if (ret) {
- btrfs_setsize(inode, oldsize);
+ if (ret)
return ret;
- }
- mark_inode_dirty(inode);
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ i_size_write(inode, newsize);
+ btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+ ret = btrfs_update_inode(trans, root, inode);
+ btrfs_end_transaction_throttle(trans, root);
} else {
/*
@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid) {
setattr_copy(inode, attr);
- mark_inode_dirty(inode);
+ err = btrfs_dirty_inode(inode);
- if (attr->ia_valid & ATTR_MODE)
+ if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
if (BTRFS_I(inode)->dummy_inode)
- return;
+ return 0;
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %ld\n",
- (unsigned long long)btrfs_ino(inode),
- PTR_ERR(trans));
- return;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %d\n",
- (unsigned long long)btrfs_ino(inode),
- ret);
- }
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
+
+ return ret;
+}
+
+/*
+ * This is a copy of file_update_time. We need this so we can return error on
+ * ENOSPC for updating the inode in the case of file write and mmap writes.
+ */
+int btrfs_update_time(struct file *file)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct timespec now;
+ int ret;
+ enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+ /* First try to exhaust all avenues to not sync */
+ if (IS_NOCMTIME(inode))
+ return 0;
+
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_mtime, &now))
+ sync_it = S_MTIME;
+
+ if (!timespec_equal(&inode->i_ctime, &now))
+ sync_it |= S_CTIME;
+
+ if (IS_I_VERSION(inode))
+ sync_it |= S_VERSION;
+
+ if (!sync_it)
+ return 0;
+
+ /* Finally allowed to write? Takes lock. */
+ if (mnt_want_write_file(file))
+ return 0;
+
+ /* Only change inode inside the lock region */
+ if (sync_it & S_VERSION)
+ inode_inc_iversion(inode);
+ if (sync_it & S_CTIME)
+ inode->i_ctime = now;
+ if (sync_it & S_MTIME)
+ inode->i_mtime = now;
+ ret = btrfs_dirty_inode(inode);
+ if (!ret)
+ mark_inode_dirty_sync(inode);
+ mnt_drop_write(file->f_path.mnt);
+ return ret;
}
/*
@@ -4504,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
int err = btrfs_add_link(trans, dir, inode,
dentry->d_name.name, dentry->d_name.len,
backref, index);
- if (!err) {
- d_instantiate(dentry, inode);
- return 0;
- }
if (err > 0)
err = -EEXIST;
return err;
@@ -4555,13 +4637,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+
+ inode->i_op = &btrfs_special_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
- inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
@@ -4613,15 +4703,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
@@ -4679,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
BUG_ON(err);
+ d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
@@ -6303,7 +6402,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 page_start;
u64 page_end;
+ /* Need this to keep space reservations serialized */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
+ if (!ret)
+ ret = btrfs_update_time(vma->vm_file);
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
@@ -6515,8 +6619,9 @@ static int btrfs_truncate(struct inode *inode)
/* Just need the 1 for updating the inode */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
- goto out;
+ ret = err = PTR_ERR(trans);
+ trans = NULL;
+ break;
}
}
@@ -7076,14 +7181,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)
@@ -7132,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
drop_inode = 1;
out_unlock:
+ if (!err)
+ d_instantiate(dentry, inode);
nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
if (drop_inode) {
@@ -7353,6 +7467,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
+ .setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 72d461656f60..c04f02c7d5bb 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
+ btrfs_update_iflags(inode);
+ inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
- btrfs_update_iflags(inode);
- inode->i_ctime = CURRENT_TIME;
btrfs_end_transaction(trans, root);
mnt_drop_write(file->f_path.mnt);
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
return 0;
file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode,
num_pages << PAGE_CACHE_SHIFT);
+ mutex_unlock(&inode->i_mutex);
if (ret)
return ret;
again:
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index dff29d5e151a..cfb55434a469 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
while (index <= last_index) {
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
if (ret)
goto out;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c27bcb67f330..ddf2c90d3fc0 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1535,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret = 0;
mutex_lock(&fs_info->scrub_lock);
if (fs_info->scrub_workers_refcnt == 0) {
btrfs_init_workers(&fs_info->scrub_workers, "scrub",
fs_info->thread_pool_size, &fs_info->generic_worker);
fs_info->scrub_workers.idle_thresh = 4;
- btrfs_start_workers(&fs_info->scrub_workers, 1);
+ ret = btrfs_start_workers(&fs_info->scrub_workers);
+ if (ret)
+ goto out;
}
++fs_info->scrub_workers_refcnt;
+out:
mutex_unlock(&fs_info->scrub_lock);
- return 0;
+ return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index e28ad4baf483..200f63bc6675 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"
@@ -1053,7 +1054,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
u64 avail_space;
u64 used_space;
u64 min_stripe_size;
- int min_stripes = 1;
+ int min_stripes = 1, num_stripes = 1;
int i = 0, nr_devices;
int ret;
@@ -1067,12 +1068,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
/* calc min stripe number for data space alloction */
type = btrfs_get_alloc_profile(root, 1);
- if (type & BTRFS_BLOCK_GROUP_RAID0)
+ if (type & BTRFS_BLOCK_GROUP_RAID0) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID1)
+ num_stripes = nr_devices;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID10)
+ num_stripes = 2;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
min_stripes = 4;
+ num_stripes = 4;
+ }
if (type & BTRFS_BLOCK_GROUP_DUP)
min_stripe_size = 2 * BTRFS_STRIPE_LEN;
@@ -1141,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
i = nr_devices - 1;
avail_space = 0;
while (nr_devices >= min_stripes) {
+ if (num_stripes > nr_devices)
+ num_stripes = nr_devices;
+
if (devices_info[i].max_avail >= min_stripe_size) {
int j;
u64 alloc_size;
- avail_space += devices_info[i].max_avail * min_stripes;
+ avail_space += devices_info[i].max_avail * num_stripes;
alloc_size = devices_info[i].max_avail;
- for (j = i + 1 - min_stripes; j <= i; j++)
+ for (j = i + 1 - num_stripes; j <= i; j++)
devices_info[j].max_avail -= alloc_size;
}
i--;
@@ -1264,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
return 0;
}
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+ int ret;
+
+ ret = btrfs_dirty_inode(inode);
+ if (ret)
+ printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+ "error %d\n", btrfs_ino(inode), ret);
+}
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
@@ -1271,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
.write_inode = btrfs_write_inode,
- .dirty_inode = btrfs_dirty_inode,
+ .dirty_inode = btrfs_fs_dirty_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0a8c8f8304b1..f4b839fd3c9d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -295,6 +295,12 @@ loop_lock:
btrfs_requeue_work(&device->work);
goto done;
}
+ /* unplug every 64 requests just for good measure */
+ if (batch_run % 64 == 0) {
+ blk_finish_plug(&plug);
+ blk_start_plug(&plug);
+ sync_pending = 0;
+ }
}
cond_resched();
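The volumes.c hunk bounds worst-case latency by flushing the block plug every 64 queued requests instead of only at the end of the run. The shape of the pattern, modeled in plain C with flush_batch() standing in for the blk_finish_plug()/blk_start_plug() pair:

#include <stdio.h>

#define FLUSH_INTERVAL 64

/* Stand-in for the blk_finish_plug()/blk_start_plug() pair. */
static void flush_batch(void)
{
    printf("flush\n");
}

/* Batch for throughput, but flush every FLUSH_INTERVAL submissions so
 * no request sits arbitrarily long behind one huge batch. */
static void submit_all(int nr_requests)
{
    for (int i = 1; i <= nr_requests; i++) {
        /* queue request i here ... */
        if (i % FLUSH_INTERVAL == 0)
            flush_batch();
    }
    flush_batch();   /* drain the final partial batch */
}

int main(void)
{
    submit_all(200);   /* flushes after 64, 128, 192, then the tail */
    return 0;
}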
@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
- } else if (err) {
+ } else {
/*
* this bio is actually up to date, we didn't
* go over the max number of errors
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 3eeb97661262..98954003a8d3 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1094,42 +1094,19 @@ static int ceph_snapdir_d_revalidate(struct dentry *dentry,
/*
* Set/clear/test dir complete flag on the dir's dentry.
*/
-static struct dentry * __d_find_any_alias(struct inode *inode)
-{
- struct dentry *alias;
-
- if (list_empty(&inode->i_dentry))
- return NULL;
- alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
- return alias;
-}
-
void ceph_dir_set_complete(struct inode *inode)
{
- struct dentry *dentry = __d_find_any_alias(inode);
-
- if (dentry && ceph_dentry(dentry)) {
- dout(" marking %p (%p) complete\n", inode, dentry);
- set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
- }
+ /* not yet implemented */
}
void ceph_dir_clear_complete(struct inode *inode)
{
- struct dentry *dentry = __d_find_any_alias(inode);
-
- if (dentry && ceph_dentry(dentry)) {
- dout(" marking %p (%p) NOT complete\n", inode, dentry);
- clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
- }
+ /* not yet implemented */
}
bool ceph_dir_test_complete(struct inode *inode)
{
- struct dentry *dentry = __d_find_any_alias(inode);
-
- if (dentry && ceph_dentry(dentry))
- return test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
+ /* not yet implemented */
return false;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8cd4b52d4217..f3670cf72587 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -282,7 +282,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
byte_count += total_in_buf2;
/* don't allow buffer to overflow */
- if (byte_count > CIFSMaxBufSize)
+ if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
return -ENOBUFS;
pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
@@ -2122,7 +2122,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
warned_on_ntlm = true;
cERROR(1, "default security mechanism requested. The default "
"security mechanism will be upgraded from ntlm to "
- "ntlmv2 in kernel release 3.2");
+ "ntlmv2 in kernel release 3.3");
}
ses->overrideSecFlg = volume_info->secFlg;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 51352de88ef1..a10e428b32b4 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1506,35 +1506,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd,
return -ENOIOCTLCMD;
}
-static void compat_ioctl_error(struct file *filp, unsigned int fd,
- unsigned int cmd, unsigned long arg)
-{
- char buf[10];
- char *fn = "?";
- char *path;
-
- /* find the name of the device. */
- path = (char *)__get_free_page(GFP_KERNEL);
- if (path) {
- fn = d_path(&filp->f_path, path, PAGE_SIZE);
- if (IS_ERR(fn))
- fn = "?";
- }
-
- sprintf(buf,"'%c'", (cmd>>_IOC_TYPESHIFT) & _IOC_TYPEMASK);
- if (!isprint(buf[1]))
- sprintf(buf, "%02x", buf[1]);
- compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) "
- "cmd(%08x){t:%s;sz:%u} arg(%08x) on %s\n",
- current->comm, current->pid,
- (int)fd, (unsigned int)cmd, buf,
- (cmd >> _IOC_SIZESHIFT) & _IOC_SIZEMASK,
- (unsigned int)arg, fn);
-
- if (path)
- free_page((unsigned long)path);
-}
-
static int compat_ioctl_check_table(unsigned int xcmd)
{
int i;
@@ -1621,13 +1592,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
goto found_handler;
error = do_ioctl_trans(fd, cmd, arg, filp);
- if (error == -ENOIOCTLCMD) {
- static int count;
-
- if (++count <= 50)
- compat_ioctl_error(filp, fd, cmd, arg);
- error = -EINVAL;
- }
+ if (error == -ENOIOCTLCMD)
+ error = -ENOTTY;
goto out_fput;
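Both this hunk and the fs/ioctl.c one below replace -EINVAL with -ENOTTY as the userspace-visible result for an unrecognized ioctl; -ENOIOCTLCMD itself is a kernel-internal sentinel that must never leak out. A small model of the translation (the sentinel value 515 matches the kernel's definition):

#include <errno.h>
#include <stdio.h>

#define ENOIOCTLCMD 515   /* kernel-internal "no such ioctl" sentinel */

/* Translate the internal sentinel into the errno userspace expects
 * for an unsupported ioctl; anything else passes through untouched. */
static long ioctl_result(long error)
{
    if (error == -ENOIOCTLCMD)
        return -ENOTTY;
    return error;
}

int main(void)
{
    printf("%ld %ld\n", ioctl_result(-ENOIOCTLCMD), ioctl_result(-EFAULT));
    return 0;   /* prints: -25 -14 on Linux */
}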
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ac86f8b3e3cb..517f211a3bd4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -47,17 +47,6 @@ struct wb_writeback_work {
struct completion *done; /* set if the caller waits */
};
-const char *wb_reason_name[] = {
- [WB_REASON_BACKGROUND] = "background",
- [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages",
- [WB_REASON_SYNC] = "sync",
- [WB_REASON_PERIODIC] = "periodic",
- [WB_REASON_LAPTOP_TIMER] = "laptop_timer",
- [WB_REASON_FREE_MORE_MEM] = "free_more_memory",
- [WB_REASON_FS_FREE_SPACE] = "fs_free_space",
- [WB_REASON_FORKER_THREAD] = "forker_thread"
-};
-
/*
* Include the creation of the trace points after defining the
* wb_writeback_work structure so that the definition remains local to this
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 1d9b9fcb2db4..066836e81848 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -42,7 +42,7 @@ static long vfs_ioctl(struct file *filp, unsigned int cmd,
error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
if (error == -ENOIOCTLCMD)
- error = -EINVAL;
+ error = -ENOTTY;
out:
return error;
}
diff --git a/fs/locks.c b/fs/locks.c
index 3b0d05dcd7c1..637694bf3a03 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1205,6 +1205,8 @@ int __break_lease(struct inode *inode, unsigned int mode)
int want_write = (mode & O_ACCMODE) != O_RDONLY;
new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+ if (IS_ERR(new_fl))
+ return PTR_ERR(new_fl);
lock_flocks();
@@ -1221,12 +1223,6 @@ int __break_lease(struct inode *inode, unsigned int mode)
if (fl->fl_owner == current->files)
i_have_this_lease = 1;
- if (IS_ERR(new_fl) && !i_have_this_lease
- && ((mode & O_NONBLOCK) == 0)) {
- error = PTR_ERR(new_fl);
- goto out;
- }
-
break_time = 0;
if (lease_break_time > 0) {
break_time = jiffies + lease_break_time * HZ;
@@ -1284,8 +1280,7 @@ restart:
out:
unlock_flocks();
- if (!IS_ERR(new_fl))
- locks_free_lock(new_fl);
+ locks_free_lock(new_fl);
return error;
}
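The locks.c change moves the allocation-failure check to immediately after lease_alloc(), so the rest of __break_lease() can assume a valid lock and the exit path can free it unconditionally. A simplified model of the resulting shape, using NULL/-ENOMEM in place of the kernel's ERR_PTR convention:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct file_lock { int type; };

static struct file_lock *lease_alloc(int type)
{
    struct file_lock *fl = malloc(sizeof(*fl));

    if (fl)
        fl->type = type;
    return fl;                     /* NULL on allocation failure */
}

/* Fail fast: once the allocation is known good, every later path may
 * use fl freely and the single exit frees it unconditionally. */
static int break_lease(int want_write)
{
    struct file_lock *fl = lease_alloc(want_write);
    int error = 0;

    if (!fl)
        return -ENOMEM;

    /* ... walk conflicting leases, maybe wait, set error ... */

    free(fl);                      /* no "was it valid?" re-check */
    return error;
}

int main(void)
{
    printf("%d\n", break_lease(1));
    return 0;
}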
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 1d9e33966db0..4d46a6a59070 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -263,23 +263,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
goto out_no_root;
}
- ret = -ENOMEM;
- s->s_root = d_alloc_root(root_inode);
- if (!s->s_root)
- goto out_iput;
-
- if (!(s->s_flags & MS_RDONLY)) {
- if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
- ms->s_state &= ~MINIX_VALID_FS;
- mark_buffer_dirty(bh);
- }
- if (!(sbi->s_mount_state & MINIX_VALID_FS))
- printk("MINIX-fs: mounting unchecked file system, "
- "running fsck is recommended\n");
- else if (sbi->s_mount_state & MINIX_ERROR_FS)
- printk("MINIX-fs: mounting file system with errors, "
- "running fsck is recommended\n");
-
/* Apparently minix can create filesystems that allocate more blocks for
* the bitmaps than needed. We simply ignore that, but verify it didn't
* create one with too few blocks and bail out if so.
@@ -300,6 +283,23 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
goto out_iput;
}
+ ret = -ENOMEM;
+ s->s_root = d_alloc_root(root_inode);
+ if (!s->s_root)
+ goto out_iput;
+
+ if (!(s->s_flags & MS_RDONLY)) {
+ if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
+ ms->s_state &= ~MINIX_VALID_FS;
+ mark_buffer_dirty(bh);
+ }
+ if (!(sbi->s_mount_state & MINIX_VALID_FS))
+ printk("MINIX-fs: mounting unchecked file system, "
+ "running fsck is recommended\n");
+ else if (sbi->s_mount_state & MINIX_ERROR_FS)
+ printk("MINIX-fs: mounting file system with errors, "
+ "running fsck is recommended\n");
+
return 0;
out_iput:
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eca56d4b39c0..606ef0f20aed 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -147,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
* origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
* the cached file length
*/
- if (origin != SEEK_SET || origin != SEEK_CUR) {
+ if (origin != SEEK_SET && origin != SEEK_CUR) {
struct inode *inode = filp->f_mapping->host;
int retval = nfs_revalidate_file_size(inode, filp);
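The one-character fix above is worth dwelling on: `origin != SEEK_SET || origin != SEEK_CUR` is true for every possible origin, since no value equals both constants at once, so the revalidation ran unconditionally. A runnable demonstration:

#include <stdio.h>

/* Should we revalidate the cached size for this seek origin? */
static int must_revalidate_buggy(int origin)
{
    /* true for EVERY origin: nothing equals both constants at once */
    return origin != SEEK_SET || origin != SEEK_CUR;
}

static int must_revalidate_fixed(int origin)
{
    return origin != SEEK_SET && origin != SEEK_CUR;
}

int main(void)
{
    int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };

    for (int i = 0; i < 3; i++)
        printf("origin=%d buggy=%d fixed=%d\n", origins[i],
               must_revalidate_buggy(origins[i]),
               must_revalidate_fixed(origins[i]));
    return 0;   /* buggy=1 on all three rows; fixed=1 only for SEEK_END */
}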
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index be2bbac13817..d9f4d78c3413 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -39,6 +39,8 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
@@ -894,6 +896,8 @@ out:
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
+ if (delegation == NULL)
+ return 0;
if ((delegation->type & fmode) != fmode)
return 0;
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1040,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
}
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
- if (delegation == NULL ||
- !can_open_delegated(delegation, fmode)) {
+ if (!can_open_delegated(delegation, fmode)) {
rcu_read_unlock();
break;
}
@@ -1091,7 +1094,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
if (delegation)
delegation_flags = delegation->flags;
rcu_read_unlock();
- if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+ pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+ "returning a delegation for "
+ "OPEN(CLAIM_DELEGATE_CUR)\n",
+ NFS_CLIENT(inode)->cl_server);
+ } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
@@ -1423,11 +1431,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
- if (delegation != NULL &&
- test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
- rcu_read_unlock();
- goto out_no_action;
- }
+ if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+ can_open_delegated(delegation, data->o_arg.fmode))
+ goto unlock_no_action;
rcu_read_unlock();
}
/* Update sequence id. */
@@ -1444,6 +1450,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
return;
rpc_call_start(task);
return;
+unlock_no_action:
+ rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
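Making can_open_delegated() tolerate a NULL delegation lets the call sites above drop their own `delegation == NULL` clauses and ask one question instead of two. The pattern in miniature (struct layout here is a stand-in, not the NFS one):

#include <stddef.h>
#include <stdio.h>

struct delegation { unsigned int type; };

/* NULL-tolerant predicate: callers no longer need their own
 * "delegation != NULL &&" clause before asking the real question. */
static int can_open_delegated(const struct delegation *delegation,
                              unsigned int fmode)
{
    if (delegation == NULL)
        return 0;
    return (delegation->type & fmode) == fmode;
}

int main(void)
{
    struct delegation d = { .type = 0x3 };

    printf("%d %d\n", can_open_delegated(NULL, 0x1),
           can_open_delegated(&d, 0x1));      /* prints: 0 1 */
    return 0;
}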
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 39914be40b03..6a7107ae6b72 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1156,11 +1156,13 @@ restart:
if (status >= 0) {
status = nfs4_reclaim_locks(state, ops);
if (status >= 0) {
+ spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
printk("%s: Lock reclaim failed!\n",
__func__);
}
+ spin_unlock(&state->state_lock);
nfs4_put_open_state(state);
goto restart;
}
@@ -1224,10 +1226,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
lock->ls_seqid.flags = 0;
lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
}
+ spin_unlock(&state->state_lock);
}
static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@ static void nfs4_warn_keyexpired(const char *s)
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
+ case 0:
+ break;
case -NFS4ERR_CB_PATH_DOWN:
nfs_handle_cb_pathdown(clp);
- return 0;
+ break;
case -NFS4ERR_NO_GRACE:
nfs4_state_end_reclaim_reboot(clp);
- return 0;
+ break;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_SEQ_MISORDERED:
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* Zero session reset errors */
- return 0;
+ break;
case -EKEYEXPIRED:
/* Nothing we can do */
nfs4_warn_keyexpired(clp->cl_hostname);
- return 0;
+ break;
+ default:
+ return error;
}
- return error;
+ return 0;
}
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
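The rework above flips nfs4_recovery_handle_error() to a single convention: handled cases break out to a common `return 0`, and only unrecognized errors escape through `default`. That way a newly added case cannot forget its own `return 0`. Schematically (the error value below is made up):

#include <stdio.h>

#define ERR_RECOVERABLE (-11)   /* hypothetical recoverable condition */

/* Every recognized case breaks to the shared "return 0"; only
 * unknown errors propagate via default. */
static int handle_recovery_error(int error)
{
    switch (error) {
    case 0:
        break;
    case ERR_RECOVERABLE:
        /* perform the matching recovery action ... */
        break;
    default:
        return error;
    }
    return 0;
}

int main(void)
{
    printf("%d %d %d\n", handle_recovery_error(0),
           handle_recovery_error(ERR_RECOVERABLE),
           handle_recovery_error(-5));   /* prints: 0 0 -5 */
    return 0;
}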
@@ -1428,7 +1436,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
struct rpc_cred *cred;
const struct nfs4_state_maintenance_ops *ops =
clp->cl_mvops->state_renewal_ops;
- int status = -NFS4ERR_EXPIRED;
+ int status;
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
cred = nfs4_get_setclientid_cred(clp);
+ status = -ENOKEY;
if (cred == NULL)
goto out;
}
@@ -1525,16 +1534,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
if (!flags)
return;
- else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+ if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
nfs41_handle_server_reboot(clp);
- else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
SEQ4_STATUS_ADMIN_STATE_REVOKED |
SEQ4_STATUS_LEASE_MOVED))
nfs41_handle_state_revoked(clp);
- else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+ if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
nfs41_handle_recallable_state_revoked(clp);
- else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
SEQ4_STATUS_BACKCHANNEL_FAULT |
SEQ4_STATUS_CB_PATH_DOWN_SESSION))
nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
status = nfs4_check_lease(clp);
+ if (status < 0)
+ goto out_error;
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
continue;
- if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
- goto out_error;
}
/* Initialize or reset the session */
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 41d6743d303c..ac258beeda3c 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
goto out_free;
+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+ goto out_free;
+
len = argv[n].v_size * argv[n].v_nmembs;
base = (void __user *)(unsigned long)argv[n].v_base;
if (len == 0) {
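The added nilfs check rejects v_size * v_nmembs products that would wrap the 32-bit length before the multiplication happens; note that `>=` makes it slightly conservative, since the exact UINT_MAX/size boundary can still be safe. A standalone illustration of the guard:

#include <stdint.h>
#include <stdio.h>

/* Reject nmembs * size products that would wrap a 32-bit length. */
static int checked_len(uint32_t nmembs, uint32_t size, uint32_t *len)
{
    if (size != 0 && nmembs >= UINT32_MAX / size)
        return -1;              /* would (or might) overflow */
    *len = nmembs * size;
    return 0;
}

int main(void)
{
    uint32_t len = 0;

    printf("%d\n", checked_len(0x10000, 0x10000, &len));  /* -1 */
    printf("%d %u\n", checked_len(1000, 16, &len), len);  /* 0 16000 */
    return 0;
}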
@@ -842,6 +845,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
+ case NILFS_IOCTL_CHANGE_CPMODE:
+ case NILFS_IOCTL_DELETE_CHECKPOINT:
+ case NILFS_IOCTL_GET_CPINFO:
+ case NILFS_IOCTL_GET_CPSTAT:
+ case NILFS_IOCTL_GET_SUINFO:
+ case NILFS_IOCTL_GET_SUSTAT:
+ case NILFS_IOCTL_GET_VINFO:
+ case NILFS_IOCTL_GET_BDESCS:
+ case NILFS_IOCTL_CLEAN_SEGMENTS:
+ case NILFS_IOCTL_SYNC:
+ case NILFS_IOCTL_RESIZE:
+ case NILFS_IOCTL_SET_ALLOC_RANGE:
+ break;
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 2527a68057fc..d76ca6ae2b1b 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -31,7 +31,7 @@ static u64 get_idle_time(int cpu)
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
idle += arch_idle_time(cpu);
} else
- idle = nsecs_to_jiffies64(1000 * idle_time);
+ idle = usecs_to_cputime64(idle_time);
return idle;
}
@@ -44,7 +44,7 @@ static u64 get_iowait_time(int cpu)
/* !NO_HZ so we can rely on cpustat.iowait */
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
- iowait = nsecs_to_jiffies64(1000 * iowait_time);
+ iowait = usecs_to_cputime64(iowait_time);
return iowait;
}
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 3eca58f51ae9..8a899496fd5f 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -869,27 +869,6 @@ xfs_fs_dirty_inode(
}
STATIC int
-xfs_log_inode(
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
- int error;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
- error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- return xfs_trans_commit(tp, 0);
-}
-
-STATIC int
xfs_fs_write_inode(
struct inode *inode,
struct writeback_control *wbc)
@@ -902,10 +881,8 @@ xfs_fs_write_inode(
if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
- if (!ip->i_update_core)
- return 0;
- if (wbc->sync_mode == WB_SYNC_ALL) {
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
/*
* Make sure the inode has made it into the log. Instead
* of forcing it all the way to stable storage using a
@@ -913,11 +890,14 @@ xfs_fs_write_inode(
* ->sync_fs call do that for us, which reduces the number
* of synchronous log forces dramatically.
*/
- error = xfs_log_inode(ip);
+ error = xfs_log_dirty_inode(ip, NULL, 0);
if (error)
goto out;
return 0;
} else {
+ if (!ip->i_update_core)
+ return 0;
+
/*
* We make this non-blocking if the inode is contended, return
* EAGAIN to indicate to the caller that they did not succeed.
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index be5c51d8f757..f0994aedcd15 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -336,6 +336,32 @@ xfs_sync_fsdata(
return error;
}
+int
+xfs_log_dirty_inode(
+ struct xfs_inode *ip,
+ struct xfs_perag *pag,
+ int flags)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ if (!ip->i_update_core)
+ return 0;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ return xfs_trans_commit(tp, 0);
+}
+
/*
* When remounting a filesystem read-only or freezing the filesystem, we have
* two phases to execute. This first phase is syncing the data before we
@@ -359,6 +385,16 @@ xfs_quiesce_data(
{
int error, error2 = 0;
+ /*
+ * Log all pending size and timestamp updates. The vfs writeback
+ * code is supposed to do this, but due to its overaggressive
+ * livelock detection it will skip inodes where appending writes
+ * were written out in the first non-blocking sync phase if their
+ * completion took long enough that it happened after taking the
+ * timestamp for the cut-off in the blocking phase.
+ */
+ xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
+
xfs_qm_sync(mp, SYNC_TRYLOCK);
xfs_qm_sync(mp, SYNC_WAIT);
diff --git a/fs/xfs/xfs_sync.h b/fs/xfs/xfs_sync.h
index 941202e7ac6e..fa965479d788 100644
--- a/fs/xfs/xfs_sync.h
+++ b/fs/xfs/xfs_sync.h
@@ -34,6 +34,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
void xfs_flush_inodes(struct xfs_inode *ip);
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
int xfs_reclaim_inodes_count(struct xfs_mount *mp);
void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 77202e2c9fc5..9a62937c56ca 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -24,9 +24,11 @@ typedef u64 __nocast cputime64_t;
* Convert cputime to microseconds and back.
*/
#define cputime_to_usecs(__ct) \
- jiffies_to_usecs(cputime_to_jiffies(__ct));
-#define usecs_to_cputime(__msecs) \
- jiffies_to_cputime(usecs_to_jiffies(__msecs));
+ jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec) \
+ jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
/*
* Convert cputime to seconds and back.
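The cputime fix above is easy to miss: the old cputime_to_usecs()/usecs_to_cputime() definitions ended in a semicolon, which becomes part of every expansion and breaks any use inside a larger expression (the hunk also renames __msecs to __usec to match what the argument actually is). A compact demonstration of why trailing semicolons in function-like macros are poison:

#include <stdio.h>

/* Broken: the trailing semicolon becomes part of every expansion. */
#define usecs_to_ticks_bad(us)  ((us) / 1000);
/* Fixed: no trailing semicolon, so the macro composes in expressions. */
#define usecs_to_ticks(us)      ((us) / 1000)

int main(void)
{
    unsigned long t = usecs_to_ticks(5000) + 1;   /* fine: t == 6 */
    /* unsigned long u = usecs_to_ticks_bad(5000) + 1;
     * would not compile: expands to ((5000) / 1000); + 1; */
    printf("%lu\n", t);
    return 0;
}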
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..3c1063acb2ab 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
#include <asm/bitops.h>
#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
static __inline__ int get_bitmask_order(unsigned int count)
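for_each_set_bit_cont() differs from for_each_set_bit() only in its starting point: it resumes from the caller's current value of `bit` instead of bit 0, so a scan can be continued after an interruption. A single-word userspace model (find_next_bit_w() is a stand-in for the kernel's find_next_bit()):

#include <stdio.h>

/* Single-word stand-in for the kernel's find_next_bit(). */
static unsigned int find_next_bit_w(unsigned long word, unsigned int size,
                                    unsigned int off)
{
    for (; off < size; off++)
        if (word & (1UL << off))
            return off;
    return size;
}

/* Resume scanning at the caller's current value of bit. */
#define for_each_set_bit_cont_w(bit, word, size)             \
    for ((bit) = find_next_bit_w((word), (size), (bit));     \
         (bit) < (size);                                     \
         (bit) = find_next_bit_w((word), (size), (bit) + 1))

int main(void)
{
    unsigned int bit = 3;          /* already handled bits 0..2 */

    for_each_set_bit_cont_w(bit, 0xB5UL, 8)   /* 0xB5 = 1011 0101 */
        printf("bit %u\n", bit);   /* prints 4, 5, 7 */
    return 0;
}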
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a6d3b5bc7b..94acd8172b5b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
*/
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
- request_fn_proc *,
- spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
request_fn_proc *, spinlock_t *);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index ab344a521105..66d3e954eb6c 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-unsigned long free_all_memory_core_early(int nodeid);
+extern unsigned long free_low_memory_core_early(int nodeid);
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
extern unsigned long free_all_bootmem(void);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c86c940d1de3..081147da0564 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -71,7 +71,7 @@ struct timecounter {
/**
* cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc: Pointer to cycle counter.
+ * @cc: Pointer to cycle counter.
* @cycles: Cycles
*
* XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
* time base as values returned by
* timecounter_read()
* @tc: Pointer to time counter.
- * @cycle: a value returned by tc->cc->read()
+ * @cycle_tstamp: a value returned by tc->cc->read()
*
* Cycle counts that are converted correctly as long as they
* fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,11 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
* @max_idle_ns: max idle time permitted by the clocksource (nsecs)
- * @maxadj maximum adjustment value to mult (~11%)
+ * @maxadj: maximum adjustment value to mult (~11%)
* @flags: flags describing special properties
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
+ * @cycle_last: most recent cycle counter value seen by ::read()
*/
struct clocksource {
/*
@@ -187,6 +188,7 @@ struct clocksource {
void (*suspend)(struct clocksource *cs);
void (*resume)(struct clocksource *cs);
+ /* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
@@ -261,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
/**
* clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles: cycles
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
*
* Converts cycles to nanoseconds, using the given mult and shift.
*
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6cb60fd2ea84..305c263021e7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -27,6 +27,7 @@ struct cpu {
extern int register_cpu(struct cpu *cpu, int num);
extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+extern bool cpu_is_hotpluggable(unsigned cpu);
extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 65970b811e22..0e5f5785d9f2 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -46,6 +46,8 @@ struct debug_obj {
* fails
* @fixup_free: fixup function, which is called when the free check
* fails
+ * @fixup_assert_init: fixup function, which is called when the assert_init
+ * check fails
*/
struct debug_obj_descr {
const char *name;
@@ -54,6 +56,7 @@ struct debug_obj_descr {
int (*fixup_activate) (void *addr, enum debug_obj_state state);
int (*fixup_destroy) (void *addr, enum debug_obj_state state);
int (*fixup_free) (void *addr, enum debug_obj_state state);
+ int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
};
#ifdef CONFIG_DEBUG_OBJECTS
@@ -64,6 +67,7 @@ extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
/*
* Active state:
@@ -89,6 +93,8 @@ static inline void
debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
static inline void debug_objects_early_init(void) { }
static inline void debug_objects_mem_init(void) { }
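fixup_assert_init fires when debug_object_assert_init() finds an object that is still ODEBUG_STATE_NOTAVAILABLE, i.e. one that was never initialized. A sketch of how a consumer might wire the hook up, using timers as a plausible example; none of the timer wiring below is part of this patch:

static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
    struct timer_list *timer = addr;

    if (state == ODEBUG_STATE_NOTAVAILABLE) {
        init_timer(timer);   /* never initialized: fix up and report */
        return 1;
    }
    return 0;                /* nothing to fix */
}

static struct debug_obj_descr timer_debug_descr = {
    .name              = "timer_list",
    .fixup_assert_init = timer_fixup_assert_init,
};

int del_timer(struct timer_list *timer)
{
    /* flags uninitialized timers before they are dereferenced */
    debug_object_assert_init(timer, &timer_debug_descr);
    /* ... normal deletion path ... */
    return 0;
}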
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index ef90cbd8e173..57c9a8ae4f2d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
+extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
{
}
#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
#endif
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883f769e..bb7f30971858 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,20 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
extern void account_system_vtime(struct task_struct *tsk);
#endif
-#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-static inline void rcu_irq_enter(void)
-{
- rcu_exit_nohz();
-}
-
-static inline void rcu_irq_exit(void)
-{
- rcu_enter_nohz();
-}
static inline void rcu_nmi_enter(void)
{
@@ -163,17 +150,9 @@ static inline void rcu_nmi_exit(void)
}
#else
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
-#else
-# define rcu_irq_enter() do { } while (0)
-# define rcu_irq_exit() do { } while (0)
-# define rcu_nmi_enter() do { } while (0)
-# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) */
/*
* It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b50..5ce8b140428f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/workqueue.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -14,6 +15,12 @@ struct jump_label_key {
#endif
};
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void jump_label_inc(struct jump_label_key *key);
extern void jump_label_dec(struct jump_label_key *key);
+extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
extern bool jump_label_enabled(struct jump_label_key *key);
extern void jump_label_apply_nops(struct module *mod);
+extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl);
#else /* !HAVE_JUMP_LABEL */
@@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void)
{
}
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+};
+
static __always_inline bool static_branch(struct jump_label_key *key)
{
if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key)
atomic_dec(&key->enabled);
}
+static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+ jump_label_dec(&key->key);
+}
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod)
{
return 0;
}
+
+static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl)
+{
+}
#endif /* HAVE_JUMP_LABEL */
+#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
+#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+
#endif /* _LINUX_JUMP_LABEL_H */
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c3892fc1d538..68e67e50d028 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -557,6 +557,7 @@ struct kvm_ppc_pvinfo {
#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
#define KVM_CAP_PPC_PAPR 68
#define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056fb20b..87f402ccec55 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
+#include <linux/cpu.h>
/* can make br locks by using local lock for read side, global lock for write */
#define br_lock_init(name) name##_lock_init()
@@ -72,9 +73,31 @@
#define DEFINE_LGLOCK(name) \
\
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
DEFINE_LGLOCK_LOCKDEP(name); \
\
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+ unsigned long action, void *hcpu) \
+ { \
+ switch (action & ~CPU_TASKS_FROZEN) { \
+ case CPU_UP_PREPARE: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_set((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ break; \
+ case CPU_UP_CANCELED: case CPU_DEAD: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_clear((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+ .notifier_call = name##_lg_cpu_callback, \
+ }; \
void name##_lock_init(void) { \
int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -83,6 +106,11 @@
lock = &per_cpu(name##_lock, i); \
*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
} \
+ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+ get_online_cpus(); \
+ for_each_online_cpu(i) \
+ cpu_set(i, name##_cpus); \
+ put_online_cpus(); \
} \
EXPORT_SYMBOL(name##_lock_init); \
\
@@ -124,9 +152,9 @@
\
void name##_global_lock_online(void) { \
int i; \
- preempt_disable(); \
+ spin_lock(&name##_cpu_lock); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
@@ -137,12 +165,12 @@
void name##_global_unlock_online(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
} \
- preempt_enable(); \
+ spin_unlock(&name##_cpu_lock); \
} \
EXPORT_SYMBOL(name##_global_unlock_online); \
\
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b6a56e37284c..d36619ead3ba 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -343,6 +343,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
+#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
+
#else /* !LOCKDEP */
static inline void lockdep_off(void)
@@ -392,6 +394,8 @@ struct lock_class_key { };
#define lockdep_assert_held(l) do { } while (0)
+#define lockdep_recursing(tsk) (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e6b843e16e81..a6bb10235148 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,8 +2,6 @@
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__
-#define MEMBLOCK_ERROR 0
-
#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
@@ -19,81 +17,161 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <asm/memblock.h>
-
#define INIT_MEMBLOCK_REGIONS 128
struct memblock_region {
phys_addr_t base;
phys_addr_t size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ int nid;
+#endif
};
struct memblock_type {
unsigned long cnt; /* number of regions */
unsigned long max; /* size of the allocated array */
+ phys_addr_t total_size; /* size of all regions */
struct memblock_region *regions;
};
struct memblock {
phys_addr_t current_limit;
- phys_addr_t memory_size; /* Updated by memblock_analyze() */
struct memblock_type memory;
struct memblock_type reserved;
};
extern struct memblock memblock;
extern int memblock_debug;
-extern int memblock_can_resize;
#define memblock_dbg(fmt, ...) \
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align);
int memblock_free_reserved_regions(void);
int memblock_reserve_reserved_regions(void);
-extern void memblock_init(void);
-extern void memblock_analyze(void);
-extern long memblock_add(phys_addr_t base, phys_addr_t size);
-extern long memblock_remove(phys_addr_t base, phys_addr_t size);
-extern long memblock_free(phys_addr_t base, phys_addr_t size);
-extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_allow_resize(void);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add(phys_addr_t base, phys_addr_t size);
+int memblock_remove(phys_addr_t base, phys_addr_t size);
+int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_reserve(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges. Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_free_mem_range - iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock. Available as
+ * soon as memblock is initialized.
+ */
+#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
+ for (i = 0, \
+ __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \
+ i != (u64)ULLONG_MAX; \
+ __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
+
+void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid);
-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
+/**
+ * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in reverse
+ * order. Available as soon as memblock is initialized.
*/
-extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
- int nid);
-extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
- int nid);
+#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
+ for (i = (u64)ULLONG_MAX, \
+ __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \
+ i != (u64)ULLONG_MAX; \
+ __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
-extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
+
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+ r->nid = nid;
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+ return r->nid;
+}
+#else
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-extern phys_addr_t memblock_alloc_base(phys_addr_t size,
- phys_addr_t align,
- phys_addr_t max_addr);
-extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
- phys_addr_t align,
- phys_addr_t max_addr);
-extern phys_addr_t memblock_phys_mem_size(void);
-extern phys_addr_t memblock_start_of_DRAM(void);
-extern phys_addr_t memblock_end_of_DRAM(void);
-extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
-extern int memblock_is_memory(phys_addr_t addr);
-extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-extern int memblock_is_reserved(phys_addr_t addr);
-extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-
-extern void memblock_dump_all(void);
-
-/* Provided by the architecture */
-extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
-extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
- phys_addr_t addr2, phys_addr_t size2);
+phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_start_of_DRAM(void);
+phys_addr_t memblock_end_of_DRAM(void);
+void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+int memblock_is_memory(phys_addr_t addr);
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_is_reserved(phys_addr_t addr);
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+
+extern void __memblock_dump_all(void);
+
+static inline void memblock_dump_all(void)
+{
+ if (memblock_debug)
+ __memblock_dump_all();
+}
/**
* memblock_set_current_limit - Set the current allocation limit to allow
@@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
* accessible during boot
* @limit: New limit value (physical address)
*/
-extern void memblock_set_current_limit(phys_addr_t limit);
+void memblock_set_current_limit(phys_addr_t limit);
/*
@@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
region++)
-#ifdef ARCH_DISCARD_MEMBLOCK
-#define __init_memblock __init
-#define __initdata_memblock __initdata
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __meminit
+#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
@@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
- return MEMBLOCK_ERROR;
+ return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */
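With the node-aware iterators in place, early-boot code can walk free memory without the old early_node_map[] helpers. A sketch of typical use (kernel context assumed; %pa takes a pointer to a phys_addr_t):

/* Sketch only: report each free range; NULL for the unused nid slot. */
phys_addr_t start, end;
u64 i;

for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
    pr_info("free range: [%pa-%pa]\n", &start, &end);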
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4baadd18f4ad..5d9b4c9813bd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
* zones, allocate the backing mem_map and account for memory holes in a more
* architecture independent manner. This is a substitute for creating the
* zone_sizes[] and zholes_size[] arrays and passing them to
* free_area_init_node()
*
* An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
* free_area_init_nodes() passing in the PFN each zone ends at. For basic
* usage, an architecture is expected to do something like
*
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * add_active_range(node_id, start_pfn, end_pfn)
+ * memblock_add_node(base, size, nid)
* free_area_init_nodes(max_zone_pfns);
*
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range. Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
*
* See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
*/
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
@@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2ffe8db..3ac040f19369 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -598,13 +598,13 @@ struct zonelist {
#endif
};
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
unsigned long start_pfn;
unsigned long end_pfn;
int nid;
};
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@ extern int movable_zone;
static inline int zone_movable_is_highmem(void)
{
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
return movable_zone == ZONE_HIGHMEM;
#else
return 0;
@@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#endif
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
- !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+ !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
return 0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1f89122bf6a..08855613ceb3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ enum perf_hw_id {
PERF_COUNT_HW_BUS_CYCLES = 6,
PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
PERF_COUNT_HW_MAX, /* non-ABI */
};
@@ -890,6 +891,7 @@ struct perf_event_context {
int nr_active;
int is_active;
int nr_stat;
+ int nr_freq;
int rotate_disable;
atomic_t refcount;
struct task_struct *task;
@@ -1063,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
}
}
-extern struct jump_label_key perf_sched_events;
+extern struct jump_label_key_deferred perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
- if (static_branch(&perf_sched_events))
+ if (static_branch(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
}
@@ -1077,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
- if (static_branch(&perf_sched_events))
+ if (static_branch(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next);
}
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 79159de0e341..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -40,12 +40,6 @@
#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-#define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL
-#else
-#define MEMBLOCK_INACTIVE 0x44c9e71bUL
-#endif
-
#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2cf4226ade7e..81c04f4348ec 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,6 +51,8 @@ extern int rcutorture_runnable; /* for sysctl */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+ struct rcu_head *rhp);
#else
static inline void rcutorture_record_test_transition(void)
{
@@ -58,6 +60,12 @@ static inline void rcutorture_record_test_transition(void)
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
+#ifdef CONFIG_RCU_TRACE
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+ struct rcu_head *rhp);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
#endif
#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b))
@@ -177,23 +185,10 @@ extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
-{
-}
-
-static inline void rcu_exit_nohz(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
+extern void rcu_idle_enter(void);
+extern void rcu_idle_exit(void);
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
/*
* Infrastructure to implement the synchronize_() primitives in
@@ -233,22 +228,30 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#ifdef CONFIG_PROVE_RCU
+extern int rcu_is_cpu_idle(void);
+#else /* !CONFIG_PROVE_RCU */
+static inline int rcu_is_cpu_idle(void)
+{
+ return 0;
+}
+#endif /* else !CONFIG_PROVE_RCU */
-extern struct lockdep_map rcu_bh_lock_map;
-# define rcu_read_acquire_bh() \
- lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+static inline void rcu_lock_acquire(struct lockdep_map *map)
+{
+ WARN_ON_ONCE(rcu_is_cpu_idle());
+ lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
+}
-extern struct lockdep_map rcu_sched_lock_map;
-# define rcu_read_acquire_sched() \
- lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_sched() \
- lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+static inline void rcu_lock_release(struct lockdep_map *map)
+{
+ WARN_ON_ONCE(rcu_is_cpu_idle());
+ lock_release(map, 1, _THIS_IP_);
+}
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);
/**
@@ -262,11 +265,18 @@ extern int debug_lockdep_rcu_enabled(void);
*
* Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
+ *
+ * Note that rcu_read_lock() and the matching rcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock() in process context if the matching rcu_read_lock()
+ * was invoked from within an irq handler.
*/
static inline int rcu_read_lock_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
return lock_is_held(&rcu_lock_map);
}
@@ -290,6 +300,19 @@ extern int rcu_read_lock_bh_held(void);
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
+ * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
+ * that are in such a section, considering these as in extended quiescent
+ * state, so such a CPU is effectively never in an RCU read-side critical
+ * section regardless of what RCU primitives it invokes. This state of
+ * affairs is required --- we need to keep an RCU-free window in idle
+ * where the CPU may possibly enter into low power mode. This way we can
+ * notice an extended quiescent state to other CPUs that started a grace
+ * period. Otherwise we would delay any grace period as long as we run in
+ * the idle task.
*/
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
@@ -298,6 +321,8 @@ static inline int rcu_read_lock_sched_held(void)
if (!debug_lockdep_rcu_enabled())
return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
if (debug_locks)
lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -311,12 +336,8 @@ static inline int rcu_read_lock_sched_held(void)
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-# define rcu_read_acquire_bh() do { } while (0)
-# define rcu_read_release_bh() do { } while (0)
-# define rcu_read_acquire_sched() do { } while (0)
-# define rcu_read_release_sched() do { } while (0)
+# define rcu_lock_acquire(a) do { } while (0)
+# define rcu_lock_release(a) do { } while (0)
static inline int rcu_read_lock_held(void)
{
@@ -637,7 +658,7 @@ static inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
- rcu_read_acquire();
+ rcu_lock_acquire(&rcu_lock_map);
}
/*
@@ -657,7 +678,7 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_read_release();
+ rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
}
@@ -673,12 +694,17 @@ static inline void rcu_read_unlock(void)
* critical sections in interrupt context can use just rcu_read_lock(),
* though this should at least be commented to avoid confusing people
* reading the code.
+ *
+ * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
+ * was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
__acquire(RCU_BH);
- rcu_read_acquire_bh();
+ rcu_lock_acquire(&rcu_bh_lock_map);
}
/*
@@ -688,7 +714,7 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_read_release_bh();
+ rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
}
@@ -700,12 +726,17 @@ static inline void rcu_read_unlock_bh(void)
* are being done using call_rcu_sched() or synchronize_rcu_sched().
* Read-side critical sections can also be introduced by anything that
* disables preemption, including local_irq_disable() and friends.
+ *
+ * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_sched() from process context if the matching
+ * rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
{
preempt_disable();
__acquire(RCU_SCHED);
- rcu_read_acquire_sched();
+ rcu_lock_acquire(&rcu_sched_lock_map);
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -722,7 +753,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_read_release_sched();
+ rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
}
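A minimal sketch of the pairing rule spelled out in the comments above (illustrative only, not from this patch):

static void bh_reader(void)
{
	rcu_read_lock_bh();
	/* ... read data whose updaters wait via call_rcu_bh() ... */
	rcu_read_unlock_bh();	/* must run in the same task and context */
}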
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a2ab3c2757d..cf0eb342bcba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2085,6 +2085,14 @@ extern int sched_setscheduler(struct task_struct *, int,
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
extern struct task_struct *idle_task(int cpu);
+/**
+ * is_idle_task - is the specified task an idle task?
+ * @p: the task in question.
+ */
+static inline bool is_idle_task(struct task_struct *p)
+{
+ return p->pid == 0;
+}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
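Later hunks in this series (kdb, perf) convert open-coded pid checks to the new helper; a hypothetical before/after sketch, where `account_task()` and its policy are invented:

static void account_task(struct task_struct *p)
{
	if (is_idle_task(p))	/* was: if (p->pid == 0) */
		return;		/* hypothetical policy: skip the idle task */
	/* ... account ordinary tasks ... */
}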
diff --git a/include/linux/security.h b/include/linux/security.h
index 19d8e04e1688..e8c619d39291 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2056,7 +2056,7 @@ static inline int security_old_inode_init_security(struct inode *inode,
char **name, void **value,
size_t *len)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline int security_inode_create(struct inode *dir,
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 58971e891f48..e1b005918bbb 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -28,6 +28,7 @@
#define _LINUX_SRCU_H
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
struct srcu_struct_array {
int c[2];
@@ -60,18 +61,10 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
__init_srcu_struct((sp), #sp, &__srcu_key); \
})
-# define srcu_read_acquire(sp) \
- lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define srcu_read_release(sp) \
- lock_release(&(sp)->dep_map, 1, _THIS_IP_)
-
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *sp);
-# define srcu_read_acquire(sp) do { } while (0)
-# define srcu_read_release(sp) do { } while (0)
-
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
void cleanup_srcu_struct(struct srcu_struct *sp);
@@ -90,12 +83,32 @@ long srcu_batches_completed(struct srcu_struct *sp);
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
* this assumes we are in an SRCU read-side critical section unless it can
* prove otherwise.
+ *
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of view
+ * (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then srcu_read_lock_held() returns false even if
+ * the CPU did an srcu_read_lock(). The reason for this is that RCU
+ * ignores CPUs that are in such a section, considering them to be in
+ * an extended quiescent state, so such a CPU is effectively never in
+ * an RCU read-side critical section regardless of what RCU primitives
+ * it invokes. This state of affairs is required: we need to keep an
+ * RCU-free window in idle where the CPU may possibly enter low-power
+ * mode. This way, other CPUs that have started a grace period can
+ * notice the extended quiescent state. Otherwise we would delay any
+ * grace period for as long as we run in the idle task.
*/
static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
- if (debug_locks)
- return lock_is_held(&sp->dep_map);
- return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
+
+ if (!debug_lockdep_rcu_enabled())
+ return 1;
+
+ return lock_is_held(&sp->dep_map);
}
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -145,12 +158,17 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
* one way to indirectly wait on an SRCU grace period is to acquire
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited().
+ *
+ * Note that srcu_read_lock() and the matching srcu_read_unlock() must
+ * occur in the same context; for example, it is illegal to invoke
+ * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
+ * was invoked in process context.
*/
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
int retval = __srcu_read_lock(sp);
- srcu_read_acquire(sp);
+ rcu_lock_acquire(&(sp)->dep_map);
return retval;
}
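For reference, the usual read-side pairing with the returned index; `my_srcu` is a hypothetical domain initialized elsewhere with init_srcu_struct():

static struct srcu_struct my_srcu;

static void srcu_reader(void)
{
	int idx;

	idx = srcu_read_lock(&my_srcu);
	/* ... dereference SRCU-protected data ... */
	srcu_read_unlock(&my_srcu, idx);	/* same task, same context */
}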
@@ -164,8 +182,51 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__releases(sp)
{
- srcu_read_release(sp);
+ rcu_lock_release(&(sp)->dep_map);
+ __srcu_read_unlock(sp, idx);
+}
+
+/**
+ * srcu_read_lock_raw - register a new reader for an SRCU-protected structure.
+ * @sp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section. Similar to srcu_read_lock(),
+ * but avoids the RCU-lockdep checking. This means that it is legal to
+ * use srcu_read_lock_raw() in one context, for example, in an exception
+ * handler, and then have the matching srcu_read_unlock_raw() in another
+ * context, for example in the task that took the exception.
+ *
+ * However, the entire SRCU read-side critical section must reside within a
+ * single task. For example, beware of using srcu_read_lock_raw() in
+ * a device interrupt handler and the matching srcu_read_unlock_raw() in
+ * the interrupted task: this will not work if interrupts are threaded.
+ */
+static inline int srcu_read_lock_raw(struct srcu_struct *sp)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __srcu_read_lock(sp);
+ local_irq_restore(flags);
+ return ret;
+}
+
+/**
+ * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure.
+ * @sp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock_raw().
+ *
+ * Exit an SRCU read-side critical section without lockdep-RCU checking.
+ * See srcu_read_lock_raw() for more details.
+ */
+static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
__srcu_read_unlock(sp, idx);
+ local_irq_restore(flags);
}
#endif
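A sketch of the cross-context usage the _raw variants permit, under the single-task constraint noted above; all names here are hypothetical:

static struct srcu_struct raw_srcu;	/* initialized at boot */
static int raw_idx;			/* carried between contexts */

static void my_exception_handler(void)
{
	raw_idx = srcu_read_lock_raw(&raw_srcu);
}

static void my_exception_return_path(void)	/* same task, later */
{
	srcu_read_unlock_raw(&raw_srcu, raw_idx);
}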
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b232ccc0ee29..ab8be90b5cc9 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -7,6 +7,7 @@
#define _LINUX_TICK_H
#include <linux/clockchips.h>
+#include <linux/irqflags.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -121,14 +122,16 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
# ifdef CONFIG_NO_HZ
-extern void tick_nohz_stop_sched_tick(int inidle);
-extern void tick_nohz_restart_sched_tick(void);
+extern void tick_nohz_idle_enter(void);
+extern void tick_nohz_idle_exit(void);
+extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
# else
-static inline void tick_nohz_stop_sched_tick(int inidle) { }
-static inline void tick_nohz_restart_sched_tick(void) { }
+static inline void tick_nohz_idle_enter(void) { }
+static inline void tick_nohz_idle_exit(void) { }
+
static inline ktime_t tick_nohz_get_sleep_length(void)
{
ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3efc9f3f43a0..a9ce45e8501c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,13 +77,13 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
-extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
#define init_waitqueue_head(q) \
do { \
static struct lock_class_key __key; \
\
- __init_waitqueue_head((q), &__key); \
+ __init_waitqueue_head((q), #q, &__key); \
} while (0)
#ifdef CONFIG_LOCKDEP
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index b1377b931eb7..5fb2c3d10c05 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
return icd ? icd->vdev : NULL;
}
@@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu
return container_of(vq, struct soc_camera_device, vb_vidq);
}
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+ return (icd->iface << 8) | (icd->devnum + 1);
+}
+
void soc_camera_lock(struct vb2_queue *vq);
void soc_camera_unlock(struct vb2_queue *vq);
diff --git a/include/net/dst.h b/include/net/dst.h
index 6faec1a60216..75766b42660e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -53,6 +53,7 @@ struct dst_entry {
#define DST_NOHASH 0x0008
#define DST_NOCACHE 0x0010
#define DST_NOCOUNT 0x0020
+#define DST_NOPEER 0x0040
short error;
short obsolete;
diff --git a/include/net/flow.h b/include/net/flow.h
index a09447749e2d..57f15a7f1cdd 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
u8 dir, flow_resolve_t resolver, void *ctx);
extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
extern atomic_t flow_cache_genid;
#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 873d5be7926c..e5a7b9aaf552 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1207,7 +1207,7 @@ extern void ip_vs_control_cleanup(void);
extern struct ip_vs_dest *
ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
__be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
- __u16 protocol, __u32 fwmark);
+ __u16 protocol, __u32 fwmark, __u32 flags);
extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e90e7a9935dd..a15432da27c3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -241,6 +241,9 @@ extern struct sctp_globals {
* bits is an indicator of when to send and window update SACK.
*/
int rwnd_update_shift;
+
+ /* Threshold for autoclose timeout, in seconds. */
+ unsigned long max_autoclose;
} sctp_globals;
#define sctp_rto_initial (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
#define sctp_auth_enable (sctp_globals.auth_enable)
#define sctp_checksum_disable (sctp_globals.checksum_disable)
#define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose (sctp_globals.max_autoclose)
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
diff --git a/include/net/sock.h b/include/net/sock.h
index abb6e0f0c3c3..32e39371fba6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
/*
* Take into account size of receive queue and backlog queue
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can always be accepted.
*/
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
- return qsize + skb->truesize > sk->sk_rcvbuf;
+ return qsize > sk->sk_rcvbuf;
}
/* The per-socket spinlock must be held here. */
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index d1e95c6ac776..5a35a2a2d3c5 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
u8 map_dest;
u8 spma;
u8 probe_tries;
+ u8 priority;
u8 dest_addr[ETH_ALEN];
u8 ctl_src_addr[ETH_ALEN];
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
* @lport: The associated local port
* @fcoe_pending_queue: The pending Rx queue of skbs
* @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority: Packet priority (DCB)
* @max_queue_depth: Max queue depth of pending queue
* @min_queue_depth: Min queue depth of pending queue
* @timer: The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
struct fc_lport *lport;
struct sk_buff_head fcoe_pending_queue;
u8 fcoe_pending_queue_active;
+ u8 priority;
u32 max_queue_depth;
u32 min_queue_depth;
struct timer_list timer;
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 669fbd62ec25..d2d88bed891b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -241,24 +241,73 @@ TRACE_EVENT(rcu_fqs,
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode and "End" for
- * leaving it.
+ * as argument: "Start" for entering dyntick-idle mode, "End" for
+ * leaving it, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle. "Error on entry: not idle task" and "Error on
+ * exit: not idle task" indicate that a non-idle task is erroneously
+ * toying with the idle loop.
+ *
+ * These events also take a pair of numbers, which indicate the nesting
+ * depth before and after the event of interest. Note that task-related
+ * events use the upper bits of each number, while interrupt-related
+ * events use the lower bits.
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(char *polarity),
+ TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
- TP_ARGS(polarity),
+ TP_ARGS(polarity, oldnesting, newnesting),
TP_STRUCT__entry(
__field(char *, polarity)
+ __field(long long, oldnesting)
+ __field(long long, newnesting)
),
TP_fast_assign(
__entry->polarity = polarity;
+ __entry->oldnesting = oldnesting;
+ __entry->newnesting = newnesting;
+ ),
+
+ TP_printk("%s %llx %llx", __entry->polarity,
+ __entry->oldnesting, __entry->newnesting)
+);
+
+/*
+ * Tracepoint for RCU preparation for idle, the goal being to get RCU
+ * processing done so that the current CPU can shut off its scheduling
+ * clock and enter dyntick-idle mode. One way to accomplish this is
+ * to drain all RCU callbacks from this CPU, and the other is to have
+ * done everything RCU requires for the current grace period. In this
+ * latter case, the CPU will be awakened at the end of the current grace
+ * period in order to process the remainder of its callbacks.
+ *
+ * These tracepoints take a string as argument:
+ *
+ * "No callbacks": Nothing to do, no callbacks on this CPU.
+ * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
+ * "Begin holdoff": Attempt failed, don't retry until next jiffy.
+ * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ * "More callbacks": Still more callbacks, try again to clear them out.
+ * "Callbacks drained": All callbacks processed, off to dyntick idle!
+ * "Timer": Timer fired to cause CPU to continue processing callbacks.
+ */
+TRACE_EVENT(rcu_prep_idle,
+
+ TP_PROTO(char *reason),
+
+ TP_ARGS(reason),
+
+ TP_STRUCT__entry(
+ __field(char *, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->reason = reason;
),
- TP_printk("%s", __entry->polarity)
+ TP_printk("%s", __entry->reason)
);
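Hypothetical call sites for the two events above, in the style of the tree-RCU idle-entry code; the function and argument values are illustrative only:

static void idle_entry_sketch(long long oldval)
{
	trace_rcu_dyntick("Start", oldval, 0);	/* nesting: oldval -> 0 */
	trace_rcu_prep_idle("No callbacks");	/* hypothetical reason */
}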
/*
@@ -412,27 +461,71 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
/*
* Tracepoint for exiting rcu_do_batch after RCU callbacks have been
- * invoked. The first argument is the name of the RCU flavor and
- * the second argument is number of callbacks actually invoked.
+ * invoked. The first argument is the name of the RCU flavor,
+ * the second argument is number of callbacks actually invoked,
+ * the third argument (cb) is whether or not any of the callbacks that
+ * were ready to invoke at the beginning of this batch are still
+ * queued, the fourth argument (nr) is the return value of need_resched(),
+ * the fifth argument (iit) is 1 if the current task is the idle task,
+ * and the sixth argument (risk) is the return value from
+ * rcu_is_callbacks_kthread().
*/
TRACE_EVENT(rcu_batch_end,
- TP_PROTO(char *rcuname, int callbacks_invoked),
+ TP_PROTO(char *rcuname, int callbacks_invoked,
+ bool cb, bool nr, bool iit, bool risk),
- TP_ARGS(rcuname, callbacks_invoked),
+ TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
__field(char *, rcuname)
__field(int, callbacks_invoked)
+ __field(bool, cb)
+ __field(bool, nr)
+ __field(bool, iit)
+ __field(bool, risk)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->callbacks_invoked = callbacks_invoked;
+ __entry->cb = cb;
+ __entry->nr = nr;
+ __entry->iit = iit;
+ __entry->risk = risk;
+ ),
+
+ TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
+ __entry->rcuname, __entry->callbacks_invoked,
+ __entry->cb ? 'C' : '.',
+ __entry->nr ? 'S' : '.',
+ __entry->iit ? 'I' : '.',
+ __entry->risk ? 'R' : '.')
+);
+
+/*
+ * Tracepoint for rcutorture readers. The first argument is the name
+ * of the RCU flavor from rcutorture's viewpoint and the second argument
+ * is the callback address.
+ */
+TRACE_EVENT(rcu_torture_read,
+
+ TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
+
+ TP_ARGS(rcutorturename, rhp),
+
+ TP_STRUCT__entry(
+ __field(char *, rcutorturename)
+ __field(struct rcu_head *, rhp)
+ ),
+
+ TP_fast_assign(
+ __entry->rcutorturename = rcutorturename;
+ __entry->rhp = rhp;
),
- TP_printk("%s CBs-invoked=%d",
- __entry->rcuname, __entry->callbacks_invoked)
+ TP_printk("%s torture read %p",
+ __entry->rcutorturename, __entry->rhp)
);
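An invented example of the extended rcu_batch_end format and how it renders, given the 'C'/'S'/'I'/'R' encoding described above:

static void batch_end_sketch(void)
{
	/* renders as: "rcu_sched CBs-invoked=10 idle=.S.R" */
	trace_rcu_batch_end("rcu_sched", 10, false, true, false, true);
}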
#else /* #ifdef CONFIG_RCU_TRACE */
@@ -443,13 +536,16 @@ TRACE_EVENT(rcu_batch_end,
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
+#define trace_rcu_prep_idle(reason) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
-#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
+ do { } while (0)
+#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index b99caa8b780c..99d1d0decf88 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -21,6 +21,16 @@
{I_REFERENCED, "I_REFERENCED"} \
)
+#define WB_WORK_REASON \
+ {WB_REASON_BACKGROUND, "background"}, \
+ {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
+ {WB_REASON_SYNC, "sync"}, \
+ {WB_REASON_PERIODIC, "periodic"}, \
+ {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
+ {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
+ {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
+ {WB_REASON_FORKER_THREAD, "forker_thread"}
+
struct wb_writeback_work;
DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->for_kupdate,
__entry->range_cyclic,
__entry->for_background,
- wb_reason_name[__entry->reason]
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
- wb_reason_name[__entry->reason])
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
+ )
);
TRACE_EVENT(global_dirty_state,
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index f0b6890370be..f6f07aa35af5 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -29,8 +29,7 @@ enum xsd_sockmsg_type
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
- XS_RESTRICT,
- XS_RESET_WATCHES
+ XS_RESTRICT
};
#define XS_WRITE_NONE "NONE"
diff --git a/init/Kconfig b/init/Kconfig
index 43298f9810fb..82b6a4c675b2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -469,14 +469,14 @@ config RCU_FANOUT_EXACT
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
- depends on TREE_RCU && NO_HZ && SMP
+ depends on NO_HZ && SMP
default n
help
This option causes RCU to attempt to accelerate grace periods
- in order to allow the final CPU to enter dynticks-idle state
- more quickly. On the other hand, this option increases the
- overhead of the dynticks-idle checking, particularly on systems
- with large numbers of CPUs.
+ in order to allow CPUs to enter dynticks-idle state more
+ quickly. On the other hand, this option increases the overhead
+ of the dynticks-idle checking, particularly on systems with
+ large numbers of CPUs.
Say Y if energy efficiency is critically important, particularly
if you have relatively few CPUs.
diff --git a/init/main.c b/init/main.c
index 217ed23e9487..2c76efb513c2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -469,13 +469,12 @@ asmlinkage void __init start_kernel(void)
char * command_line;
extern const struct kernel_param __start___param[], __stop___param[];
- smp_setup_processor_id();
-
/*
* Need to run as early as possible, to initialize the
* lockdep hash:
*/
lockdep_init();
+ smp_setup_processor_id();
debug_objects_early_init();
/*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648f3cdc..a184470cf9b5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
continue;
/* get old css_set pointer */
task_lock(tsk);
- if (tsk->flags & PF_EXITING) {
- /* ignore this task if it's going away */
- task_unlock(tsk);
- continue;
- }
oldcg = tsk->cgroups;
get_css_set(oldcg);
task_unlock(tsk);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3f8ee8a138c4..5ca38d5d238a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -379,6 +379,7 @@ out:
cpu_maps_update_done();
return err;
}
+EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a426..0b1712dba587 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
struct cpuset, css);
}
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return false;
+}
+#endif
+
+
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
- bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+ bool need_loop;
repeat:
/*
@@ -962,6 +975,14 @@ repeat:
return;
task_lock(tsk);
+ /*
+ * Determine whether a loop is necessary, in case another thread is
+ * doing get_mems_allowed(). If at least one node remains unchanged and
+ * tsk does not have a mempolicy, then an empty nodemask will not be
+ * possible when mems_allowed is larger than a word.
+ */
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -981,11 +1002,9 @@ repeat:
/*
* Allocation of memory is very fast, we needn't sleep when waiting
- * for the read-side. No wait is necessary, however, if at least one
- * node remains unchanged.
+ * for the read-side.
*/
- while (masks_disjoint &&
- ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+ while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
task_unlock(tsk);
if (!task_curr(tsk))
yield();
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 5532dd37aa86..7d6fb40d2188 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -636,7 +636,7 @@ char kdb_task_state_char (const struct task_struct *p)
(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
(p->exit_state & EXIT_DEAD) ? 'E' :
(p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
- if (p->pid == 0) {
+ if (is_idle_task(p)) {
/* Idle task. Is it really idle, apart from the kdb
* interrupt? */
if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 89e5e8aa4c36..22d901f9caf4 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif
-obj-y := core.o ring_buffer.o
+obj-y := core.o ring_buffer.o callchain.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
new file mode 100644
index 000000000000..057e24b665cf
--- /dev/null
+++ b/kernel/events/callchain.c
@@ -0,0 +1,191 @@
+/*
+ * Performance events callchain code, extracted from core.c:
+ *
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct callchain_cpus_entries {
+ struct rcu_head rcu_head;
+ struct perf_callchain_entry *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+static struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+ struct callchain_cpus_entries *entries;
+ int cpu;
+
+ entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+
+ kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+ struct callchain_cpus_entries *entries;
+
+ entries = callchain_cpus_entries;
+ rcu_assign_pointer(callchain_cpus_entries, NULL);
+ call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+ int cpu;
+ int size;
+ struct callchain_cpus_entries *entries;
+
+ /*
+ * We can't use the percpu allocation API for data that can be
+ * accessed from NMI. Use a temporary manual per cpu allocation
+ * until that gets sorted out.
+ */
+ size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
+
+ entries = kzalloc(size, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+ for_each_possible_cpu(cpu) {
+ entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!entries->cpu_entries[cpu])
+ goto fail;
+ }
+
+ rcu_assign_pointer(callchain_cpus_entries, entries);
+
+ return 0;
+
+fail:
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+ kfree(entries);
+
+ return -ENOMEM;
+}
+
+int get_callchain_buffers(void)
+{
+ int err = 0;
+ int count;
+
+ mutex_lock(&callchain_mutex);
+
+ count = atomic_inc_return(&nr_callchain_events);
+ if (WARN_ON_ONCE(count < 1)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (count > 1) {
+ /* If the allocation failed, give up */
+ if (!callchain_cpus_entries)
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = alloc_callchain_buffers();
+ if (err)
+ release_callchain_buffers();
+exit:
+ mutex_unlock(&callchain_mutex);
+
+ return err;
+}
+
+void put_callchain_buffers(void)
+{
+ if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+ release_callchain_buffers();
+ mutex_unlock(&callchain_mutex);
+ }
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+ int cpu;
+ struct callchain_cpus_entries *entries;
+
+ *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+ if (*rctx == -1)
+ return NULL;
+
+ entries = rcu_dereference(callchain_cpus_entries);
+ if (!entries)
+ return NULL;
+
+ cpu = smp_processor_id();
+
+ return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+ put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ int rctx;
+ struct perf_callchain_entry *entry;
+
+
+ entry = get_callchain_entry(&rctx);
+ if (rctx == -1)
+ return NULL;
+
+ if (!entry)
+ goto exit_put;
+
+ entry->nr = 0;
+
+ if (!user_mode(regs)) {
+ perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+ perf_callchain_kernel(entry, regs);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_user(entry, regs);
+ }
+
+exit_put:
+ put_callchain_entry(rctx);
+
+ return entry;
+}
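A hypothetical simplification of how the event-creation path in core.c uses the refcounted buffers above: the first user allocates, later users only take a reference, and teardown drops it. The function name is invented:

static int my_event_init_callchain(struct perf_event_attr *attr)
{
	int err;

	if (!(attr->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	err = get_callchain_buffers();	/* -ENOMEM if allocation failed */
	if (err)
		return err;

	/* ... the destruction path calls put_callchain_buffers() ... */
	return 0;
}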
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3b9df5962c2..890eb02c2f21 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -128,7 +128,7 @@ enum event_type_t {
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
-struct jump_label_key perf_sched_events __read_mostly;
+struct jump_label_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static atomic_t nr_mmap_events __read_mostly;
@@ -1130,6 +1130,8 @@ event_sched_out(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
+ if (event->attr.freq && event->attr.sample_freq)
+ ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
@@ -1325,6 +1327,7 @@ retry:
}
raw_spin_unlock_irq(&ctx->lock);
}
+EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
@@ -1406,6 +1409,8 @@ event_sched_in(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
+ if (event->attr.freq && event->attr.sample_freq)
+ ctx->nr_freq++;
if (event->attr.exclusive)
cpuctx->exclusive = 1;
@@ -1662,8 +1667,7 @@ retry:
* Note: this works for group members as well as group leaders
* since the non-leader members' sibling_lists will be empty.
*/
-static void __perf_event_mark_enabled(struct perf_event *event,
- struct perf_event_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
@@ -1701,7 +1705,7 @@ static int __perf_event_enable(void *info)
*/
perf_cgroup_set_timestamp(current, ctx);
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
@@ -1782,7 +1786,7 @@ void perf_event_enable(struct perf_event *event)
retry:
if (!ctx->is_active) {
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
goto out;
}
@@ -1809,6 +1813,7 @@ retry:
out:
raw_spin_unlock_irq(&ctx->lock);
}
+EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
{
@@ -2327,6 +2332,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
u64 interrupts, now;
s64 delta;
+ if (!ctx->nr_freq)
+ return;
+
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -2382,12 +2390,14 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
struct perf_event_context *ctx = NULL;
- int rotate = 0, remove = 1;
+ int rotate = 0, remove = 1, freq = 0;
if (cpuctx->ctx.nr_events) {
remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
+ if (cpuctx->ctx.nr_freq)
+ freq = 1;
}
ctx = cpuctx->task_ctx;
@@ -2395,33 +2405,40 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
+ if (ctx->nr_freq)
+ freq = 1;
}
+ if (!rotate && !freq)
+ goto done;
+
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
- perf_ctx_adjust_freq(&cpuctx->ctx, interval);
- if (ctx)
- perf_ctx_adjust_freq(ctx, interval);
- if (!rotate)
- goto done;
+ if (freq) {
+ perf_ctx_adjust_freq(&cpuctx->ctx, interval);
+ if (ctx)
+ perf_ctx_adjust_freq(ctx, interval);
+ }
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+ if (rotate) {
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ if (ctx)
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
- rotate_ctx(&cpuctx->ctx);
- if (ctx)
- rotate_ctx(ctx);
+ rotate_ctx(&cpuctx->ctx);
+ if (ctx)
+ rotate_ctx(ctx);
- perf_event_sched_in(cpuctx, ctx, current);
+ perf_event_sched_in(cpuctx, ctx, current);
+ }
+
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
void perf_event_task_tick(void)
@@ -2448,7 +2465,7 @@ static int event_enable_on_exec(struct perf_event *event,
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
return 1;
}
@@ -2480,13 +2497,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
raw_spin_lock(&ctx->lock);
task_ctx_sched_out(ctx);
- list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
- ret = event_enable_on_exec(event, ctx);
- if (ret)
- enabled = 1;
- }
-
- list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+ list_for_each_entry(event, &ctx->event_list, event_entry) {
ret = event_enable_on_exec(event, ctx);
if (ret)
enabled = 1;
@@ -2574,215 +2585,6 @@ static u64 perf_event_read(struct perf_event *event)
}
/*
- * Callchain support
- */
-
-struct callchain_cpus_entries {
- struct rcu_head rcu_head;
- struct perf_callchain_entry *cpu_entries[0];
-};
-
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
-static atomic_t nr_callchain_events;
-static DEFINE_MUTEX(callchain_mutex);
-struct callchain_cpus_entries *callchain_cpus_entries;
-
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
-}
-
-static void release_callchain_buffers_rcu(struct rcu_head *head)
-{
- struct callchain_cpus_entries *entries;
- int cpu;
-
- entries = container_of(head, struct callchain_cpus_entries, rcu_head);
-
- for_each_possible_cpu(cpu)
- kfree(entries->cpu_entries[cpu]);
-
- kfree(entries);
-}
-
-static void release_callchain_buffers(void)
-{
- struct callchain_cpus_entries *entries;
-
- entries = callchain_cpus_entries;
- rcu_assign_pointer(callchain_cpus_entries, NULL);
- call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
-}
-
-static int alloc_callchain_buffers(void)
-{
- int cpu;
- int size;
- struct callchain_cpus_entries *entries;
-
- /*
- * We can't use the percpu allocation API for data that can be
- * accessed from NMI. Use a temporary manual per cpu allocation
- * until that gets sorted out.
- */
- size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
-
- entries = kzalloc(size, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
-
- for_each_possible_cpu(cpu) {
- entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
- cpu_to_node(cpu));
- if (!entries->cpu_entries[cpu])
- goto fail;
- }
-
- rcu_assign_pointer(callchain_cpus_entries, entries);
-
- return 0;
-
-fail:
- for_each_possible_cpu(cpu)
- kfree(entries->cpu_entries[cpu]);
- kfree(entries);
-
- return -ENOMEM;
-}
-
-static int get_callchain_buffers(void)
-{
- int err = 0;
- int count;
-
- mutex_lock(&callchain_mutex);
-
- count = atomic_inc_return(&nr_callchain_events);
- if (WARN_ON_ONCE(count < 1)) {
- err = -EINVAL;
- goto exit;
- }
-
- if (count > 1) {
- /* If the allocation failed, give up */
- if (!callchain_cpus_entries)
- err = -ENOMEM;
- goto exit;
- }
-
- err = alloc_callchain_buffers();
- if (err)
- release_callchain_buffers();
-exit:
- mutex_unlock(&callchain_mutex);
-
- return err;
-}
-
-static void put_callchain_buffers(void)
-{
- if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
- release_callchain_buffers();
- mutex_unlock(&callchain_mutex);
- }
-}
-
-static int get_recursion_context(int *recursion)
-{
- int rctx;
-
- if (in_nmi())
- rctx = 3;
- else if (in_irq())
- rctx = 2;
- else if (in_softirq())
- rctx = 1;
- else
- rctx = 0;
-
- if (recursion[rctx])
- return -1;
-
- recursion[rctx]++;
- barrier();
-
- return rctx;
-}
-
-static inline void put_recursion_context(int *recursion, int rctx)
-{
- barrier();
- recursion[rctx]--;
-}
-
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
-{
- int cpu;
- struct callchain_cpus_entries *entries;
-
- *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
- if (*rctx == -1)
- return NULL;
-
- entries = rcu_dereference(callchain_cpus_entries);
- if (!entries)
- return NULL;
-
- cpu = smp_processor_id();
-
- return &entries->cpu_entries[cpu][*rctx];
-}
-
-static void
-put_callchain_entry(int rctx)
-{
- put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
- int rctx;
- struct perf_callchain_entry *entry;
-
-
- entry = get_callchain_entry(&rctx);
- if (rctx == -1)
- return NULL;
-
- if (!entry)
- goto exit_put;
-
- entry->nr = 0;
-
- if (!user_mode(regs)) {
- perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
- perf_callchain_kernel(entry, regs);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- perf_callchain_store(entry, PERF_CONTEXT_USER);
- perf_callchain_user(entry, regs);
- }
-
-exit_put:
- put_callchain_entry(rctx);
-
- return entry;
-}
-
-/*
* Initialize the perf_event context in a task_struct:
*/
static void __perf_event_init_context(struct perf_event_context *ctx)
@@ -2946,7 +2748,7 @@ static void free_event(struct perf_event *event)
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_dec(&perf_sched_events);
+ jump_label_dec_deferred(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
@@ -2957,7 +2759,7 @@ static void free_event(struct perf_event *event)
put_callchain_buffers();
if (is_cgroup_event(event)) {
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
- jump_label_dec(&perf_sched_events);
+ jump_label_dec_deferred(&perf_sched_events);
}
}
@@ -3558,9 +3360,13 @@ static void ring_buffer_wakeup(struct perf_event *event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
- list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+ if (!rb)
+ goto unlock;
+
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
wake_up_all(&event->waitq);
- }
+
+unlock:
rcu_read_unlock();
}
@@ -4816,7 +4622,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
- data->period = event->hw.last_period;
if (!overflow)
overflow = perf_swevent_set_period(event);
@@ -4850,6 +4655,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
if (!is_sampling_event(event))
return;
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+ } else
+ data->period = event->hw.last_period;
+
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
return perf_swevent_overflow(event, 1, data, regs);
@@ -5362,7 +5173,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) {
- if (!(event->attr.exclude_idle && current->pid == 0))
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
if (perf_event_overflow(event, &data, regs))
ret = HRTIMER_NORESTART;
}
@@ -5977,7 +5788,7 @@ done:
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_inc(&perf_sched_events);
+ jump_label_inc(&perf_sched_events.key);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
@@ -6215,7 +6026,7 @@ SYSCALL_DEFINE5(perf_event_open,
* - that may need work on context switch
*/
atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
- jump_label_inc(&perf_sched_events);
+ jump_label_inc(&perf_sched_events.key);
}
/*
@@ -7061,6 +6872,9 @@ void __init perf_event_init(void)
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
+
+ /* do not patch jump label more than once per second */
+ jump_label_rate_limit(&perf_sched_events, HZ);
}
static int __init perf_event_sysfs_init(void)
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 64568a699375..b0b107f90afc 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -1,6 +1,10 @@
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H
+#include <linux/hardirq.h>
+
+/* Buffer handling */
+
#define RING_BUFFER_WRITABLE 0x01
struct ring_buffer {
@@ -67,7 +71,7 @@ static inline int page_order(struct ring_buffer *rb)
}
#endif
-static unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
@@ -96,4 +100,37 @@ __output_copy(struct perf_output_handle *handle,
} while (len);
}
+/* Callchain handling */
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);
+
+static inline int get_recursion_context(int *recursion)
+{
+ int rctx;
+
+ if (in_nmi())
+ rctx = 3;
+ else if (in_irq())
+ rctx = 2;
+ else if (in_softirq())
+ rctx = 1;
+ else
+ rctx = 0;
+
+ if (recursion[rctx])
+ return -1;
+
+ recursion[rctx]++;
+ barrier();
+
+ return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+ barrier();
+ recursion[rctx]--;
+}
+
#endif /* _KERNEL_EVENTS_INTERNAL_H */
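Illustrative pairing of the recursion helpers, mirroring how callchain_recursion is used; `my_recursion` and the work performed are hypothetical:

static DEFINE_PER_CPU(int, my_recursion[PERF_NR_CONTEXTS]);

static void my_nmi_safe_work(void)
{
	int rctx;

	rctx = get_recursion_context(__get_cpu_var(my_recursion));
	if (rctx == -1)
		return;		/* already active at this context level */

	/* ... work that must not recurse within NMI/irq/softirq/task ... */

	put_recursion_context(__get_cpu_var(my_recursion), rctx);
}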
diff --git a/kernel/exit.c b/kernel/exit.c
index 5e0d1f4c696e..d579a459309d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1530,8 +1530,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
}
/* dead body doesn't have much to contribute */
- if (p->exit_state == EXIT_DEAD)
+ if (unlikely(p->exit_state == EXIT_DEAD)) {
+ /*
+ * But do not ignore this task until the tracer does
+ * wait_task_zombie()->do_notify_parent().
+ */
+ if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+ wo->notask_error = 0;
return 0;
+ }
/* slay zombie? */
if (p->exit_state == EXIT_ZOMBIE) {
diff --git a/kernel/futex.c b/kernel/futex.c
index ea87f4d2f455..1614be20173d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -314,17 +314,29 @@ again:
#endif
lock_page(page_head);
+
+ /*
+ * If page_head->mapping is NULL, then it cannot be a PageAnon
+ * page; but it might be the ZERO_PAGE or in the gate area or
+ * in a special mapping (all cases which we are happy to fail);
+ * or it may have been a good file page when get_user_pages_fast
+ * found it, but truncated or holepunched or subjected to
+ * invalidate_complete_page2 before we got the page lock (also
+ * cases which we are happy to fail). And we hold a reference,
+ * so refcount care in invalidate_complete_page's remove_mapping
+ * prevents drop_caches from setting mapping to NULL beneath us.
+ *
+ * The case we do have to guard against is when memory pressure made
+ * shmem_writepage move it from filecache to swapcache beneath us:
+ * an unlikely race, but we do need to retry for page_head->mapping.
+ */
if (!page_head->mapping) {
+ int shmem_swizzled = PageSwapCache(page_head);
unlock_page(page_head);
put_page(page_head);
- /*
- * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
- * trying to find one. RW mapping would have COW'd (and thus
- * have a mapping) so this page is RO and won't ever change.
- */
- if ((page_head == ZERO_PAGE(address)))
- return -EFAULT;
- goto again;
+ if (shmem_swizzled)
+ goto again;
+ return -EFAULT;
}
/*
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 8b1748d0172c..2e48ec0c2e91 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
/*
* Ensure the task is not frozen.
- * Also, when a freshly created task is scheduled once, changes
- * its state to TASK_UNINTERRUPTIBLE without having ever been
- * switched out once, it musn't be checked.
+ * Also, skip vfork and any other user process that the freezer should skip.
*/
- if (unlikely(t->flags & PF_FROZEN || !switch_count))
+ if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+ return;
+
+ /*
+ * When a freshly created task has been scheduled once and changes its
+ * state to TASK_UNINTERRUPTIBLE without ever having been switched out,
+ * it mustn't be checked.
+ */
+ if (unlikely(!switch_count))
return;
if (switch_count != t->last_switch_count) {
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 66ff7109f697..30c3c7708132 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -72,15 +72,46 @@ void jump_label_inc(struct jump_label_key *key)
jump_label_unlock();
}
-void jump_label_dec(struct jump_label_key *key)
+static void __jump_label_dec(struct jump_label_key *key,
+ unsigned long rate_limit, struct delayed_work *work)
{
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
return;
- jump_label_update(key, JUMP_LABEL_DISABLE);
+ if (rate_limit) {
+ atomic_inc(&key->enabled);
+ schedule_delayed_work(work, rate_limit);
+ } else
+ jump_label_update(key, JUMP_LABEL_DISABLE);
+
jump_label_unlock();
}
+static void jump_label_update_timeout(struct work_struct *work)
+{
+ struct jump_label_key_deferred *key =
+ container_of(work, struct jump_label_key_deferred, work.work);
+ __jump_label_dec(&key->key, 0, NULL);
+}
+
+void jump_label_dec(struct jump_label_key *key)
+{
+ __jump_label_dec(key, 0, NULL);
+}
+
+void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+ __jump_label_dec(&key->key, key->timeout, &key->work);
+}
+
+
+void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl)
+{
+ key->timeout = rl;
+ INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
+}
+
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (entry->code <= (unsigned long)end &&
@@ -111,7 +142,7 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
* running code can override this to make the non-live update case
* cheaper.
*/
-void __weak arch_jump_label_transform_static(struct jump_entry *entry,
+void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
arch_jump_label_transform(entry, type);
@@ -217,8 +248,13 @@ void jump_label_apply_nops(struct module *mod)
if (iter_start == iter_stop)
return;
- for (iter = iter_start; iter < iter_stop; iter++)
- arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ struct jump_label_key *iterk;
+
+ iterk = (struct jump_label_key *)(unsigned long)iter->key;
+ arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
+ JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+ }
}
static int jump_label_add_module(struct module *mod)
@@ -258,8 +294,7 @@ static int jump_label_add_module(struct module *mod)
key->next = jlm;
if (jump_label_enabled(key))
- __jump_label_update(key, iter, iter_stop,
- JUMP_LABEL_ENABLE);
+ __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
}
return 0;
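Usage sketch for the deferred key API added above, mirroring the perf conversion later in this diff; `my_key` and the surrounding functions are hypothetical:

static struct jump_label_key_deferred my_key;

static void my_subsys_init(void)
{
	jump_label_rate_limit(&my_key, HZ);	/* patch at most once per second */
}

static void my_event_create(void)
{
	jump_label_inc(&my_key.key);		/* enable side stays immediate */
}

static void my_event_destroy(void)
{
	jump_label_dec_deferred(&my_key);	/* disable may be deferred */
}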
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b2e08c932d91..8889f7dd7c46 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -431,6 +431,7 @@ unsigned int max_lockdep_depth;
* about it later on, in lockdep_info().
*/
static int lockdep_init_error;
+static const char *lock_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
@@ -499,36 +500,32 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
usage[i] = '\0';
}
-static int __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name;
name = class->name;
- if (!name)
- name = __get_key_name(class->key, str);
-
- return printk("%s", name);
-}
-
-static void print_lock_name(struct lock_class *class)
-{
- char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
- const char *name;
-
- get_usage_chars(class, usage);
-
- name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
- printk(" (%s", name);
+ printk("%s", name);
} else {
- printk(" (%s", name);
+ printk("%s", name);
if (class->name_version > 1)
printk("#%d", class->name_version);
if (class->subclass)
printk("/%d", class->subclass);
}
+}
+
+static void print_lock_name(struct lock_class *class)
+{
+ char usage[LOCK_USAGE_CHARS];
+
+ get_usage_chars(class, usage);
+
+ printk(" (");
+ __print_lock_name(class);
printk("){%s}", usage);
}
@@ -568,11 +565,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
}
}
-static void print_kernel_version(void)
+static void print_kernel_ident(void)
{
- printk("%s %.*s\n", init_utsname()->release,
+ printk("%s %.*s %s\n", init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ init_utsname()->version,
+ print_tainted());
}
static int very_verbose(struct lock_class *class)
@@ -656,6 +654,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
if (unlikely(!lockdep_initialized)) {
lockdep_init();
lockdep_init_error = 1;
+ lock_init_error = lock->name;
save_stack_trace(&lockdep_init_trace);
}
#endif
@@ -723,7 +722,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
class = look_up_lock_class(lock, subclass);
if (likely(class))
- return class;
+ goto out_set_class_cache;
/*
* Debug-check: all keys must be persistent!
@@ -808,6 +807,7 @@ out_unlock_set:
graph_unlock();
raw_local_irq_restore(flags);
+out_set_class_cache:
if (!subclass || force)
lock->class_cache[0] = class;
else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
@@ -1149,7 +1149,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
printk("\n");
printk("======================================================\n");
printk("[ INFO: possible circular locking dependency detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
@@ -1488,7 +1488,7 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("======================================================\n");
printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
irqclass, irqclass);
- print_kernel_version();
+ print_kernel_ident();
printk("------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr),
@@ -1717,7 +1717,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
printk("\n");
printk("=============================================\n");
printk("[ INFO: possible recursive locking detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
@@ -2224,7 +2224,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
printk("\n");
printk("=================================\n");
printk("[ INFO: inconsistent lock state ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
@@ -2289,7 +2289,7 @@ print_irq_inversion_bug(struct task_struct *curr,
printk("\n");
printk("=========================================================\n");
printk("[ INFO: possible irq lock inversion dependency detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
curr->comm, task_pid_nr(curr));
@@ -3175,6 +3175,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("\n");
printk("=====================================\n");
printk("[ BUG: bad unlock balance detected! ]\n");
+ print_kernel_ident();
printk("-------------------------------------\n");
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
@@ -3619,6 +3620,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("\n");
printk("=================================\n");
printk("[ BUG: bad contention detected! ]\n");
+ print_kernel_ident();
printk("---------------------------------\n");
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
@@ -3974,7 +3976,8 @@ void __init lockdep_info(void)
#ifdef CONFIG_DEBUG_LOCKDEP
if (lockdep_init_error) {
- printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+ printk("WARNING: lockdep init error! lock-%s was acquired"
+ "before lockdep_init\n", lock_init_error);
printk("Call stack leading to lockdep invocation was:\n");
print_stack_trace(&lockdep_init_trace, 0);
}
@@ -3993,6 +3996,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
printk("\n");
printk("=========================\n");
printk("[ BUG: held lock freed! ]\n");
+ print_kernel_ident();
printk("-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
@@ -4050,6 +4054,7 @@ static void print_held_locks_bug(struct task_struct *curr)
printk("\n");
printk("=====================================\n");
printk("[ BUG: lock held at task exit time! ]\n");
+ print_kernel_ident();
printk("-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
curr->comm, task_pid_nr(curr));
@@ -4147,6 +4152,7 @@ void lockdep_sys_exit(void)
printk("\n");
printk("================================================\n");
printk("[ BUG: lock held when returning to user space! ]\n");
+ print_kernel_ident();
printk("------------------------------------------------\n");
printk("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
@@ -4166,10 +4172,33 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
printk("\n");
printk("===============================\n");
printk("[ INFO: suspicious RCU usage. ]\n");
+ print_kernel_ident();
printk("-------------------------------\n");
printk("%s:%d %s!\n", file, line, s);
printk("\nother info that might help us debug this:\n\n");
printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
+
+ /*
+ * If a CPU is in the RCU-free window in idle (ie: in the section
+ * between rcu_idle_enter() and rcu_idle_exit()), then RCU
+ * considers that CPU to be in an "extended quiescent state",
+ * which means that RCU will be completely ignoring that CPU.
+ * Therefore, rcu_read_lock() and friends have absolutely no
+ * effect on a CPU running in that state. In other words, even if
+ * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
+ * delete data structures out from under it. RCU really has no
+ * choice here: we need to keep an RCU-free window in idle where
+ * the CPU may possibly enter low-power mode. This way, other CPUs
+ * that have started a grace period can notice the extended quiescent
+ * state. Otherwise we would delay any grace period for as long as we
+ * run in the idle task.
+ *
+ * So complain bitterly if someone does call rcu_read_lock(),
+ * rcu_read_lock_bh() and so on from extended quiescent states.
+ */
+ if (rcu_is_cpu_idle())
+ printk("RCU used illegally from extended quiescent state!\n");
+
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
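
With this check in place, any RCU read-side primitive used between rcu_idle_enter() and rcu_idle_exit() produces the splat above. A minimal sketch of a defensively written reader, assuming a hypothetical RCU-protected pointer my_data and helper do_something() (not part of this patch):

        struct my_struct *p;

        if (WARN_ON_ONCE(rcu_is_cpu_idle()))
                return;                 /* RCU is ignoring this CPU */
        rcu_read_lock();
        p = rcu_dereference(my_data);
        if (p)
                do_something(p);        /* hypothetical consumer */
        rcu_read_unlock();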
diff --git a/kernel/panic.c b/kernel/panic.c
index b26593604214..3458469eb7c3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -237,11 +237,20 @@ void add_taint(unsigned flag)
* Can't trust the integrity of the kernel anymore.
* We don't call directly debug_locks_off() because the issue
* is not necessarily serious enough to set oops_in_progress to 1
- * Also we want to keep up lockdep for staging development and
- * post-warning case.
+ * Also we want to keep up lockdep for staging/out-of-tree
+ * development and post-warning case.
*/
- if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
- printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+ switch (flag) {
+ case TAINT_CRAP:
+ case TAINT_OOT_MODULE:
+ case TAINT_WARN:
+ case TAINT_FIRMWARE_WORKAROUND:
+ break;
+
+ default:
+ if (__debug_locks_off())
+ printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+ }
set_bit(flag, &tainted_mask);
}
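
With the switch statement, taints that routinely occur during normal development no longer cost you lock debugging, while serious taints still shut it off. For example (hypothetical call sites; TAINT_DIE is the oops-time taint):

        add_taint(TAINT_WARN);          /* from a WARN(): lockdep stays enabled */
        add_taint(TAINT_DIE);           /* from an oops: lock debugging shuts off */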
diff --git a/kernel/printk.c b/kernel/printk.c
index 7982a0a841ea..989e4a52da76 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -199,7 +199,7 @@ void __init setup_log_buf(int early)
unsigned long mem;
mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
- if (mem == MEMBLOCK_ERROR)
+ if (!mem)
return;
new_log_buf = __va(mem);
} else {
@@ -688,6 +688,7 @@ static void zap_locks(void)
oops_timestamp = jiffies;
+ debug_locks_off();
/* If a crash is occurring, make sure we can't deadlock */
raw_spin_lock_init(&logbuf_lock);
/* And make sure that we print immediately */
@@ -840,9 +841,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
boot_delay_msec();
printk_delay();
- preempt_disable();
/* This stops the holder of console_sem just where we want him */
- raw_local_irq_save(flags);
+ local_irq_save(flags);
this_cpu = smp_processor_id();
/*
@@ -856,7 +856,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* recursion and return - but flag the recursion so that
* it can be printed at the next appropriate moment:
*/
- if (!oops_in_progress) {
+ if (!oops_in_progress && !lockdep_recursing(current)) {
recursion_bug = 1;
goto out_restore_irqs;
}
@@ -962,9 +962,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
lockdep_on();
out_restore_irqs:
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
- preempt_enable();
return printed_len;
}
EXPORT_SYMBOL(printk);
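
lockdep_recursing() tests the per-task counter that lockdep bumps around its own processing; it is defined in lockdep.h along the lines of the sketch below, so a printk() issued from inside lockdep no longer trips the recursion_bug path:

        #define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)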
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 24d04477b257..78ab24a7b0e4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
*/
if (!(child->flags & PF_EXITING) &&
(child->signal->flags & SIGNAL_STOP_STOPPED ||
- child->signal->group_stop_count))
+ child->signal->group_stop_count)) {
child->jobctl |= JOBCTL_STOP_PENDING;
+ /*
+ * This is only possible if this thread was cloned by the
+ * traced task running in the stopped group, set the signal
+ * for the future reports.
+ * FIXME: we should change ptrace_init_task() to handle this
+ * case.
+ */
+ if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+ child->jobctl |= SIGSTOP;
+ }
+
/*
* If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
* @child in the butt. Note that @resume should be used iff @child
diff --git a/kernel/rcu.h b/kernel/rcu.h
index f600868d550d..aa88baab5f78 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -30,6 +30,13 @@
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
+ * Process-level increment to ->dynticks_nesting field. This allows for
+ * architectures that use half-interrupts and half-exceptions from
+ * process context.
+ */
+#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)
+
+/*
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
* by call_rcu() and rcu callback execution, and are therefore not part of the
* RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
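
The point of the huge bias is that task-level execution can never be confused with the zero that means idle: interrupts and exceptions move the counter by small steps around the bias, and only the explicit reset in rcu_idle_enter() reaches zero. An illustrative walk of the counter (values only, not code from this patch):

        long long nesting = DYNTICK_TASK_NESTING;       /* running in a task */

        nesting++;      /* irq_enter(): still huge, still non-idle */
        nesting--;      /* irq_exit(): back to the task-level bias */
        nesting = 0;    /* rcu_idle_enter(): now genuinely idle */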
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c5b98e565aee..2bc4e135ff23 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -93,6 +93,8 @@ int rcu_read_lock_bh_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
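
Since rcu_read_lock_bh_held() now returns false on an RCU-idle CPU, lockdep-checked accessors built on top of it complain even where softirq context would previously have sufficed. A minimal sketch of an affected reader (my_pointer and struct my_struct are hypothetical):

        struct my_struct *p;

        rcu_read_lock_bh();
        p = rcu_dereference_bh(my_pointer);     /* splats if this CPU is RCU-idle */
        /* ... use p ... */
        rcu_read_unlock_bh();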
@@ -316,3 +318,13 @@ struct debug_obj_descr rcuhead_debug_descr = {
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
+void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
+{
+ trace_rcu_torture_read(rcutorturename, rhp);
+}
+EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 636af6d9c6e5..977296dca0a4 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,31 +53,137 @@ static void __call_rcu(struct rcu_head *head,
#include "rcutiny_plugin.h"
-#ifdef CONFIG_NO_HZ
+static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
-static long rcu_dynticks_nesting = 1;
+/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
+static void rcu_idle_enter_common(long long oldval)
+{
+ if (rcu_dynticks_nesting) {
+ RCU_TRACE(trace_rcu_dyntick("--=",
+ oldval, rcu_dynticks_nesting));
+ return;
+ }
+ RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+ oldval, rcu_dynticks_nesting));
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+ rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+}
/*
- * Enter dynticks-idle mode, which is an extended quiescent state
- * if we have fully entered that mode (i.e., if the new value of
- * dynticks_nesting is zero).
+ * Enter idle, which is an extended quiescent state if we have fully
+ * entered that mode (i.e., if the new value of dynticks_nesting is zero).
*/
-void rcu_enter_nohz(void)
+void rcu_idle_enter(void)
{
- if (--rcu_dynticks_nesting == 0)
- rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ rcu_dynticks_nesting = 0;
+ rcu_idle_enter_common(oldval);
+ local_irq_restore(flags);
}
/*
- * Exit dynticks-idle mode, so that we are no longer in an extended
- * quiescent state.
+ * Exit an interrupt handler towards idle.
*/
-void rcu_exit_nohz(void)
+void rcu_irq_exit(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ rcu_dynticks_nesting--;
+ WARN_ON_ONCE(rcu_dynticks_nesting < 0);
+ rcu_idle_enter_common(oldval);
+ local_irq_restore(flags);
+}
+
+/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
+static void rcu_idle_exit_common(long long oldval)
{
+ if (oldval) {
+ RCU_TRACE(trace_rcu_dyntick("++=",
+ oldval, rcu_dynticks_nesting));
+ return;
+ }
+ RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+ oldval, rcu_dynticks_nesting));
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+}
+
+/*
+ * Exit idle, so that we are no longer in an extended quiescent state.
+ */
+void rcu_idle_exit(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ WARN_ON_ONCE(oldval != 0);
+ rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
+ rcu_idle_exit_common(oldval);
+ local_irq_restore(flags);
+}
+
+/*
+ * Enter an interrupt handler, moving away from idle.
+ */
+void rcu_irq_enter(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
rcu_dynticks_nesting++;
+ WARN_ON_ONCE(rcu_dynticks_nesting == 0);
+ rcu_idle_exit_common(oldval);
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_PROVE_RCU
+
+/*
+ * Test whether RCU thinks that the current CPU is idle.
+ */
+int rcu_is_cpu_idle(void)
+{
+ return !rcu_dynticks_nesting;
}
+EXPORT_SYMBOL(rcu_is_cpu_idle);
+
+#endif /* #ifdef CONFIG_PROVE_RCU */
-#endif /* #ifdef CONFIG_NO_HZ */
+/*
+ * Test whether the current CPU was interrupted from idle. Nested
+ * interrupts don't count; we must be running at the first interrupt
+ * level.
+ */
+int rcu_is_cpu_rrupt_from_idle(void)
+{
+ return rcu_dynticks_nesting <= 0;
+}
/*
* Helper function for rcu_sched_qs() and rcu_bh_qs().
@@ -126,14 +232,13 @@ void rcu_bh_qs(int cpu)
/*
* Check to see if the scheduling-clock interrupt came from an extended
- * quiescent state, and, if so, tell RCU about it.
+ * quiescent state, and, if so, tell RCU about it. This function must
+ * be called from hardirq context. It is normally called from the
+ * scheduling-clock interrupt.
*/
void rcu_check_callbacks(int cpu, int user)
{
- if (user ||
- (idle_cpu(cpu) &&
- !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT)))
+ if (user || rcu_is_cpu_rrupt_from_idle())
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
@@ -154,7 +259,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
/* If no RCU callbacks ready to invoke, just return. */
if (&rcp->rcucblist == rcp->donetail) {
RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
+ ACCESS_ONCE(rcp->rcucblist),
+ need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread()));
return;
}
@@ -183,7 +292,9 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
RCU_TRACE(cb_count++);
}
RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread()));
}
static void rcu_process_callbacks(struct softirq_action *unused)
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 2b0484a5dc28..9cb1ae4aabdd 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -312,8 +312,8 @@ static int rcu_boost(void)
rt_mutex_lock(&mtx);
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
- return rcu_preempt_ctrlblk.boost_tasks != NULL ||
- rcu_preempt_ctrlblk.exp_tasks != NULL;
+ return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
+ ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}
/*
@@ -885,6 +885,19 @@ static void invoke_rcu_callbacks(void)
wake_up(&rcu_kthread_wq);
}
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+ return rcu_kthread_task == current;
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
/*
* This kthread invokes RCU callbacks whose grace periods have
* elapsed. It is awakened as needed, and takes the place of the
@@ -938,6 +951,18 @@ void invoke_rcu_callbacks(void)
raise_softirq(RCU_SOFTIRQ);
}
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * There is no callback kthread, so this thread is never it.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+ return false;
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
void rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 764825c2685c..88f17b8a3b1d 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -61,9 +61,11 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5; /* Start/stop testing interval (in sec) */
static int irqreader = 1; /* RCU readers from irq (timers). */
-static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
-static int fqs_holdoff = 0; /* Hold time within burst (us). */
+static int fqs_duration; /* Duration of bursts (us), 0 to disable. */
+static int fqs_holdoff; /* Hold time within burst (us). */
static int fqs_stutter = 3; /* Wait time between bursts (s). */
+static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */
+static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */
static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
@@ -91,6 +93,10 @@ module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
+module_param(onoff_interval, int, 0444);
+MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
+module_param(shutdown_secs, int, 0444);
+MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
@@ -119,6 +125,10 @@ static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
+static struct task_struct *shutdown_task;
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *onoff_task;
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#define RCU_TORTURE_PIPE_LEN 10
@@ -149,6 +159,10 @@ static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
+static long n_offline_attempts;
+static long n_offline_successes;
+static long n_online_attempts;
+static long n_online_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;
@@ -160,6 +174,8 @@ static int stutter_pause_test;
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
+module_param(rcutorture_runnable, int, 0444);
+MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
@@ -167,6 +183,7 @@ int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
+static unsigned long shutdown_time; /* jiffies to system shutdown. */
static unsigned long boost_starttime; /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
/* and boost task create/destroy. */
@@ -182,6 +199,9 @@ static int fullstop = FULLSTOP_RMMOD;
*/
static DEFINE_MUTEX(fullstop_mutex);
+/* Forward reference. */
+static void rcu_torture_cleanup(void);
+
/*
* Detect and respond to a system shutdown.
*/
@@ -612,6 +632,30 @@ static struct rcu_torture_ops srcu_ops = {
.name = "srcu"
};
+static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
+{
+ return srcu_read_lock_raw(&srcu_ctl);
+}
+
+static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
+{
+ srcu_read_unlock_raw(&srcu_ctl, idx);
+}
+
+static struct rcu_torture_ops srcu_raw_ops = {
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock_raw,
+ .read_delay = srcu_read_delay,
+ .readunlock = srcu_torture_read_unlock_raw,
+ .completed = srcu_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = srcu_torture_stats,
+ .name = "srcu_raw"
+};
+
static void srcu_torture_synchronize_expedited(void)
{
synchronize_srcu_expedited(&srcu_ctl);
@@ -913,6 +957,18 @@ rcu_torture_fakewriter(void *arg)
return 0;
}
+void rcutorture_trace_dump(void)
+{
+ static atomic_t beenhere = ATOMIC_INIT(0);
+
+ if (atomic_read(&beenhere))
+ return;
+ if (atomic_xchg(&beenhere, 1) != 0)
+ return;
+ do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
+ ftrace_dump(DUMP_ALL);
+}
+
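The beenhere pair is the usual execute-at-most-once idiom: the plain atomic_read() is a cheap fast path, and atomic_xchg() arbitrates the race so that exactly one caller performs the expensive ftrace dump. The same idiom in isolation (do_expensive_work() is hypothetical):

        static atomic_t once = ATOMIC_INIT(0);

        if (atomic_read(&once))                 /* fast path: already done */
                return;
        if (atomic_xchg(&once, 1) != 0)         /* exactly one caller wins */
                return;
        do_expensive_work();
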
/*
* RCU torture reader from timer handler. Dereferences rcu_torture_current,
* incrementing the corresponding element of the pipeline array. The
@@ -934,6 +990,7 @@ static void rcu_torture_timer(unsigned long unused)
rcu_read_lock_bh_held() ||
rcu_read_lock_sched_held() ||
srcu_read_lock_held(&srcu_ctl));
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
if (p == NULL) {
/* Leave because rcu_torture_writer is not yet underway */
cur_ops->readunlock(idx);
@@ -951,6 +1008,8 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
+ if (pipe_count > 1)
+ rcutorture_trace_dump();
__this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
@@ -994,6 +1053,7 @@ rcu_torture_reader(void *arg)
rcu_read_lock_bh_held() ||
rcu_read_lock_sched_held() ||
srcu_read_lock_held(&srcu_ctl));
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
cur_ops->readunlock(idx);
@@ -1009,6 +1069,8 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
+ if (pipe_count > 1)
+ rcutorture_trace_dump();
__this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
@@ -1056,7 +1118,8 @@ rcu_torture_printk(char *page)
cnt += sprintf(&page[cnt],
"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d rtbke: %ld rtbre: %ld "
- "rtbf: %ld rtb: %ld nt: %ld",
+ "rtbf: %ld rtb: %ld nt: %ld "
+ "onoff: %ld/%ld:%ld/%ld",
rcu_torture_current,
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
@@ -1068,7 +1131,11 @@ rcu_torture_printk(char *page)
n_rcu_torture_boost_rterror,
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
- n_rcu_torture_timers);
+ n_rcu_torture_timers,
+ n_online_successes,
+ n_online_attempts,
+ n_offline_successes,
+ n_offline_attempts);
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
n_rcu_torture_boost_ktrerror != 0 ||
n_rcu_torture_boost_rterror != 0 ||
@@ -1232,12 +1299,14 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
"shuffle_interval=%d stutter=%d irqreader=%d "
"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
"test_boost=%d/%d test_boost_interval=%d "
- "test_boost_duration=%d\n",
+ "test_boost_duration=%d shutdown_secs=%d "
+ "onoff_interval=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
stat_interval, verbose, test_no_idle_hz, shuffle_interval,
stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
test_boost, cur_ops->can_boost,
- test_boost_interval, test_boost_duration);
+ test_boost_interval, test_boost_duration, shutdown_secs,
+ onoff_interval);
}
static struct notifier_block rcutorture_shutdown_nb = {
@@ -1287,6 +1356,131 @@ static int rcutorture_booster_init(int cpu)
return 0;
}
+/*
+ * Cause the rcutorture test to shut down the system after the test has
+ * run for the time specified by the shutdown_secs module parameter.
+ */
+static int
+rcu_torture_shutdown(void *arg)
+{
+ long delta;
+ unsigned long jiffies_snap;
+
+ VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
+ jiffies_snap = ACCESS_ONCE(jiffies);
+ while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
+ !kthread_should_stop()) {
+ delta = shutdown_time - jiffies_snap;
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_shutdown task: %lu "
+ "jiffies remaining\n",
+ torture_type, delta);
+ schedule_timeout_interruptible(delta);
+ jiffies_snap = ACCESS_ONCE(jiffies);
+ }
+ if (kthread_should_stop()) {
+ VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
+ return 0;
+ }
+
+ /* OK, shut down the system. */
+
+ VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
+ shutdown_task = NULL; /* Avoid self-kill deadlock. */
+ rcu_torture_cleanup(); /* Get the success/failure message. */
+ kernel_power_off(); /* Shut down the system. */
+ return 0;
+}
+
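ULONG_CMP_LT() rather than a plain "<" keeps the deadline test correct even if jiffies wraps during a long run; it is defined in rcupdate.h along the lines of:

        #define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (b) - (a))

so a difference that lands in the top half of the unsigned range reads as "a is before b", wraparound included.
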
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Execute random CPU-hotplug operations at the interval specified
+ * by the onoff_interval.
+ */
+static int
+rcu_torture_onoff(void *arg)
+{
+ int cpu;
+ int maxcpu = -1;
+ DEFINE_RCU_RANDOM(rand);
+
+ VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
+ for_each_online_cpu(cpu)
+ maxcpu = cpu;
+ WARN_ON(maxcpu < 0);
+ while (!kthread_should_stop()) {
+ cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
+ if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: offlining %d\n",
+ torture_type, cpu);
+ n_offline_attempts++;
+ if (cpu_down(cpu) == 0) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: "
+ "offlined %d\n",
+ torture_type, cpu);
+ n_offline_successes++;
+ }
+ } else if (cpu_is_hotpluggable(cpu)) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: onlining %d\n",
+ torture_type, cpu);
+ n_online_attempts++;
+ if (cpu_up(cpu) == 0) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: "
+ "onlined %d\n",
+ torture_type, cpu);
+ n_online_successes++;
+ }
+ }
+ schedule_timeout_interruptible(onoff_interval * HZ);
+ }
+ VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
+ return 0;
+}
+
+static int
+rcu_torture_onoff_init(void)
+{
+ if (onoff_interval <= 0)
+ return 0;
+ onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
+ if (IS_ERR(onoff_task)) {
+ onoff_task = NULL;
+ return PTR_ERR(onoff_task);
+ }
+ return 0;
+}
+
+static void rcu_torture_onoff_cleanup(void)
+{
+ if (onoff_task == NULL)
+ return;
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
+ kthread_stop(onoff_task);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static int
+rcu_torture_onoff_init(void)
+{
+ return 0;
+}
+
+static void rcu_torture_onoff_cleanup(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
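Taken together with the parameter declarations above, a run that toggles a random hotpluggable CPU every three seconds and powers the machine off after three minutes would be started with something like "modprobe rcutorture onoff_interval=3 shutdown_secs=180" (illustrative invocation).
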
static int rcutorture_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
@@ -1391,6 +1585,11 @@ rcu_torture_cleanup(void)
for_each_possible_cpu(i)
rcutorture_booster_cleanup(i);
}
+ if (shutdown_task != NULL) {
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
+ kthread_stop(shutdown_task);
+ }
+ rcu_torture_onoff_cleanup();
/* Wait for all RCU callbacks to fire. */
@@ -1416,7 +1615,7 @@ rcu_torture_init(void)
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
&rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
- &srcu_ops, &srcu_expedited_ops,
+ &srcu_ops, &srcu_raw_ops, &srcu_expedited_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
mutex_lock(&fullstop_mutex);
@@ -1607,6 +1806,18 @@ rcu_torture_init(void)
}
}
}
+ if (shutdown_secs > 0) {
+ shutdown_time = jiffies + shutdown_secs * HZ;
+ shutdown_task = kthread_run(rcu_torture_shutdown, NULL,
+ "rcu_torture_shutdown");
+ if (IS_ERR(shutdown_task)) {
+ firsterr = PTR_ERR(shutdown_task);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
+ shutdown_task = NULL;
+ goto unwind;
+ }
+ }
+ rcu_torture_onoff_init();
register_reboot_notifier(&rcutorture_shutdown_nb);
rcutorture_record_test_transition();
mutex_unlock(&fullstop_mutex);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6b76d812740c..6c4a6722abfd 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -69,7 +69,7 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
NUM_RCU_LVL_3, \
NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
}, \
- .signaled = RCU_GP_IDLE, \
+ .fqs_state = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
@@ -195,12 +195,10 @@ void rcu_note_context_switch(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
-#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
- .dynticks_nesting = 1,
+ .dynticks_nesting = DYNTICK_TASK_NESTING,
.dynticks = ATOMIC_INIT(1),
};
-#endif /* #ifdef CONFIG_NO_HZ */
static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000; /* If this many pending, ignore blimit. */
@@ -328,11 +326,11 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
return 1;
}
- /* If preemptible RCU, no point in sending reschedule IPI. */
- if (rdp->preemptible)
- return 0;
-
- /* The CPU is online, so send it a reschedule IPI. */
+ /*
+ * The CPU is online, so send it a reschedule IPI. This forces
+ * it through the scheduler, and (inefficiently) also handles cases
+ * where idle loops fail to inform RCU about the CPU being idle.
+ */
if (rdp->cpu != smp_processor_id())
smp_send_reschedule(rdp->cpu);
else
@@ -343,59 +341,181 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
#endif /* #ifdef CONFIG_SMP */
-#ifdef CONFIG_NO_HZ
+/*
+ * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
+ *
+ * If the new value of the ->dynticks_nesting counter now is zero,
+ * we really have entered idle, and must do the appropriate accounting.
+ * The caller must have disabled interrupts.
+ */
+static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
+{
+ trace_rcu_dyntick("Start", oldval, 0);
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+ rcu_prepare_for_idle(smp_processor_id());
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+ atomic_inc(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+}
/**
- * rcu_enter_nohz - inform RCU that current CPU is entering nohz
+ * rcu_idle_enter - inform RCU that current CPU is entering idle
*
- * Enter nohz mode, in other words, -leave- the mode in which RCU
+ * Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side
- * critical sections can occur in irq handlers in nohz mode, a possibility
- * handled by rcu_irq_enter() and rcu_irq_exit()).
+ * critical sections can occur in irq handlers in idle, a possibility
+ * handled by irq_enter() and irq_exit().)
+ *
+ * We crowbar the ->dynticks_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
*/
-void rcu_enter_nohz(void)
+void rcu_idle_enter(void)
{
unsigned long flags;
+ long long oldval;
struct rcu_dynticks *rdtp;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- if (--rdtp->dynticks_nesting) {
- local_irq_restore(flags);
- return;
- }
- trace_rcu_dyntick("Start");
- /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
- atomic_inc(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+ oldval = rdtp->dynticks_nesting;
+ rdtp->dynticks_nesting = 0;
+ rcu_idle_enter_common(rdtp, oldval);
local_irq_restore(flags);
}
-/*
- * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
+/**
+ * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
+ *
+ * Exit from an interrupt handler, which might possibly result in entering
+ * idle mode, in other words, leaving the mode in which read-side critical
+ * sections can occur.
*
- * Exit nohz mode, in other words, -enter- the mode in which RCU
- * read-side critical sections normally occur.
+ * This code assumes that the idle loop never does anything that might
+ * result in unbalanced calls to irq_enter() and irq_exit(). If your
+ * architecture violates this assumption, RCU will give you what you
+ * deserve, good and hard. But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
*/
-void rcu_exit_nohz(void)
+void rcu_irq_exit(void)
{
unsigned long flags;
+ long long oldval;
struct rcu_dynticks *rdtp;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks_nesting++) {
- local_irq_restore(flags);
- return;
- }
+ oldval = rdtp->dynticks_nesting;
+ rdtp->dynticks_nesting--;
+ WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
+ if (rdtp->dynticks_nesting)
+ trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+ else
+ rcu_idle_enter_common(rdtp, oldval);
+ local_irq_restore(flags);
+}
+
+/*
+ * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ *
+ * If the new value of the ->dynticks_nesting counter was previously zero,
+ * we really have exited idle, and must do the appropriate accounting.
+ * The caller must have disabled interrupts.
+ */
+static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+{
smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
smp_mb__after_atomic_inc(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
- trace_rcu_dyntick("End");
+ rcu_cleanup_after_idle(smp_processor_id());
+ trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ trace_rcu_dyntick("Error on exit: not idle task",
+ oldval, rdtp->dynticks_nesting);
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+}
+
+/**
+ * rcu_idle_exit - inform RCU that current CPU is leaving idle
+ *
+ * Exit idle mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections can occur.
+ *
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
+ * allow for the possibility of usermode upcalls messing up our count
+ * of interrupt nesting level during the busy period that is just
+ * now starting.
+ */
+void rcu_idle_exit(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+ long long oldval;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ oldval = rdtp->dynticks_nesting;
+ WARN_ON_ONCE(oldval != 0);
+ rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
+ rcu_idle_exit_common(rdtp, oldval);
+ local_irq_restore(flags);
+}
+
+/**
+ * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
+ *
+ * Enter an interrupt handler, which might possibly result in exiting
+ * idle mode, in other words, entering the mode in which read-side critical
+ * sections can occur.
+ *
+ * Note that the Linux kernel is fully capable of entering an interrupt
+ * handler that it never exits, for example when doing upcalls to
+ * user mode! This code assumes that the idle loop never does upcalls to
+ * user mode. If your architecture does do upcalls from the idle loop (or
+ * does anything else that results in unbalanced calls to the irq_enter()
+ * and irq_exit() functions), RCU will give you what you deserve, good
+ * and hard. But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ */
+void rcu_irq_enter(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+ long long oldval;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ oldval = rdtp->dynticks_nesting;
+ rdtp->dynticks_nesting++;
+ WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
+ if (oldval)
+ trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+ else
+ rcu_idle_exit_common(rdtp, oldval);
local_irq_restore(flags);
}
@@ -442,27 +562,37 @@ void rcu_nmi_exit(void)
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
+#ifdef CONFIG_PROVE_RCU
+
/**
- * rcu_irq_enter - inform RCU of entry to hard irq context
+ * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
*
- * If the CPU was idle with dynamic ticks active, this updates the
- * rdtp->dynticks to let the RCU handling know that the CPU is active.
+ * If the current CPU is in its idle loop and is neither in an interrupt
+ * handler nor in an NMI handler, return true.
*/
-void rcu_irq_enter(void)
+int rcu_is_cpu_idle(void)
{
- rcu_exit_nohz();
+ int ret;
+
+ preempt_disable();
+ ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+ preempt_enable();
+ return ret;
}
+EXPORT_SYMBOL(rcu_is_cpu_idle);
+
+#endif /* #ifdef CONFIG_PROVE_RCU */
/**
- * rcu_irq_exit - inform RCU of exit from hard irq context
+ * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
*
- * If the CPU was idle with dynamic ticks active, update the rdp->dynticks
- * to put let the RCU handling be aware that the CPU is going back to idle
- * with no ticks.
+ * If the current CPU is idle or running at a first-level (not nested)
+ * interrupt from idle, return true. The caller must have at least
+ * disabled preemption.
*/
-void rcu_irq_exit(void)
+int rcu_is_cpu_rrupt_from_idle(void)
{
- rcu_enter_nohz();
+ return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
}
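
The "<= 1" threshold reads directly off the nesting convention established above (illustrative values for tree RCU):

        DYNTICK_TASK_NESTING    /* running in a task: emphatically non-idle */
        0                       /* the idle loop proper */
        1                       /* first-level interrupt taken from idle */

so idle and a first-level interrupt from idle both qualify, while nested interrupts and task-level execution do not.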
#ifdef CONFIG_SMP
@@ -475,7 +605,7 @@ void rcu_irq_exit(void)
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
- return 0;
+ return (rdp->dynticks_snap & 0x1) == 0;
}
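
Because ->dynticks is incremented on every idle transition and is even exactly when the CPU is idle, the snapshot alone can prove a quiescent state. Condensed (rdtp stands for a hypothetical struct rcu_dynticks pointer):

        int snap = atomic_add_return(0, &rdtp->dynticks);      /* full barrier */

        if ((snap & 0x1) == 0)
                ;       /* CPU was dyntick-idle: that is a quiescent state */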
/*
@@ -512,26 +642,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
#endif /* #ifdef CONFIG_SMP */
-#else /* #ifdef CONFIG_NO_HZ */
-
-#ifdef CONFIG_SMP
-
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
-{
- return 0;
-}
-
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
-{
- return rcu_implicit_offline_qs(rdp);
-}
-
-#endif /* #ifdef CONFIG_SMP */
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-int rcu_cpu_stall_suppress __read_mostly;
-
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
rsp->gp_start = jiffies;
@@ -866,8 +976,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
/* Advance to a new grace period and initialize state. */
rsp->gpnum++;
trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
- WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
- rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
+ WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
+ rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
record_gp_stall_check_time(rsp);
@@ -877,7 +987,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
rnp->qsmask = rnp->qsmaskinit;
rnp->gpnum = rsp->gpnum;
rnp->completed = rsp->completed;
- rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
+ rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */
rcu_start_gp_per_cpu(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
@@ -927,7 +1037,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
rnp = rcu_get_root(rsp);
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+ rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}
@@ -991,7 +1101,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
rsp->completed = rsp->gpnum; /* Declare the grace period complete. */
trace_rcu_grace_period(rsp->name, rsp->completed, "end");
- rsp->signaled = RCU_GP_IDLE;
+ rsp->fqs_state = RCU_GP_IDLE;
rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
}
@@ -1221,7 +1331,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
else
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (need_report & RCU_OFL_TASKS_EXP_GP)
- rcu_report_exp_rnp(rsp, rnp);
+ rcu_report_exp_rnp(rsp, rnp, true);
rcu_node_kthread_setaffinity(rnp, -1);
}
@@ -1263,7 +1373,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/* If no callbacks are ready, just return.*/
if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
trace_rcu_batch_start(rsp->name, 0, 0);
- trace_rcu_batch_end(rsp->name, 0);
+ trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+ need_resched(), is_idle_task(current),
+ rcu_is_callbacks_kthread());
return;
}
@@ -1291,12 +1403,17 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
debug_rcu_head_unqueue(list);
__rcu_reclaim(rsp->name, list);
list = next;
- if (++count >= bl)
+ /* Stop only if limit reached and CPU has something to do. */
+ if (++count >= bl &&
+ (need_resched() ||
+ (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
break;
}
local_irq_save(flags);
- trace_rcu_batch_end(rsp->name, count);
+ trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread());
/* Update count, and requeue any remaining callbacks. */
rdp->qlen -= count;
@@ -1334,16 +1451,14 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
* (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
* Also schedule RCU core processing.
*
- * This function must be called with hardirqs disabled. It is normally
+ * This function must be called from hardirq context. It is normally
* invoked from the scheduling-clock interrupt. If rcu_pending returns
* false, there is no point in invoking rcu_check_callbacks().
*/
void rcu_check_callbacks(int cpu, int user)
{
trace_rcu_utilization("Start scheduler-tick");
- if (user ||
- (idle_cpu(cpu) && rcu_scheduler_active &&
- !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+ if (user || rcu_is_cpu_rrupt_from_idle()) {
/*
* Get here if this CPU took its interrupt from user
@@ -1457,7 +1572,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
goto unlock_fqs_ret; /* no GP in progress, time updated. */
}
rsp->fqs_active = 1;
- switch (rsp->signaled) {
+ switch (rsp->fqs_state) {
case RCU_GP_IDLE:
case RCU_GP_INIT:
@@ -1473,7 +1588,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
force_qs_rnp(rsp, dyntick_save_progress_counter);
raw_spin_lock(&rnp->lock); /* irqs already disabled */
if (rcu_gp_in_progress(rsp))
- rsp->signaled = RCU_FORCE_QS;
+ rsp->fqs_state = RCU_FORCE_QS;
break;
case RCU_FORCE_QS:
@@ -1812,7 +1927,7 @@ static int rcu_pending(int cpu)
* by the current CPU, even if none need be done immediately, returning
* 1 if so.
*/
-static int rcu_needs_cpu_quick_check(int cpu)
+static int rcu_cpu_has_callbacks(int cpu)
{
/* RCU callbacks either ready or pending? */
return per_cpu(rcu_sched_data, cpu).nxtlist ||
@@ -1913,9 +2028,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
for (i = 0; i < RCU_NEXT_SIZE; i++)
rdp->nxttail[i] = &rdp->nxtlist;
rdp->qlen = 0;
-#ifdef CONFIG_NO_HZ
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-#endif /* #ifdef CONFIG_NO_HZ */
+ WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
+ WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
rdp->cpu = cpu;
rdp->rsp = rsp;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1942,6 +2057,10 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;
+ rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
+ atomic_set(&rdp->dynticks->dynticks,
+ (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+ rcu_prepare_for_idle_init(cpu);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
/*
@@ -2023,6 +2142,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
rcu_send_cbs_to_online(&rcu_bh_state);
rcu_send_cbs_to_online(&rcu_sched_state);
rcu_preempt_send_cbs_to_online();
+ rcu_cleanup_after_idle(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 849ce9ec51fe..fddff92d6676 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,9 +84,10 @@
* Dynticks per-CPU state.
*/
struct rcu_dynticks {
- int dynticks_nesting; /* Track irq/process nesting level. */
- int dynticks_nmi_nesting; /* Track NMI nesting level. */
- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
+ long long dynticks_nesting; /* Track irq/process nesting level. */
+ /* Process level is worth LLONG_MAX/2. */
+ int dynticks_nmi_nesting; /* Track NMI nesting level. */
+ atomic_t dynticks; /* Even value for idle, else odd. */
};
/* RCU's kthread states for tracing. */
@@ -274,16 +275,12 @@ struct rcu_data {
/* did other CPU force QS recently? */
long blimit; /* Upper limit on a processed batch */
-#ifdef CONFIG_NO_HZ
/* 3) dynticks interface. */
struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
int dynticks_snap; /* Per-GP tracking for dynticks. */
-#endif /* #ifdef CONFIG_NO_HZ */
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-#ifdef CONFIG_NO_HZ
unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
-#endif /* #ifdef CONFIG_NO_HZ */
unsigned long offline_fqs; /* Kicked due to being offline. */
unsigned long resched_ipi; /* Sent a resched IPI. */
@@ -302,16 +299,12 @@ struct rcu_data {
struct rcu_state *rsp;
};
-/* Values for signaled field in struct rcu_state. */
+/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE 0 /* No grace period in progress. */
#define RCU_GP_INIT 1 /* Grace period being initialized. */
#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
-#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
-#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT RCU_FORCE_QS
-#endif /* #else #ifdef CONFIG_NO_HZ */
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
@@ -361,7 +354,7 @@ struct rcu_state {
/* The following fields are guarded by the root rcu_node's lock. */
- u8 signaled ____cacheline_internodealigned_in_smp;
+ u8 fqs_state ____cacheline_internodealigned_in_smp;
/* Force QS state. */
u8 fqs_active; /* force_quiescent_state() */
/* is running. */
@@ -451,7 +444,8 @@ static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+ bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
@@ -461,6 +455,7 @@ static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
+static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
@@ -473,5 +468,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_for_idle_init(int cpu);
+static void rcu_cleanup_after_idle(int cpu);
+static void rcu_prepare_for_idle(int cpu);
#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 4b9b9f8a4184..8bb35d73e1f9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -312,6 +312,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
+ int empty_exp_now;
unsigned long flags;
struct list_head *np;
#ifdef CONFIG_RCU_BOOST
@@ -382,8 +383,10 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
/*
* If this was the last task on the current list, and if
* we aren't waiting on any CPUs, report the quiescent state.
- * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
+ * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
+ * so we must take a snapshot of the expedited state.
*/
+ empty_exp_now = !rcu_preempted_readers_exp(rnp);
if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
trace_rcu_quiescent_state_report("preempt_rcu",
rnp->gpnum,
@@ -406,8 +409,8 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
* If this was the last task on the expedited lists,
* then we need to report up the rcu_node hierarchy.
*/
- if (!empty_exp && !rcu_preempted_readers_exp(rnp))
- rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+ if (!empty_exp && empty_exp_now)
+ rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
} else {
local_irq_restore(flags);
}
@@ -729,9 +732,13 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
* recursively up the tree. (Calm down, calm down, we do the recursion
* iteratively!)
*
+ * Most callers will set the "wake" flag, but the task initiating the
+ * expedited grace period need not wake itself.
+ *
* Caller must hold sync_rcu_preempt_exp_mutex.
*/
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+ bool wake)
{
unsigned long flags;
unsigned long mask;
@@ -744,7 +751,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
}
if (rnp->parent == NULL) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- wake_up(&sync_rcu_preempt_exp_wq);
+ if (wake)
+ wake_up(&sync_rcu_preempt_exp_wq);
break;
}
mask = rnp->grpmask;
@@ -777,7 +785,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
must_wait = 1;
}
if (!must_wait)
- rcu_report_exp_rnp(rsp, rnp);
+ rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}
/*
@@ -1069,9 +1077,9 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
* report on tasks preempted in RCU read-side critical sections during
* expedited RCU grace periods.
*/
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+ bool wake)
{
- return;
}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1157,8 +1165,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
-static struct lock_class_key rcu_boost_class;
-
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
@@ -1221,15 +1227,13 @@ static int rcu_boost(struct rcu_node *rnp)
*/
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&mtx, t);
- /* Avoid lockdep false positives. This rt_mutex is its own thing. */
- lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
- "rcu_boost_mutex");
t->rcu_boost_mutex = &mtx;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
- return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
+ return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
+ ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
/*
@@ -1329,6 +1333,15 @@ static void invoke_rcu_callbacks_kthread(void)
}
/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+ return __get_cpu_var(rcu_cpu_kthread_task) == current;
+}
+
+/*
* Set the affinity of the boost kthread. The CPU-hotplug locks are
* held, so no one should be messing with the existence of the boost
* kthread.
@@ -1772,6 +1785,11 @@ static void invoke_rcu_callbacks_kthread(void)
WARN_ON_ONCE(1);
}
+static bool rcu_is_callbacks_kthread(void)
+{
+ return false;
+}
+
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}
@@ -1907,7 +1925,7 @@ void synchronize_sched_expedited(void)
* grace period works for us.
*/
get_online_cpus();
- snap = atomic_read(&sync_sched_expedited_started) - 1;
+ snap = atomic_read(&sync_sched_expedited_started);
smp_mb(); /* ensure read is before try_stop_cpus(). */
}
@@ -1939,88 +1957,243 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
* 1 if so. This function is part of the RCU implementation; it is -not-
* an exported member of the RCU API.
*
- * Because we have preemptible RCU, just check whether this CPU needs
- * any flavor of RCU. Do not chew up lots of CPU cycles with preemption
- * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
+ * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
+ * any flavor of RCU.
*/
int rcu_needs_cpu(int cpu)
{
- return rcu_needs_cpu_quick_check(cpu);
+ return rcu_cpu_has_callbacks(cpu);
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
+ * after it.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+}
+
+/*
+ * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
+ * is nothing.
+ */
+static void rcu_prepare_for_idle(int cpu)
+{
}
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
-#define RCU_NEEDS_CPU_FLUSHES 5
+/*
+ * This code is invoked when a CPU goes idle, at which point we want
+ * to have the CPU do everything required for RCU so that it can enter
+ * the energy-efficient dyntick-idle mode. This is handled by a
+ * state machine implemented by rcu_prepare_for_idle() below.
+ *
+ * The following three preprocessor symbols control this state machine:
+ *
+ * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
+ * to satisfy RCU. Beyond this point, it is better to incur a periodic
+ * scheduling-clock interrupt than to loop through the state machine
+ * at full power.
+ * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
+ * optional if RCU does not need anything immediately from this
+ * CPU, even if this CPU still has RCU callbacks queued. The first
+ * times through the state machine are mandatory: we need to give
+ * the state machine a chance to communicate a quiescent state
+ * to the RCU core.
+ * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
+ * to sleep in dyntick-idle mode with RCU callbacks pending. This
+ * is sized to be roughly one RCU grace period. Those energy-efficiency
+ * benchmarkers who might otherwise be tempted to set this to a large
+ * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
+ * system. And if you are -that- concerned about energy efficiency,
+ * just power the system down and be done with it!
+ *
+ * The values below work well in practice. If future workloads require
+ * adjustment, they can be converted into kernel config parameters, though
+ * making the state machine smarter might be a better option.
+ */
+#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
+#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
+#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
+
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
+static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
+static ktime_t rcu_idle_gp_wait;
/*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
+ * Allow the CPU to enter dyntick-idle mode if any of: (1) there are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it. After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ */
+int rcu_needs_cpu(int cpu)
+{
+ /* If no callbacks, RCU doesn't need the CPU. */
+ if (!rcu_cpu_has_callbacks(cpu))
+ return 0;
+ /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
+ return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
+}
+
+/*
+ * Timer handler used to force the CPU to start pushing its remaining RCU
+ * callbacks in the case where it entered dyntick-idle mode with callbacks
+ * pending. The handler doesn't really need to do anything because the
+ * real work is done upon re-entry to idle, or by the next scheduling-clock
+ * interrupt should idle not be re-entered.
+ */
+static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+{
+ trace_rcu_prep_idle("Timer");
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Initialize the timer used to pull CPUs out of dyntick-idle mode.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+ static int firsttime = 1;
+ struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+
+ hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtp->function = rcu_idle_gp_timer_func;
+ if (firsttime) {
+ unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
+
+ rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
+ firsttime = 0;
+ }
+}
+
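The firsttime block converts the jiffies-denominated RCU_IDLE_GP_DELAY into the ktime_t that hrtimer_start() expects, exactly once. Spelled out with this patch's values (illustrative):

        unsigned int upj = jiffies_to_usecs(6);         /* RCU_IDLE_GP_DELAY jiffies */
        ktime_t wait = ns_to_ktime(upj * (u64)1000);    /* microseconds -> nanoseconds */
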
+/*
+ * Clean up for exit from idle. Because we are exiting from idle, there
+ * is no longer any point to rcu_idle_gp_timer, so cancel it. This will
+ * do nothing if this timer is not active, so just cancel it unconditionally.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+ hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+}
+
+/*
+ * Check to see if any RCU-related work can be done by the current CPU,
+ * and if so, schedule a softirq to get it done. This function is part
+ * of the RCU implementation; it is -not- an exported member of the RCU API.
*
- * Because we are not supporting preemptible RCU, attempt to accelerate
- * any current grace periods so that RCU no longer needs this CPU, but
- * only if all other CPUs are already in dynticks-idle mode. This will
- * allow the CPU cores to be powered down immediately, as opposed to after
- * waiting many milliseconds for grace periods to elapse.
+ * The idea is for the current CPU to clear out all work required by the
+ * RCU core for the current grace period, so that this CPU can be permitted
+ * to enter dyntick-idle mode. In some cases, it will need to be awakened
+ * at the end of the grace period by whatever CPU ends the grace period.
+ * This allows CPUs to go dyntick-idle more quickly, and to reduce the
+ * number of wakeups by a modest integer factor.
*
* Because it is not legal to invoke rcu_process_callbacks() with irqs
* disabled, we do one pass of force_quiescent_state(), then do an
* invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
* later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ *
+ * The caller must have disabled interrupts.
*/
-int rcu_needs_cpu(int cpu)
+static void rcu_prepare_for_idle(int cpu)
{
- int c = 0;
- int snap;
- int thatcpu;
-
- /* Check for being in the holdoff period. */
- if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
- return rcu_needs_cpu_quick_check(cpu);
-
- /* Don't bother unless we are the last non-dyntick-idle CPU. */
- for_each_online_cpu(thatcpu) {
- if (thatcpu == cpu)
- continue;
- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
- thatcpu).dynticks);
- smp_mb(); /* Order sampling of snap with end of grace period. */
- if ((snap & 0x1) != 0) {
- per_cpu(rcu_dyntick_drain, cpu) = 0;
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
- return rcu_needs_cpu_quick_check(cpu);
- }
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * If there are no callbacks on this CPU, enter dyntick-idle mode.
+ * Also reset state to avoid prejudicing later attempts.
+ */
+ if (!rcu_cpu_has_callbacks(cpu)) {
+ per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+ per_cpu(rcu_dyntick_drain, cpu) = 0;
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("No callbacks");
+ return;
+ }
+
+ /*
+ * If in holdoff mode, just return. We will presumably have
+ * refrained from disabling the scheduling-clock tick.
+ */
+ if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("In holdoff");
+ return;
}
/* Check and update the rcu_dyntick_drain sequencing. */
if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
/* First time through, initialize the counter. */
- per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+ per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
+ } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+ !rcu_pending(cpu)) {
+ /* Can we go dyntick-idle despite still having callbacks? */
+ trace_rcu_prep_idle("Dyntick with callbacks");
+ per_cpu(rcu_dyntick_drain, cpu) = 0;
+ per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+ hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+ rcu_idle_gp_wait, HRTIMER_MODE_REL);
+ return; /* Nothing more to do immediately. */
} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
/* We have hit the limit, so time to give up. */
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
- return rcu_needs_cpu_quick_check(cpu);
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("Begin holdoff");
+ invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
+ return;
}
- /* Do one step pushing remaining RCU callbacks through. */
+ /*
+ * Do one step of pushing the remaining RCU callbacks through
+ * the RCU core state machine.
+ */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
+ local_irq_restore(flags);
+ rcu_preempt_qs(cpu);
+ force_quiescent_state(&rcu_preempt_state, 0);
+ local_irq_save(flags);
+ }
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+ local_irq_restore(flags);
rcu_sched_qs(cpu);
force_quiescent_state(&rcu_sched_state, 0);
- c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+ local_irq_save(flags);
}
if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+ local_irq_restore(flags);
rcu_bh_qs(cpu);
force_quiescent_state(&rcu_bh_state, 0);
- c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
+ local_irq_save(flags);
}
- /* If RCU callbacks are still pending, RCU still needs this CPU. */
- if (c)
+ /*
+ * If RCU callbacks are still pending, RCU still needs this CPU.
+ * So try forcing the callbacks through the grace period.
+ */
+ if (rcu_cpu_has_callbacks(cpu)) {
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("More callbacks");
invoke_rcu_core();
- return c;
+ } else {
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("Callbacks drained");
+ }
}
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
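The restore/call/save bracketing used throughout rcu_prepare_for_idle() is easy to get wrong; a minimal sketch of the pattern, under the assumption that fn() is illegal to call with irqs disabled:

/* Hypothetical sketch of the bracketing pattern above: briefly re-enable
 * interrupts around a call that must not run irq-disabled, then disable
 * them again before resuming the irq-off section. */
static void example_bracketed_call(void (*fn)(void))
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... work that requires interrupts to be disabled ... */
	local_irq_restore(flags);
	fn();			/* must not be invoked with irqs disabled */
	local_irq_save(flags);
	/* ... more irq-disabled work ... */
	local_irq_restore(flags);
}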
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9feffa4c0695..654cfe67f0d1 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -67,13 +67,11 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->completed, rdp->gpnum,
rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
rdp->qs_pending);
-#ifdef CONFIG_NO_HZ
- seq_printf(m, " dt=%d/%d/%d df=%lu",
+ seq_printf(m, " dt=%d/%llx/%d df=%lu",
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
-#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, " ql=%ld qs=%c%c%c%c",
rdp->qlen,
@@ -141,13 +139,11 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->completed, rdp->gpnum,
rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
rdp->qs_pending);
-#ifdef CONFIG_NO_HZ
- seq_printf(m, ",%d,%d,%d,%lu",
+ seq_printf(m, ",%d,%llx,%d,%lu",
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
-#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
@@ -171,9 +167,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
static int show_rcudata_csv(struct seq_file *m, void *unused)
{
seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
-#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
-#endif /* #ifdef CONFIG_NO_HZ */
seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
#ifdef CONFIG_RCU_BOOST
seq_puts(m, "\"kt\",\"ktl\"");
@@ -278,7 +272,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
gpnum = rsp->gpnum;
seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
"nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
- rsp->completed, gpnum, rsp->signaled,
+ rsp->completed, gpnum, rsp->fqs_state,
(long)(rsp->jiffies_force_qs - jiffies),
(int)(jiffies & 0xffff),
rsp->n_force_qs, rsp->n_force_qs_ngp,
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 8eafd1bd273e..16502d3a71c8 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -101,6 +101,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
printk("\n============================================\n");
printk( "[ BUG: circular locking deadlock detected! ]\n");
+ printk("%s\n", print_tainted());
printk( "--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
task->comm, task_pid_nr(task),
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index f9d8482dd487..a242e691c993 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,7 +579,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct rt_mutex_waiter *waiter)
{
int ret = 0;
- int was_disabled;
for (;;) {
/* Try to acquire the lock: */
@@ -602,17 +601,10 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
raw_spin_unlock(&lock->wait_lock);
- was_disabled = irqs_disabled();
- if (was_disabled)
- local_irq_enable();
-
debug_rt_mutex_print_deadlock(waiter);
schedule_rt_mutex(lock);
- if (was_disabled)
- local_irq_disable();
-
raw_spin_lock(&lock->wait_lock);
set_current_state(state);
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 739ef2bf105c..56ce3a618b28 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1992,8 +1992,6 @@ static bool do_signal_stop(int signr)
*/
if (!(sig->flags & SIGNAL_STOP_STOPPED))
sig->group_exit_code = signr;
- else
- WARN_ON_ONCE(!current->ptrace);
sig->group_stop_count = 0;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2c71d91efff0..4eb3a0fa351e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -347,12 +347,12 @@ void irq_exit(void)
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
- rcu_irq_exit();
#ifdef CONFIG_NO_HZ
/* Make sure that timer wheel updates are propagated */
if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
- tick_nohz_stop_sched_tick(0);
+ tick_nohz_irq_exit();
#endif
+ rcu_irq_exit();
preempt_enable_no_resched();
}
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b511afa1..a650694883a1 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
fput(file);
out_putname:
- putname(pathname);
+ __putname(pathname);
out:
return result;
}
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c4eb71c8b2ea..1ecd6ba36d6c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -387,7 +387,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
* released list and do a notify add later.
*/
if (old) {
- old->event_handler = clockevents_handle_noop;
clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
list_del(&old->list);
list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index da2f760e780c..d3ad022136e5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -647,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
/**
 * __clocksource_updatefreq_scale - Used to update clocksource with new freq
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
/**
* __clocksource_register_scale - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
/**
* clocksource_register - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
*
* Returns -EBUSY if registration fails, zero otherwise.
*/
@@ -761,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
/**
* clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs: clocksource to be changed
+ * @rating: new rating
*/
void clocksource_change_rating(struct clocksource *cs, int rating)
{
@@ -772,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
/**
* clocksource_unregister - remove a registered clocksource
+ * @cs: clocksource to be unregistered
*/
void clocksource_unregister(struct clocksource *cs)
{
@@ -787,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
/**
* sysfs_show_current_clocksources - sysfs interface for current clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing current clocksource.
@@ -807,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
/**
* sysfs_override_clocksource - interface for manually overriding clocksource
* @dev: unused
+ * @attr: unused
* @buf: name of override clocksource
* @count: length of buffer
*
@@ -842,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
/**
* sysfs_show_available_clocksources - sysfs interface for listing clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing registered clocksources
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 31cc06163ed5..7656642e4b8e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -275,51 +275,17 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
-/**
- * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
- *
- * When the next event is more than a tick into the future, stop the idle tick
- * Called either from the idle loop or from irq_exit() when an idle period was
- * just interrupted by an interrupt which did not cause a reschedule.
- */
-void tick_nohz_stop_sched_tick(int inidle)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
- unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
- struct tick_sched *ts;
+ unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
u64 time_delta;
int cpu;
- local_irq_save(flags);
-
cpu = smp_processor_id();
ts = &per_cpu(tick_cpu_sched, cpu);
- /*
- * Update the idle state in the scheduler domain hierarchy
- * when tick_nohz_stop_sched_tick() is called from the idle loop.
- * State will be updated to busy during the first busy tick after
- * exiting idle.
- */
- if (inidle)
- set_cpu_sd_state_idle();
-
- /*
- * Call to tick_nohz_start_idle stops the last_update_time from being
- * updated. Thus, it must not be called in the event we are called from
- * irq_exit() with the prior state different than idle.
- */
- if (!inidle && !ts->inidle)
- goto end;
-
- /*
- * Set ts->inidle unconditionally. Even if the system did not
- * switch to NOHZ mode the cpu frequency governers rely on the
- * update of the idle time accounting in tick_nohz_start_idle().
- */
- ts->inidle = 1;
-
now = tick_nohz_start_idle(cpu, ts);
/*
@@ -335,10 +301,10 @@ void tick_nohz_stop_sched_tick(int inidle)
}
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
- goto end;
+ return;
if (need_resched())
- goto end;
+ return;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
static int ratelimit;
@@ -348,7 +314,7 @@ void tick_nohz_stop_sched_tick(int inidle)
(unsigned int) local_softirq_pending());
ratelimit++;
}
- goto end;
+ return;
}
ts->idle_calls++;
@@ -443,7 +409,6 @@ void tick_nohz_stop_sched_tick(int inidle)
ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
ts->idle_jiffies = last_jiffies;
- rcu_enter_nohz();
}
ts->idle_sleeps++;
@@ -481,8 +446,64 @@ out:
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
- local_irq_restore(flags);
+}
+
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick.
+ * Called when we start the idle loop.
+ *
+ * The arch is responsible for calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ * to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ */
+void tick_nohz_idle_enter(void)
+{
+ struct tick_sched *ts;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ /*
+ * Update the idle state in the scheduler domain hierarchy
+ * when tick_nohz_stop_sched_tick() is called from the idle loop.
+ * State will be updated to busy during the first busy tick after
+ * exiting idle.
+ */
+ set_cpu_sd_state_idle();
+
+ local_irq_disable();
+
+ ts = &__get_cpu_var(tick_cpu_sched);
+ /*
+ * Set ts->inidle unconditionally. Even if the system did not
+ * switch to NOHZ mode the cpu frequency governors rely on the
+ * update of the idle time accounting in tick_nohz_start_idle().
+ */
+ ts->inidle = 1;
+ tick_nohz_stop_sched_tick(ts);
+
+ local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+ if (!ts->inidle)
+ return;
+
+ tick_nohz_stop_sched_tick(ts);
}
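Under the split API, an architecture's idle loop would look roughly like the following sketch (hypothetical; it only illustrates the rcu_idle_enter()/rcu_idle_exit() responsibility noted in the tick_nohz_idle_enter() comment above):

/* Hypothetical arch idle loop: the tick is stopped once on idle entry,
 * while RCU's extended quiescent state brackets only the actual sleep. */
static void example_cpu_idle(void)
{
	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched()) {
			rcu_idle_enter();	/* last use of RCU before sleeping */
			/* arch-specific low-power wait, e.g. wfi/hlt */
			rcu_idle_exit();	/* first use of RCU after wakeup */
		}
		tick_nohz_idle_exit();
		schedule();
	}
}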
/**
@@ -524,11 +545,13 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
}
/**
- * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
*
* Restart the idle tick when the CPU is woken up from idle
+ * This also exits the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
*/
-void tick_nohz_restart_sched_tick(void)
+void tick_nohz_idle_exit(void)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -538,6 +561,7 @@ void tick_nohz_restart_sched_tick(void)
ktime_t now;
local_irq_disable();
+
if (ts->idle_active || (ts->inidle && ts->tick_stopped))
now = ktime_get();
@@ -552,8 +576,6 @@ void tick_nohz_restart_sched_tick(void)
ts->inidle = 0;
- rcu_exit_nohz();
-
/* Update jiffies first */
select_nohz_load_balancer(0);
tick_do_update_jiffies64(now);
diff --git a/kernel/timer.c b/kernel/timer.c
index 9c3c62b0c4bc..a297ffcf888e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -427,6 +427,12 @@ static int timer_fixup_init(void *addr, enum debug_obj_state state)
}
}
+/* Stub timer callback for improperly used timers. */
+static void stub_timer(unsigned long data)
+{
+ WARN_ON(1);
+}
+
/*
* fixup_activate is called when:
* - an active object is activated
@@ -450,7 +456,8 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state)
debug_object_activate(timer, &timer_debug_descr);
return 0;
} else {
- WARN_ON_ONCE(1);
+ setup_timer(timer, stub_timer, 0);
+ return 1;
}
return 0;
@@ -480,12 +487,40 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
}
}
+/*
+ * fixup_assert_init is called when:
+ * - an untracked/uninit-ed object is found
+ */
+static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+ struct timer_list *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+ /*
+ * This is not really a fixup. The timer was
+ * statically initialized. We just make sure that it
+ * is tracked in the object tracker.
+ */
+ debug_object_init(timer, &timer_debug_descr);
+ return 0;
+ } else {
+ setup_timer(timer, stub_timer, 0);
+ return 1;
+ }
+ default:
+ return 0;
+ }
+}
+
static struct debug_obj_descr timer_debug_descr = {
- .name = "timer_list",
- .debug_hint = timer_debug_hint,
- .fixup_init = timer_fixup_init,
- .fixup_activate = timer_fixup_activate,
- .fixup_free = timer_fixup_free,
+ .name = "timer_list",
+ .debug_hint = timer_debug_hint,
+ .fixup_init = timer_fixup_init,
+ .fixup_activate = timer_fixup_activate,
+ .fixup_free = timer_fixup_free,
+ .fixup_assert_init = timer_fixup_assert_init,
};
static inline void debug_timer_init(struct timer_list *timer)
@@ -508,6 +543,11 @@ static inline void debug_timer_free(struct timer_list *timer)
debug_object_free(timer, &timer_debug_descr);
}
+static inline void debug_timer_assert_init(struct timer_list *timer)
+{
+ debug_object_assert_init(timer, &timer_debug_descr);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key);
@@ -531,6 +571,7 @@ EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
+static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
@@ -552,6 +593,11 @@ static inline void debug_deactivate(struct timer_list *timer)
trace_timer_cancel(timer);
}
+static inline void debug_assert_init(struct timer_list *timer)
+{
+ debug_timer_assert_init(timer);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
@@ -902,6 +948,8 @@ int del_timer(struct timer_list *timer)
unsigned long flags;
int ret = 0;
+ debug_assert_init(timer);
+
timer_stats_timer_clear_start_info(timer);
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
@@ -932,6 +980,8 @@ int try_to_del_timer_sync(struct timer_list *timer)
unsigned long flags;
int ret = -1;
+ debug_assert_init(timer);
+
base = lock_timer_base(timer, &flags);
if (base->running_timer == timer)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f2bd275bb60f..91dc4bc8bf72 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -338,7 +338,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
+ TRACE_ITER_IRQ_INFO;
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
@@ -426,6 +427,7 @@ static const char *trace_options[] = {
"record-cmd",
"overwrite",
"disable_on_free",
+ "irq-info",
NULL
};
@@ -1843,6 +1845,33 @@ static void s_stop(struct seq_file *m, void *p)
trace_event_read_unlock();
}
+static void
+get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+{
+ unsigned long count;
+ int cpu;
+
+ *total = 0;
+ *entries = 0;
+
+ for_each_tracing_cpu(cpu) {
+ count = ring_buffer_entries_cpu(tr->buffer, cpu);
+ /*
+ * If this buffer has skipped entries, then we hold all
+ * entries for the trace and we need to ignore the
+ * ones before the time stamp.
+ */
+ if (tr->data[cpu]->skipped_entries) {
+ count -= tr->data[cpu]->skipped_entries;
+ /* total is the same as the entries */
+ *total += count;
+ } else
+ *total += count +
+ ring_buffer_overrun_cpu(tr->buffer, cpu);
+ *entries += count;
+ }
+}
+
static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "# _------=> CPU# \n");
@@ -1855,12 +1884,35 @@ static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "# \\ / ||||| \\ | / \n");
}
-static void print_func_help_header(struct seq_file *m)
+static void print_event_info(struct trace_array *tr, struct seq_file *m)
+{
+ unsigned long total;
+ unsigned long entries;
+
+ get_total_entries(tr, &total, &entries);
+ seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
+ entries, total, num_online_cpus());
+ seq_puts(m, "#\n");
+}
+
+static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
{
- seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
+ print_event_info(tr, m);
+ seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
seq_puts(m, "# | | | | |\n");
}
+static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+{
+ print_event_info(tr, m);
+ seq_puts(m, "# _-----=> irqs-off\n");
+ seq_puts(m, "# / _----=> need-resched\n");
+ seq_puts(m, "# | / _---=> hardirq/softirq\n");
+ seq_puts(m, "# || / _--=> preempt-depth\n");
+ seq_puts(m, "# ||| / delay\n");
+ seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | |||| | |\n");
+}
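For reference, with irq-info enabled the seq_puts() calls above emit a header of this shape (entry counts illustrative, alignment approximate):

# entries-in-buffer/entries-written: 205/205   #P:4
#
#                              _-----=> irqs-off
#                             / _----=> need-resched
#                            | / _---=> hardirq/softirq
#                            || / _--=> preempt-depth
#                            ||| /     delay
#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
#              | |       |   ||||       |         |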
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
@@ -1869,32 +1921,14 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
struct trace_array *tr = iter->tr;
struct trace_array_cpu *data = tr->data[tr->cpu];
struct tracer *type = current_trace;
- unsigned long entries = 0;
- unsigned long total = 0;
- unsigned long count;
+ unsigned long entries;
+ unsigned long total;
const char *name = "preemption";
- int cpu;
if (type)
name = type->name;
-
- for_each_tracing_cpu(cpu) {
- count = ring_buffer_entries_cpu(tr->buffer, cpu);
- /*
- * If this buffer has skipped entries, then we hold all
- * entries for the trace and we need to ignore the
- * ones before the time stamp.
- */
- if (tr->data[cpu]->skipped_entries) {
- count -= tr->data[cpu]->skipped_entries;
- /* total is the same as the entries */
- total += count;
- } else
- total += count +
- ring_buffer_overrun_cpu(tr->buffer, cpu);
- entries += count;
- }
+ get_total_entries(tr, &total, &entries);
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
@@ -2140,6 +2174,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
return print_trace_fmt(iter);
}
+void trace_latency_header(struct seq_file *m)
+{
+ struct trace_iterator *iter = m->private;
+
+ /* print nothing if the buffers are empty */
+ if (trace_empty(iter))
+ return;
+
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+ print_trace_header(m, iter);
+
+ if (!(trace_flags & TRACE_ITER_VERBOSE))
+ print_lat_help_header(m);
+}
+
void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
@@ -2155,8 +2204,12 @@ void trace_default_header(struct seq_file *m)
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
} else {
- if (!(trace_flags & TRACE_ITER_VERBOSE))
- print_func_help_header(m);
+ if (!(trace_flags & TRACE_ITER_VERBOSE)) {
+ if (trace_flags & TRACE_ITER_IRQ_INFO)
+ print_func_help_header_irq(iter->tr, m);
+ else
+ print_func_help_header(iter->tr, m);
+ }
}
}
@@ -4775,6 +4828,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
__ftrace_dump(true, oops_dump_mode);
}
+EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 092e1f8d18dc..2c2657462ac3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -370,6 +370,7 @@ void trace_graph_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
+void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);
@@ -654,6 +655,7 @@ enum trace_iterator_flags {
TRACE_ITER_RECORD_CMD = 0x100000,
TRACE_ITER_OVERWRITE = 0x200000,
TRACE_ITER_STOP_ON_FREE = 0x400000,
+ TRACE_ITER_IRQ_INFO = 0x800000,
};
/*
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 95dc31efd6dd..f04cc3136bd3 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -27,6 +27,12 @@
#include "trace.h"
#include "trace_output.h"
+#define DEFAULT_SYS_FILTER_MESSAGE \
+ "### global filter ###\n" \
+ "# Use this to set filters for multiple events.\n" \
+ "# Only events with the given fields will be affected.\n" \
+ "# If no events are modified, an error message will be displayed here"
+
enum filter_op_ids
{
OP_OR,
@@ -646,7 +652,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
- trace_seq_printf(s, "none\n");
+ trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
mutex_unlock(&event_mutex);
}
@@ -1838,7 +1844,10 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
if (!filter)
goto out;
- replace_filter_string(filter, filter_string);
+ /* System filters just show a default message */
+ kfree(filter->filter_string);
+ filter->filter_string = NULL;
+
/*
* No event actually uses the system filter
* we can free it without synchronize_sched().
@@ -1848,14 +1857,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
parse_init(ps, filter_ops, filter_string);
err = filter_parse(ps);
- if (err) {
- append_filter_err(ps, system->filter);
- goto out;
- }
+ if (err)
+ goto err_filter;
err = replace_system_preds(system, ps, filter_string);
if (err)
- append_filter_err(ps, system->filter);
+ goto err_filter;
out:
filter_opstack_clear(ps);
@@ -1865,6 +1872,11 @@ out_unlock:
mutex_unlock(&event_mutex);
return err;
+
+err_filter:
+ replace_filter_string(filter, filter_string);
+ append_filter_err(ps, system->filter);
+ goto out;
}
#ifdef CONFIG_PERF_EVENTS
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 20dad0d7a163..99d20e920368 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -280,9 +280,20 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
-static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_default_header(s);
+}
+#else
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 51999309a6cf..0d6ff3555942 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -627,11 +627,23 @@ int trace_print_context(struct trace_iterator *iter)
unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned long secs = (unsigned long)t;
char comm[TASK_COMM_LEN];
+ int ret;
trace_find_cmdline(entry->pid, comm);
- return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
- comm, entry->pid, iter->cpu, secs, usec_rem);
+ ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
+ comm, entry->pid, iter->cpu);
+ if (!ret)
+ return 0;
+
+ if (trace_flags & TRACE_ITER_IRQ_INFO) {
+ ret = trace_print_lat_fmt(s, entry);
+ if (!ret)
+ return 0;
+ }
+
+ return trace_seq_printf(s, " %5lu.%06lu: ",
+ secs, usec_rem);
}
int trace_print_lat_context(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e4a70c0c71b6..ff791ea48b57 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -280,9 +280,20 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void wakeup_print_header(struct seq_file *s)
+{
+ trace_default_header(s);
+}
+#else
+static void wakeup_print_header(struct seq_file *s)
+{
+ trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
diff --git a/kernel/wait.c b/kernel/wait.c
index 26fa7797f90f..7fdd9eaca2c3 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,10 +10,10 @@
#include <linux/wait.h>
#include <linux/hash.h>
-void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
+void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
spin_lock_init(&q->lock);
- lockdep_set_class(&q->lock, key);
+ lockdep_set_class_and_name(&q->lock, key, name);
INIT_LIST_HEAD(&q->task_list);
}
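The extra name parameter is presumably supplied by the init_waitqueue_head() wrapper stringifying its argument; a sketch of what the companion <linux/wait.h> change would look like (not part of this hunk, shown under that assumption):

/* Sketch, assuming the usual wrapper-macro idiom in <linux/wait.h>. */
#define init_waitqueue_head(q)					\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_waitqueue_head((q), #q, &__key);		\
	} while (0)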
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a78b7c6e042c..77cb245f8e7b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -268,12 +268,16 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
* Try to repair the damage, so we have a better chance to get useful
* debug output.
*/
-static void
+static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
void * addr, enum debug_obj_state state)
{
+ int fixed = 0;
+
if (fixup)
- debug_objects_fixups += fixup(addr, state);
+ fixed = fixup(addr, state);
+ debug_objects_fixups += fixed;
+ return fixed;
}
static void debug_object_is_on_stack(void *addr, int onstack)
@@ -386,6 +390,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
if (!debug_objects_enabled)
return;
@@ -425,8 +432,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
* let the type specific code decide whether this is
* true or not.
*/
- debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
+ if (debug_object_fixup(descr->fixup_activate, addr,
+ ODEBUG_STATE_NOTAVAILABLE))
+ debug_print_object(&o, "activate");
}
/**
@@ -563,6 +571,44 @@ out_unlock:
}
/**
+ * debug_object_assert_init - debug checks when object should be init-ed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj) {
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ /*
+ * Maybe the object is static. Let the type specific
+ * code decide what to do.
+ */
+ if (debug_object_fixup(descr->fixup_assert_init, addr,
+ ODEBUG_STATE_NOTAVAILABLE))
+ debug_print_object(&o, "assert_init");
+ return;
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
* debug_object_active_state - debug checks object usage state machine
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
diff --git a/mm/Kconfig b/mm/Kconfig
index 011b110365c8..e338407f1225 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -131,6 +131,12 @@ config SPARSEMEM_VMEMMAP
config HAVE_MEMBLOCK
boolean
+config HAVE_MEMBLOCK_NODE_MAP
+ boolean
+
+config ARCH_DISCARD_MEMBLOCK
+ boolean
+
config NO_BOOTMEM
boolean
diff --git a/mm/filemap.c b/mm/filemap.c
index c106d3b3cc64..5f0a3c91fdac 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1828,7 +1828,7 @@ repeat:
page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+ err = add_to_page_cache_lru(page, mapping, index, gfp);
if (unlikely(err)) {
page_cache_release(page);
if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
* @gfp: the page allocator flags to use if allocating
*
* This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
*/
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 73f17c0293c0..2316840b337a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -901,7 +901,6 @@ retry:
h->resv_huge_pages += delta;
ret = 0;
- spin_unlock(&hugetlb_lock);
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
@@ -915,6 +914,7 @@ retry:
VM_BUG_ON(page_count(page));
enqueue_huge_page(h, page);
}
+ spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
free:
diff --git a/mm/memblock.c b/mm/memblock.c
index 84bec4969ed5..2f55f19b7c86 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,12 +20,23 @@
#include <linux/seq_file.h>
#include <linux/memblock.h>
-struct memblock memblock __initdata_memblock;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+
+struct memblock memblock __initdata_memblock = {
+ .memory.regions = memblock_memory_init_regions,
+ .memory.cnt = 1, /* empty dummy entry */
+ .memory.max = INIT_MEMBLOCK_REGIONS,
+
+ .reserved.regions = memblock_reserved_init_regions,
+ .reserved.cnt = 1, /* empty dummy entry */
+ .reserved.max = INIT_MEMBLOCK_REGIONS,
+
+ .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
+};
int memblock_debug __initdata_memblock;
-int memblock_can_resize __initdata_memblock;
-static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
+static int memblock_can_resize __initdata_memblock;
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
@@ -38,20 +49,15 @@ static inline const char *memblock_type_name(struct memblock_type *type)
return "unknown";
}
-/*
- * Address comparison utilities
- */
-
-static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
-{
- return addr & ~(size - 1);
-}
-
-static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
+/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
+static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
- return (addr + (size - 1)) & ~(size - 1);
+ return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
+/*
+ * Address comparison utilities
+ */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
phys_addr_t base2, phys_addr_t size2)
{
@@ -73,83 +79,66 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
return (i < type->cnt) ? i : -1;
}
-/*
- * Find, allocate, deallocate or reserve unreserved regions. All allocations
- * are top-down.
+/**
+ * memblock_find_in_range_node - find free area in given range and node
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ *
+ * Find @size free area aligned to @align in the specified range and node.
+ *
+ * RETURNS:
+ * Found address on success, %0 on failure.
*/
-
-static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
- phys_addr_t size, phys_addr_t align)
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
+ phys_addr_t end, phys_addr_t size,
+ phys_addr_t align, int nid)
{
- phys_addr_t base, res_base;
- long j;
-
- /* In case, huge size is requested */
- if (end < size)
- return MEMBLOCK_ERROR;
-
- base = memblock_align_down((end - size), align);
+ phys_addr_t this_start, this_end, cand;
+ u64 i;
- /* Prevent allocations returning 0 as it's also used to
- * indicate an allocation failure
- */
- if (start == 0)
- start = PAGE_SIZE;
-
- while (start <= base) {
- j = memblock_overlaps_region(&memblock.reserved, base, size);
- if (j < 0)
- return base;
- res_base = memblock.reserved.regions[j].base;
- if (res_base < size)
- break;
- base = memblock_align_down(res_base - size, align);
- }
+ /* align @size to avoid excessive fragmentation on reserved array */
+ size = round_up(size, align);
- return MEMBLOCK_ERROR;
-}
-
-static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
- phys_addr_t align, phys_addr_t start, phys_addr_t end)
-{
- long i;
-
- BUG_ON(0 == size);
-
- /* Pump up max_addr */
+ /* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
end = memblock.current_limit;
- /* We do a top-down search, this tends to limit memory
- * fragmentation by keeping early boot allocs near the
- * top of memory
- */
- for (i = memblock.memory.cnt - 1; i >= 0; i--) {
- phys_addr_t memblockbase = memblock.memory.regions[i].base;
- phys_addr_t memblocksize = memblock.memory.regions[i].size;
- phys_addr_t bottom, top, found;
+ /* adjust @start to avoid underflow and allocating the first page */
+ start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+ end = max(start, end);
- if (memblocksize < size)
- continue;
- if ((memblockbase + memblocksize) <= start)
- break;
- bottom = max(memblockbase, start);
- top = min(memblockbase + memblocksize, end);
- if (bottom >= top)
- continue;
- found = memblock_find_region(bottom, top, size, align);
- if (found != MEMBLOCK_ERROR)
- return found;
+ for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+ this_start = clamp(this_start, start, end);
+ this_end = clamp(this_end, start, end);
+
+ cand = round_down(this_end - size, align);
+ if (cand >= this_start)
+ return cand;
}
- return MEMBLOCK_ERROR;
+ return 0;
}
-/*
- * Find a free area with specified alignment in a specific range.
+/**
+ * memblock_find_in_range - find free area in given range
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ *
+ * Find @size free area aligned to @align in the specified range.
+ *
+ * RETURNS:
+ * Found address on success, %0 on failure.
*/
-u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
+phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
+ phys_addr_t end, phys_addr_t size,
+ phys_addr_t align)
{
- return memblock_find_base(size, align, start, end);
+ return memblock_find_in_range_node(start, end, size, align,
+ MAX_NUMNODES);
}
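A typical early-boot use of the reworked finder, sketched under the assumption that the caller reserves what it finds (values illustrative):

/* Hypothetical sketch: find a 1MB area below the accessible limit,
 * aligned to 1MB, and reserve it before the page allocator is up. */
static void __init example_early_carveout(void)
{
	phys_addr_t addr;

	addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
				      1UL << 20, 1UL << 20);
	if (addr)		/* 0 means no suitable free area */
		memblock_reserve(addr, 1UL << 20);
}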
/*
@@ -178,25 +167,21 @@ int __init_memblock memblock_reserve_reserved_regions(void)
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
- unsigned long i;
-
- for (i = r; i < type->cnt - 1; i++) {
- type->regions[i].base = type->regions[i + 1].base;
- type->regions[i].size = type->regions[i + 1].size;
- }
+ type->total_size -= type->regions[r].size;
+ memmove(&type->regions[r], &type->regions[r + 1],
+ (type->cnt - (r + 1)) * sizeof(type->regions[r]));
type->cnt--;
/* Special case for empty arrays */
if (type->cnt == 0) {
+ WARN_ON(type->total_size != 0);
type->cnt = 1;
type->regions[0].base = 0;
type->regions[0].size = 0;
+ memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
}
}
-/* Defined below but needed now */
-static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
-
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
struct memblock_region *new_array, *old_array;
@@ -226,10 +211,10 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
*/
if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
- addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
+ addr = new_array ? __pa(new_array) : 0;
} else
- addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
- if (addr == MEMBLOCK_ERROR) {
+ addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+ if (!addr) {
pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
memblock_type_name(type), type->max, type->max * 2);
return -1;
@@ -254,7 +239,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
return 0;
/* Add the new reserved region now. Should not fail ! */
- BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));
+ BUG_ON(memblock_reserve(addr, new_size));
/* If the array wasn't our static init one, then free it. We only do
* that before SLAB is available as later on, we don't know whether
@@ -268,343 +253,514 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
return 0;
}
-int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
- phys_addr_t addr2, phys_addr_t size2)
-{
- return 1;
-}
-
-static long __init_memblock memblock_add_region(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size)
+/**
+ * memblock_merge_regions - merge neighboring compatible regions
+ * @type: memblock type to scan
+ *
+ * Scan @type and merge neighboring compatible regions.
+ */
+static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
- phys_addr_t end = base + size;
- int i, slot = -1;
-
- /* First try and coalesce this MEMBLOCK with others */
- for (i = 0; i < type->cnt; i++) {
- struct memblock_region *rgn = &type->regions[i];
- phys_addr_t rend = rgn->base + rgn->size;
+ int i = 0;
- /* Exit if there's no possible hits */
- if (rgn->base > end || rgn->size == 0)
- break;
+ /* cnt never goes below 1 */
+ while (i < type->cnt - 1) {
+ struct memblock_region *this = &type->regions[i];
+ struct memblock_region *next = &type->regions[i + 1];
- /* Check if we are fully enclosed within an existing
- * block
- */
- if (rgn->base <= base && rend >= end)
- return 0;
+ if (this->base + this->size != next->base ||
+ memblock_get_region_node(this) !=
+ memblock_get_region_node(next)) {
+ BUG_ON(this->base + this->size > next->base);
+ i++;
+ continue;
+ }
- /* Check if we overlap or are adjacent with the bottom
- * of a block.
- */
- if (base < rgn->base && end >= rgn->base) {
- /* If we can't coalesce, create a new block */
- if (!memblock_memory_can_coalesce(base, size,
- rgn->base,
- rgn->size)) {
- /* Overlap & can't coalesce are mutually
- * exclusive, if you do that, be prepared
- * for trouble
- */
- WARN_ON(end != rgn->base);
- goto new_block;
- }
- /* We extend the bottom of the block down to our
- * base
- */
- rgn->base = base;
- rgn->size = rend - base;
+ this->size += next->size;
+ memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+ type->cnt--;
+ }
+}
- /* Return if we have nothing else to allocate
- * (fully coalesced)
- */
- if (rend >= end)
- return 0;
+/**
+ * memblock_insert_region - insert new memblock region
+ * @type: memblock type to insert into
+ * @idx: index for the insertion point
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Insert new memblock region [@base,@base+@size) into @type at @idx.
+ * @type must already have extra room to accommodate the new region.
+ */
+static void __init_memblock memblock_insert_region(struct memblock_type *type,
+ int idx, phys_addr_t base,
+ phys_addr_t size, int nid)
+{
+ struct memblock_region *rgn = &type->regions[idx];
- /* We continue processing from the end of the
- * coalesced block.
- */
- base = rend;
- size = end - base;
- }
+ BUG_ON(type->cnt >= type->max);
+ memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
+ rgn->base = base;
+ rgn->size = size;
+ memblock_set_region_node(rgn, nid);
+ type->cnt++;
+ type->total_size += size;
+}
- /* Now check if we overlap or are adjacent with the
- * top of a block
- */
- if (base <= rend && end >= rend) {
- /* If we can't coalesce, create a new block */
- if (!memblock_memory_can_coalesce(rgn->base,
- rgn->size,
- base, size)) {
- /* Overlap & can't coalesce are mutually
- * exclusive, if you do that, be prepared
- * for trouble
- */
- WARN_ON(rend != base);
- goto new_block;
- }
- /* We adjust our base down to enclose the
- * original block and destroy it. It will be
- * part of our new allocation. Since we've
- * freed an entry, we know we won't fail
- * to allocate one later, so we won't risk
- * losing the original block allocation.
- */
- size += (base - rgn->base);
- base = rgn->base;
- memblock_remove_region(type, i--);
- }
- }
+/**
+ * memblock_add_region - add new memblock region
+ * @type: memblock type to add new region into
+ * @base: base address of the new region
+ * @size: size of the new region
+ * @nid: nid of the new region
+ *
+ * Add new memblock region [@base,@base+@size) into @type. The new region
+ * is allowed to overlap with existing ones - overlaps don't affect already
+ * existing regions. @type is guaranteed to be minimal (all neighboring
+ * compatible regions are merged) after the addition.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int __init_memblock memblock_add_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size, int nid)
+{
+ bool insert = false;
+ phys_addr_t obase = base;
+ phys_addr_t end = base + memblock_cap_size(base, &size);
+ int i, nr_new;
- /* If the array is empty, special case, replace the fake
- * filler region and return
- */
- if ((type->cnt == 1) && (type->regions[0].size == 0)) {
+ /* special case for empty array */
+ if (type->regions[0].size == 0) {
+ WARN_ON(type->cnt != 1 || type->total_size);
type->regions[0].base = base;
type->regions[0].size = size;
+ memblock_set_region_node(&type->regions[0], nid);
+ type->total_size = size;
return 0;
}
-
- new_block:
- /* If we are out of space, we fail. It's too late to resize the array
- * but then this shouldn't have happened in the first place.
+repeat:
+ /*
+ * The following is executed twice. Once with %false @insert and
+ * then with %true. The first counts the number of regions needed
+ * to accommodate the new area. The second actually inserts them.
*/
- if (WARN_ON(type->cnt >= type->max))
- return -1;
+ base = obase;
+ nr_new = 0;
- /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
- for (i = type->cnt - 1; i >= 0; i--) {
- if (base < type->regions[i].base) {
- type->regions[i+1].base = type->regions[i].base;
- type->regions[i+1].size = type->regions[i].size;
- } else {
- type->regions[i+1].base = base;
- type->regions[i+1].size = size;
- slot = i + 1;
+ for (i = 0; i < type->cnt; i++) {
+ struct memblock_region *rgn = &type->regions[i];
+ phys_addr_t rbase = rgn->base;
+ phys_addr_t rend = rbase + rgn->size;
+
+ if (rbase >= end)
break;
+ if (rend <= base)
+ continue;
+ /*
+ * @rgn overlaps. If it separates the lower part of new
+ * area, insert that portion.
+ */
+ if (rbase > base) {
+ nr_new++;
+ if (insert)
+ memblock_insert_region(type, i++, base,
+ rbase - base, nid);
}
+ /* area below @rend is dealt with, forget about it */
+ base = min(rend, end);
}
- if (base < type->regions[0].base) {
- type->regions[0].base = base;
- type->regions[0].size = size;
- slot = 0;
+
+ /* insert the remaining portion */
+ if (base < end) {
+ nr_new++;
+ if (insert)
+ memblock_insert_region(type, i, base, end - base, nid);
}
- type->cnt++;
- /* The array is full ? Try to resize it. If that fails, we undo
- * our allocation and return an error
+ /*
+ * If this was the first round, resize array and repeat for actual
+ * insertions; otherwise, merge and return.
*/
- if (type->cnt == type->max && memblock_double_array(type)) {
- BUG_ON(slot < 0);
- memblock_remove_region(type, slot);
- return -1;
+ if (!insert) {
+ while (type->cnt + nr_new > type->max)
+ if (memblock_double_array(type) < 0)
+ return -ENOMEM;
+ insert = true;
+ goto repeat;
+ } else {
+ memblock_merge_regions(type);
+ return 0;
}
-
- return 0;
}
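A worked example of the two-pass algorithm, with hypothetical regions, may help:

/*
 * Illustration (hypothetical values): existing regions [0,16) and [32,48),
 * adding [8,40).  Pass 1 (insert == false): [0,16) overlaps, so the cursor
 * advances to 16; [32,48) starts above the cursor, so the gap [16,32) is
 * counted (nr_new == 1) and the cursor advances to min(48, 40) == 40, the
 * end, leaving no remainder.  The array is doubled if needed, then pass 2
 * (insert == true) inserts [16,32), and memblock_merge_regions() coalesces
 * [0,16) + [16,32) + [32,48) into the single region [0,48).
 */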
-long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
+ int nid)
{
- return memblock_add_region(&memblock.memory, base, size);
+ return memblock_add_region(&memblock.memory, base, size, nid);
+}
+int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+{
+ return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}
-static long __init_memblock __memblock_remove(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size)
+/**
+ * memblock_isolate_range - isolate given range into disjoint memblocks
+ * @type: memblock type to isolate range for
+ * @base: base of range to isolate
+ * @size: size of range to isolate
+ * @start_rgn: out parameter for the start of isolated region
+ * @end_rgn: out parameter for the end of isolated region
+ *
+ * Walk @type and ensure that regions don't cross the boundaries defined by
+ * [@base,@base+@size). Crossing regions are split at the boundaries,
+ * which may create at most two more regions. The index of the first
+ * region inside the range is returned in *@start_rgn and end in *@end_rgn.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int __init_memblock memblock_isolate_range(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size,
+ int *start_rgn, int *end_rgn)
{
- phys_addr_t end = base + size;
+ phys_addr_t end = base + memblock_cap_size(base, &size);
int i;
- /* Walk through the array for collisions */
+ *start_rgn = *end_rgn = 0;
+
+ /* we'll create at most two more regions */
+ while (type->cnt + 2 > type->max)
+ if (memblock_double_array(type) < 0)
+ return -ENOMEM;
+
for (i = 0; i < type->cnt; i++) {
struct memblock_region *rgn = &type->regions[i];
- phys_addr_t rend = rgn->base + rgn->size;
+ phys_addr_t rbase = rgn->base;
+ phys_addr_t rend = rbase + rgn->size;
- /* Nothing more to do, exit */
- if (rgn->base > end || rgn->size == 0)
+ if (rbase >= end)
break;
-
- /* If we fully enclose the block, drop it */
- if (base <= rgn->base && end >= rend) {
- memblock_remove_region(type, i--);
+ if (rend <= base)
continue;
- }
- /* If we are fully enclosed within a block
- * then we need to split it and we are done
- */
- if (base > rgn->base && end < rend) {
- rgn->size = base - rgn->base;
- if (!memblock_add_region(type, end, rend - end))
- return 0;
- /* Failure to split is bad, we at least
- * restore the block before erroring
+ if (rbase < base) {
+ /*
+ * @rgn intersects from below. Split and continue
+ * to process the next region - the new top half.
+ */
+ rgn->base = base;
+ rgn->size -= base - rbase;
+ type->total_size -= base - rbase;
+ memblock_insert_region(type, i, rbase, base - rbase,
+ memblock_get_region_node(rgn));
+ } else if (rend > end) {
+ /*
+ * @rgn intersects from above. Split and redo the
+ * current region - the new bottom half.
*/
- rgn->size = rend - rgn->base;
- WARN_ON(1);
- return -1;
- }
-
- /* Check if we need to trim the bottom of a block */
- if (rgn->base < end && rend > end) {
- rgn->size -= end - rgn->base;
rgn->base = end;
- break;
+ rgn->size -= end - rbase;
+ type->total_size -= end - rbase;
+ memblock_insert_region(type, i--, rbase, end - rbase,
+ memblock_get_region_node(rgn));
+ } else {
+ /* @rgn is fully contained, record it */
+ if (!*end_rgn)
+ *start_rgn = i;
+ *end_rgn = i + 1;
}
+ }
- /* And check if we need to trim the top of a block */
- if (base < rend)
- rgn->size -= rend - base;
+ return 0;
+}
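Again a worked example with hypothetical values:

/*
 * Isolating [16,32) out of a single region [0,48): the region intersects
 * from below, so [0,16) is split off and the loop revisits the new top
 * half [16,48); that in turn intersects from above, splitting off [32,48).
 * On return the array holds [0,16), [16,32), [32,48), and the fully
 * contained middle region is reported via *start_rgn == 1, *end_rgn == 2.
 */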
- }
+static int __init_memblock __memblock_remove(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size)
+{
+ int start_rgn, end_rgn;
+ int i, ret;
+
+ ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+ if (ret)
+ return ret;
+
+ for (i = end_rgn - 1; i >= start_rgn; i--)
+ memblock_remove_region(type, i);
return 0;
}
-long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
return __memblock_remove(&memblock.memory, base, size);
}
-long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
+ memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
+ (unsigned long long)base,
+ (unsigned long long)base + size,
+ (void *)_RET_IP_);
+
return __memblock_remove(&memblock.reserved, base, size);
}
-long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
struct memblock_type *_rgn = &memblock.reserved;
+ memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
+ (unsigned long long)base,
+ (unsigned long long)base + size,
+ (void *)_RET_IP_);
BUG_ON(0 == size);
- return memblock_add_region(_rgn, base, size);
+ return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+/**
+ * __next_free_mem_range - next function for for_each_free_mem_range()
+ * @idx: pointer to u64 loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Find the first free area from *@idx which matches @nid, fill the out
+ * parameters, and update *@idx for the next iteration. The lower 32 bits of
+ * *@idx index the memory regions and the upper 32 bits index the
+ * areas before each reserved region. For example, if reserved regions
+ * look like the following,
+ *
+ * 0:[0-16), 1:[32-48), 2:[128-130)
+ *
+ * The upper 32bit indexes the following regions.
+ *
+ * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
+ *
+ * As both region arrays are sorted, the function advances the two indices
+ * in lockstep and returns each intersection.
+ */
+void __init_memblock __next_free_mem_range(u64 *idx, int nid,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid)
{
- phys_addr_t found;
+ struct memblock_type *mem = &memblock.memory;
+ struct memblock_type *rsv = &memblock.reserved;
+ int mi = *idx & 0xffffffff;
+ int ri = *idx >> 32;
- /* We align the size to limit fragmentation. Without this, a lot of
- * small allocs quickly eat up the whole reserve array on sparc
- */
- size = memblock_align_up(size, align);
+ for ( ; mi < mem->cnt; mi++) {
+ struct memblock_region *m = &mem->regions[mi];
+ phys_addr_t m_start = m->base;
+ phys_addr_t m_end = m->base + m->size;
- found = memblock_find_base(size, align, 0, max_addr);
- if (found != MEMBLOCK_ERROR &&
- !memblock_add_region(&memblock.reserved, found, size))
- return found;
+ /* only memory regions are associated with nodes, check it */
+ if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ continue;
- return 0;
+ /* scan areas before each reservation for intersection */
+ for ( ; ri < rsv->cnt + 1; ri++) {
+ struct memblock_region *r = &rsv->regions[ri];
+ phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
+ phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+
+ /* if ri advanced past mi, break out to advance mi */
+ if (r_start >= m_end)
+ break;
+ /* if the two regions intersect, we're done */
+ if (m_start < r_end) {
+ if (out_start)
+ *out_start = max(m_start, r_start);
+ if (out_end)
+ *out_end = min(m_end, r_end);
+ if (out_nid)
+ *out_nid = memblock_get_region_node(m);
+ /*
+ * The region which ends first is advanced
+ * for the next iteration.
+ */
+ if (m_end <= r_end)
+ mi++;
+ else
+ ri++;
+ *idx = (u32)mi | (u64)ri << 32;
+ return;
+ }
+ }
+ }
+
+ /* signal end of iteration */
+ *idx = ULLONG_MAX;
}
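
As a concrete illustration of the packed cursor described above, the following standalone userspace C sketch (illustrative only, not part of this patch) shows how the u64 index is split into and rebuilt from the two array indices:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t idx = 0;

		/* decode, as __next_free_mem_range() does on entry */
		int mi = idx & 0xffffffff;	/* index into memory regions */
		int ri = idx >> 32;		/* gap before reserved[ri] */

		/* suppose memory region 1 was matched against gap 2 ... */
		mi = 1;
		ri = 2;

		/* ... re-encode for the next iteration */
		idx = (uint32_t)mi | (uint64_t)ri << 32;
		printf("idx = %#llx (mi=%d, ri=%d)\n",
		       (unsigned long long)idx,
		       (int)(idx & 0xffffffff), (int)(idx >> 32));
		return 0;
	}
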
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+/**
+ * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
+ * @idx: pointer to u64 loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Reverse of __next_free_mem_range().
+ */
+void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid)
{
- phys_addr_t alloc;
+ struct memblock_type *mem = &memblock.memory;
+ struct memblock_type *rsv = &memblock.reserved;
+ int mi = *idx & 0xffffffff;
+ int ri = *idx >> 32;
- alloc = __memblock_alloc_base(size, align, max_addr);
+ if (*idx == (u64)ULLONG_MAX) {
+ mi = mem->cnt - 1;
+ ri = rsv->cnt;
+ }
- if (alloc == 0)
- panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
- (unsigned long long) size, (unsigned long long) max_addr);
+ for ( ; mi >= 0; mi--) {
+ struct memblock_region *m = &mem->regions[mi];
+ phys_addr_t m_start = m->base;
+ phys_addr_t m_end = m->base + m->size;
- return alloc;
-}
+ /* only memory regions are associated with nodes, check it */
+ if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ continue;
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
- return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
-}
+ /* scan areas before each reservation for intersection */
+ for ( ; ri >= 0; ri--) {
+ struct memblock_region *r = &rsv->regions[ri];
+ phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
+ phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+
+ /* if ri advanced past mi, break out to advance mi */
+ if (r_end <= m_start)
+ break;
+ /* if the two regions intersect, we're done */
+ if (m_end > r_start) {
+ if (out_start)
+ *out_start = max(m_start, r_start);
+ if (out_end)
+ *out_end = min(m_end, r_end);
+ if (out_nid)
+ *out_nid = memblock_get_region_node(m);
+
+ if (m_start >= r_start)
+ mi--;
+ else
+ ri--;
+ *idx = (u32)mi | (u64)ri << 32;
+ return;
+ }
+ }
+ }
+ *idx = ULLONG_MAX;
+}
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * Additional node-local allocators. Search for node memory is bottom up
- * and walks memblock regions within that node bottom-up as well, but allocation
- * within an memblock region is top-down. XXX I plan to fix that at some stage
- *
- * WARNING: Only available after early_node_map[] has been populated,
- * on some architectures, that is after all the calls to add_active_range()
- * have been done to populate it.
+ * Common iterator interface used to define for_each_mem_pfn_range().
*/
-
-phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
+void __init_memblock __next_mem_pfn_range(int *idx, int nid,
+ unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn, int *out_nid)
{
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
- /*
- * This code originates from sparc which really wants use to walk by addresses
- * and returns the nid. This is not very convenient for early_pfn_map[] users
- * as the map isn't sorted yet, and it really wants to be walked by nid.
- *
- * For now, I implement the inefficient method below which walks the early
- * map multiple times. Eventually we may want to use an ARCH config option
- * to implement a completely different method for both case.
- */
- unsigned long start_pfn, end_pfn;
- int i;
+ struct memblock_type *type = &memblock.memory;
+ struct memblock_region *r;
- for (i = 0; i < MAX_NUMNODES; i++) {
- get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
- if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
+ while (++*idx < type->cnt) {
+ r = &type->regions[*idx];
+
+ if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
continue;
- *nid = i;
- return min(end, PFN_PHYS(end_pfn));
+ if (nid == MAX_NUMNODES || nid == r->nid)
+ break;
+ }
+ if (*idx >= type->cnt) {
+ *idx = -1;
+ return;
}
-#endif
- *nid = 0;
- return end;
+ if (out_start_pfn)
+ *out_start_pfn = PFN_UP(r->base);
+ if (out_end_pfn)
+ *out_end_pfn = PFN_DOWN(r->base + r->size);
+ if (out_nid)
+ *out_nid = r->nid;
}
-static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
- phys_addr_t size,
- phys_addr_t align, int nid)
+/**
+ * memblock_set_node - set node ID on memblock regions
+ * @base: base of area to set node ID for
+ * @size: size of area to set node ID for
+ * @nid: node ID to set
+ *
+ * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
+ * Regions which cross the area boundaries are split as necessary.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
+ int nid)
{
- phys_addr_t start, end;
+ struct memblock_type *type = &memblock.memory;
+ int start_rgn, end_rgn;
+ int i, ret;
- start = mp->base;
- end = start + mp->size;
+ ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+ if (ret)
+ return ret;
- start = memblock_align_up(start, align);
- while (start < end) {
- phys_addr_t this_end;
- int this_nid;
+ for (i = start_rgn; i < end_rgn; i++)
+ type->regions[i].nid = nid;
- this_end = memblock_nid_range(start, end, &this_nid);
- if (this_nid == nid) {
- phys_addr_t ret = memblock_find_region(start, this_end, size, align);
- if (ret != MEMBLOCK_ERROR &&
- !memblock_add_region(&memblock.reserved, ret, size))
- return ret;
- }
- start = this_end;
- }
+ memblock_merge_regions(type);
+ return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
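
A typical early-boot usage of memblock_set_node(), sketched with made-up addresses (hypothetical example, kernel context assumed): the architecture registers its RAM banks first, then attributes them to nodes so the nid-aware iterators above can filter on them.

	/* hypothetical two-node layout, 1G per node */
	memblock_add(0x00000000, 0x40000000);
	memblock_add(0x40000000, 0x40000000);
	memblock_set_node(0x00000000, 0x40000000, 0);
	memblock_set_node(0x40000000, 0x40000000, 1);
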
+
+static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t max_addr,
+ int nid)
+{
+ phys_addr_t found;
- return MEMBLOCK_ERROR;
+ found = memblock_find_in_range_node(0, max_addr, size, align, nid);
+ if (found && !memblock_reserve(found, size))
+ return found;
+
+ return 0;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
- struct memblock_type *mem = &memblock.memory;
- int i;
+ return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
- BUG_ON(0 == size);
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+ return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
+}
- /* We align the size to limit fragmentation. Without this, a lot of
- * small allocs quickly eat up the whole reserve array on sparc
- */
- size = memblock_align_up(size, align);
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+ phys_addr_t alloc;
- /* We do a bottom-up search for a region with the right
- * nid since that's easier considering how memblock_nid_range()
- * works
- */
- for (i = 0; i < mem->cnt; i++) {
- phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
- size, align, nid);
- if (ret != MEMBLOCK_ERROR)
- return ret;
- }
+ alloc = __memblock_alloc_base(size, align, max_addr);
- return 0;
+ if (alloc == 0)
+ panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+ (unsigned long long) size, (unsigned long long) max_addr);
+
+ return alloc;
+}
+
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
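
All of the entry points above now funnel into memblock_alloc_base_nid(), and failure is reported as 0 rather than the old MEMBLOCK_ERROR sentinel. A minimal caller sketch (hypothetical sizes, kernel context assumed):

	phys_addr_t pa;

	/* 64K, 4K-aligned, anywhere below the accessible limit */
	pa = __memblock_alloc_base(0x10000, 0x1000,
				   MEMBLOCK_ALLOC_ACCESSIBLE);
	if (!pa)
		pr_warn("early allocation failed\n");
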
phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
@@ -613,7 +769,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
if (res)
return res;
- return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+ return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
@@ -621,10 +777,9 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* Remaining API functions
*/
-/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
- return memblock.memory_size;
+ return memblock.memory.total_size;
}
/* lowest address */
@@ -640,45 +795,28 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)
return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
-/* You must call memblock_analyze() after this. */
-void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
+void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
unsigned long i;
- phys_addr_t limit;
- struct memblock_region *p;
+ phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
- if (!memory_limit)
+ if (!limit)
return;
- /* Truncate the memblock regions to satisfy the memory limit. */
- limit = memory_limit;
+ /* find out max address */
for (i = 0; i < memblock.memory.cnt; i++) {
- if (limit > memblock.memory.regions[i].size) {
- limit -= memblock.memory.regions[i].size;
- continue;
- }
-
- memblock.memory.regions[i].size = limit;
- memblock.memory.cnt = i + 1;
- break;
- }
-
- memory_limit = memblock_end_of_DRAM();
+ struct memblock_region *r = &memblock.memory.regions[i];
- /* And truncate any reserves above the limit also. */
- for (i = 0; i < memblock.reserved.cnt; i++) {
- p = &memblock.reserved.regions[i];
-
- if (p->base > memory_limit)
- p->size = 0;
- else if ((p->base + p->size) > memory_limit)
- p->size = memory_limit - p->base;
-
- if (p->size == 0) {
- memblock_remove_region(&memblock.reserved, i);
- i--;
+ if (limit <= r->size) {
+ max_addr = r->base + limit;
+ break;
}
+ limit -= r->size;
}
+
+ /* truncate both memory and reserved regions */
+ __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
+ __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}
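
A worked example of the rewritten truncation (standalone C model, not kernel code): with banks [0, 512M) and [1G, 1.5G) and a 768M limit, the walk places max_addr 256M into the second bank, and __memblock_remove() then drops everything above it from both arrays.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long base[] = { 0, 1ULL << 30 };
		unsigned long long size[] = { 512ULL << 20, 512ULL << 20 };
		unsigned long long limit = 768ULL << 20, max_addr = ~0ULL;
		int i;

		for (i = 0; i < 2; i++) {
			if (limit <= size[i]) {
				max_addr = base[i] + limit;
				break;
			}
			limit -= size[i];
		}
		printf("max_addr = %#llx\n", max_addr);	/* 0x50000000 */
		return 0;
	}
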
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
@@ -712,16 +850,18 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
int idx = memblock_search(&memblock.memory, base);
+ phys_addr_t end = base + memblock_cap_size(base, &size);
if (idx == -1)
return 0;
return memblock.memory.regions[idx].base <= base &&
(memblock.memory.regions[idx].base +
- memblock.memory.regions[idx].size) >= (base + size);
+ memblock.memory.regions[idx].size) >= end;
}
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
+ memblock_cap_size(base, &size);
return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
@@ -731,86 +871,45 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit)
memblock.current_limit = limit;
}
-static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
+static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
unsigned long long base, size;
int i;
- pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
+ pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);
- for (i = 0; i < region->cnt; i++) {
- base = region->regions[i].base;
- size = region->regions[i].size;
-
- pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
- name, i, base, base + size - 1, size);
+ for (i = 0; i < type->cnt; i++) {
+ struct memblock_region *rgn = &type->regions[i];
+ char nid_buf[32] = "";
+
+ base = rgn->base;
+ size = rgn->size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ if (memblock_get_region_node(rgn) != MAX_NUMNODES)
+ snprintf(nid_buf, sizeof(nid_buf), " on node %d",
+ memblock_get_region_node(rgn));
+#endif
+ pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
+ name, i, base, base + size - 1, size, nid_buf);
}
}
-void __init_memblock memblock_dump_all(void)
+void __init_memblock __memblock_dump_all(void)
{
- if (!memblock_debug)
- return;
-
pr_info("MEMBLOCK configuration:\n");
- pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
+ pr_info(" memory size = %#llx reserved size = %#llx\n",
+ (unsigned long long)memblock.memory.total_size,
+ (unsigned long long)memblock.reserved.total_size);
memblock_dump(&memblock.memory, "memory");
memblock_dump(&memblock.reserved, "reserved");
}
-void __init memblock_analyze(void)
+void __init memblock_allow_resize(void)
{
- int i;
-
- /* Check marker in the unused last array entry */
- WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
- != MEMBLOCK_INACTIVE);
- WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
- != MEMBLOCK_INACTIVE);
-
- memblock.memory_size = 0;
-
- for (i = 0; i < memblock.memory.cnt; i++)
- memblock.memory_size += memblock.memory.regions[i].size;
-
- /* We allow resizing from there */
memblock_can_resize = 1;
}
-void __init memblock_init(void)
-{
- static int init_done __initdata = 0;
-
- if (init_done)
- return;
- init_done = 1;
-
- /* Hookup the initial arrays */
- memblock.memory.regions = memblock_memory_init_regions;
- memblock.memory.max = INIT_MEMBLOCK_REGIONS;
- memblock.reserved.regions = memblock_reserved_init_regions;
- memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
-
- /* Write a marker in the unused last array entry */
- memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
- memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
-
- /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
- * This simplifies the memblock_add() code below...
- */
- memblock.memory.regions[0].base = 0;
- memblock.memory.regions[0].size = 0;
- memblock.memory.cnt = 1;
-
- /* Ditto. */
- memblock.reserved.regions[0].base = 0;
- memblock.reserved.regions[0].size = 0;
- memblock.reserved.cnt = 1;
-
- memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
-}
-
static int __init early_memblock(char *p)
{
if (p && strstr(p, "debug"))
@@ -819,7 +918,7 @@ static int __init early_memblock(char *p)
}
early_param("memblock", early_memblock);
-#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
static int memblock_debug_show(struct seq_file *m, void *private)
{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6aff93c98aca..b63f5f7dfa07 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4907,9 +4907,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
int cpu;
enable_swap_cgroup();
parent = NULL;
- root_mem_cgroup = memcg;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
+ root_mem_cgroup = memcg;
for_each_possible_cpu(cpu) {
struct memcg_stock_pcp *stock =
&per_cpu(memcg_stock, cpu);
@@ -4948,7 +4948,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &memcg->css;
free_out:
__mem_cgroup_free(memcg);
- root_mem_cgroup = NULL;
return ERR_PTR(error);
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index adc395481813..c3fdbcb17658 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
struct vm_area_struct *prev;
struct vm_area_struct *vma;
int err = 0;
+ pgoff_t pgoff;
unsigned long vmstart;
unsigned long vmend;
@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (!vma || vma->vm_start > start)
return -EFAULT;
+ if (start > vma->vm_start)
+ prev = vma;
+
for (; vma && vma->vm_start < end; prev = vma, vma = next) {
next = vma->vm_next;
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
+ if (mpol_equal(vma_policy(vma), new_pol))
+ continue;
+
+ pgoff = vma->vm_pgoff +
+ ((vmstart - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
- vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+ vma->anon_vma, vma->vm_file, pgoff,
new_pol);
if (prev) {
vma = prev;
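
The pgoff adjustment matters because vma_merge() checks file-offset continuity: when the merge starts at vmstart inside the VMA rather than at vm_start, the offset handed over must be advanced by the same number of pages. A standalone model of the arithmetic (hypothetical values):

	#include <stdio.h>
	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long vm_start = 0x400000, vm_pgoff = 0x10;
		unsigned long vmstart = 0x402000;	/* 2 pages in */
		unsigned long pgoff = vm_pgoff +
				      ((vmstart - vm_start) >> PAGE_SHIFT);
		printf("pgoff = %#lx\n", pgoff);	/* 0x12 */
		return 0;
	}
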
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7fa41b4a07bf..24f0fc1a56d6 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -41,14 +41,13 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
if (limit > memblock.current_limit)
limit = memblock.current_limit;
- addr = find_memory_core_early(nid, size, align, goal, limit);
-
- if (addr == MEMBLOCK_ERROR)
+ addr = memblock_find_in_range_node(goal, limit, size, align, nid);
+ if (!addr)
return NULL;
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
- memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+ memblock_reserve(addr, size);
/*
* The min_count is set to 0 so that bootmem allocated blocks
* are never reported as leaks.
@@ -107,23 +106,27 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
__free_pages_bootmem(pfn_to_page(i), 0);
}
-unsigned long __init free_all_memory_core_early(int nodeid)
+unsigned long __init free_low_memory_core_early(int nodeid)
{
- int i;
- u64 start, end;
unsigned long count = 0;
- struct range *range = NULL;
- int nr_range;
-
- nr_range = get_free_all_memory_range(&range, nodeid);
-
- for (i = 0; i < nr_range; i++) {
- start = range[i].start;
- end = range[i].end;
- count += end - start;
- __free_pages_memory(start, end);
+ phys_addr_t start, end;
+ u64 i;
+
+ /* free reserved array temporarily so that it's treated as free area */
+ memblock_free_reserved_regions();
+
+ for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ unsigned long start_pfn = PFN_UP(start);
+ unsigned long end_pfn = min_t(unsigned long,
+ PFN_DOWN(end), max_low_pfn);
+ if (start_pfn < end_pfn) {
+ __free_pages_memory(start_pfn, end_pfn);
+ count += end_pfn - start_pfn;
+ }
}
+ /* put the reserved region array back in place */
+ memblock_reserve_reserved_regions();
return count;
}
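
The PFN_UP/PFN_DOWN pair guarantees that only pages fully contained in a free range are handed back. A standalone illustration with 4K pages (not kernel code):

	#include <stdio.h>
	#define PAGE_SHIFT 12
	#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
	#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

	int main(void)
	{
		/* [0x1200, 0x5400) fully covers only pages 2, 3 and 4 */
		unsigned long start = 0x1200, end = 0x5400;
		printf("pfns [%lu, %lu)\n", PFN_UP(start), PFN_DOWN(end));
		return 0;
	}
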
@@ -137,7 +140,7 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
- /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+ /* free_low_memory_core_early(MAX_NUMNODES) will be called later */
return 0;
}
@@ -155,7 +158,7 @@ unsigned long __init free_all_bootmem(void)
* Using MAX_NUMNODES makes sure all ranges in early_node_map[]
* are used instead of only Node0-related ones
*/
- return free_all_memory_core_early(MAX_NUMNODES);
+ return free_low_memory_core_early(MAX_NUMNODES);
}
/**
@@ -172,7 +175,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
kmemleak_free_part(__va(physaddr), size);
- memblock_x86_free_range(physaddr, physaddr + size);
+ memblock_free(physaddr, size);
}
/**
@@ -187,7 +190,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
void __init free_bootmem(unsigned long addr, unsigned long size)
{
kmemleak_free_part(__va(addr), size);
- memblock_x86_free_range(addr, addr + size);
+ memblock_free(addr, size);
}
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 76f2c5ae908e..069b64e521fc 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
- int points;
+ long points;
if (oom_unkillable_task(p, mem, nodemask))
return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2b8ba3aebf6e..bdc804c2d99c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -181,39 +181,17 @@ static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
- /*
- * MAX_ACTIVE_REGIONS determines the maximum number of distinct
- * ranges of memory (RAM) that may be registered with add_active_range().
- * Ranges passed to add_active_range() will be merged if possible
- * so the number of times add_active_range() can be called is
- * related to the number of nodes and the number of holes
- */
- #ifdef CONFIG_MAX_ACTIVE_REGIONS
- /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
- #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
- #else
- #if MAX_NUMNODES >= 32
- /* If there can be many nodes, allow up to 50 holes per node */
- #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
- #else
- /* By default, allow up to 256 distinct regions */
- #define MAX_ACTIVE_REGIONS 256
- #endif
- #endif
-
- static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
- static int __meminitdata nr_nodemap_entries;
- static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
- static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
- static unsigned long __initdata required_kernelcore;
- static unsigned long __initdata required_movablecore;
- static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-
- /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
- int movable_zone;
- EXPORT_SYMBOL(movable_zone);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_movablecore;
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+
+/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
+int movable_zone;
+EXPORT_SYMBOL(movable_zone);
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
@@ -706,10 +684,10 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
int loop;
prefetchw(page);
- for (loop = 0; loop < BITS_PER_LONG; loop++) {
+ for (loop = 0; loop < (1 << order); loop++) {
struct page *p = &page[loop];
- if (loop + 1 < BITS_PER_LONG)
+ if (loop + 1 < (1 << order))
prefetchw(p + 1);
__ClearPageReserved(p);
set_page_count(p, 0);
@@ -3737,35 +3715,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
return 0;
}
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-/*
- * Basic iterator support. Return the first range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns first region regardless of node
- */
-static int __meminit first_active_region_index_in_nid(int nid)
-{
- int i;
-
- for (i = 0; i < nr_nodemap_entries; i++)
- if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
- return i;
-
- return -1;
-}
-
-/*
- * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit next_active_region_index_in_nid(int index, int nid)
-{
- for (index = index + 1; index < nr_nodemap_entries; index++)
- if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
- return index;
-
- return -1;
-}
-
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3775,15 +3725,12 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
*/
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
- int i;
-
- for (i = 0; i < nr_nodemap_entries; i++) {
- unsigned long start_pfn = early_node_map[i].start_pfn;
- unsigned long end_pfn = early_node_map[i].end_pfn;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
if (start_pfn <= pfn && pfn < end_pfn)
- return early_node_map[i].nid;
- }
+ return nid;
/* This is a memory hole */
return -1;
}
@@ -3812,11 +3759,6 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
}
#endif
-/* Basic iterator support to walk early_node_map[] */
-#define for_each_active_range_index_in_nid(i, nid) \
- for (i = first_active_region_index_in_nid(nid); i != -1; \
- i = next_active_region_index_in_nid(i, nid))
-
/**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3826,122 +3768,34 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
* add_active_ranges() contain no holes and may be freed,
* this function may be used instead of calling free_bootmem() manually.
*/
-void __init free_bootmem_with_active_regions(int nid,
- unsigned long max_low_pfn)
-{
- int i;
-
- for_each_active_range_index_in_nid(i, nid) {
- unsigned long size_pages = 0;
- unsigned long end_pfn = early_node_map[i].end_pfn;
-
- if (early_node_map[i].start_pfn >= max_low_pfn)
- continue;
-
- if (end_pfn > max_low_pfn)
- end_pfn = max_low_pfn;
-
- size_pages = end_pfn - early_node_map[i].start_pfn;
- free_bootmem_node(NODE_DATA(early_node_map[i].nid),
- PFN_PHYS(early_node_map[i].start_pfn),
- size_pages << PAGE_SHIFT);
- }
-}
-
-#ifdef CONFIG_HAVE_MEMBLOCK
-/*
- * Basic iterator support. Return the last range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns last region regardless of node
- */
-static int __meminit last_active_region_index_in_nid(int nid)
+void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
- int i;
-
- for (i = nr_nodemap_entries - 1; i >= 0; i--)
- if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
- return i;
-
- return -1;
-}
-
-/*
- * Basic iterator support. Return the previous active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit previous_active_region_index_in_nid(int index, int nid)
-{
- for (index = index - 1; index >= 0; index--)
- if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
- return index;
-
- return -1;
-}
-
-#define for_each_active_range_index_in_nid_reverse(i, nid) \
- for (i = last_active_region_index_in_nid(nid); i != -1; \
- i = previous_active_region_index_in_nid(i, nid))
-
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit)
-{
- int i;
-
- /* Need to go over early_node_map to find out good range for node */
- for_each_active_range_index_in_nid_reverse(i, nid) {
- u64 addr;
- u64 ei_start, ei_last;
- u64 final_start, final_end;
-
- ei_last = early_node_map[i].end_pfn;
- ei_last <<= PAGE_SHIFT;
- ei_start = early_node_map[i].start_pfn;
- ei_start <<= PAGE_SHIFT;
-
- final_start = max(ei_start, goal);
- final_end = min(ei_last, limit);
-
- if (final_start >= final_end)
- continue;
-
- addr = memblock_find_in_range(final_start, final_end, size, align);
+ unsigned long start_pfn, end_pfn;
+ int i, this_nid;
- if (addr == MEMBLOCK_ERROR)
- continue;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
+ start_pfn = min(start_pfn, max_low_pfn);
+ end_pfn = min(end_pfn, max_low_pfn);
- return addr;
+ if (start_pfn < end_pfn)
+ free_bootmem_node(NODE_DATA(this_nid),
+ PFN_PHYS(start_pfn),
+ (end_pfn - start_pfn) << PAGE_SHIFT);
}
-
- return MEMBLOCK_ERROR;
}
-#endif
int __init add_from_early_node_map(struct range *range, int az,
int nr_range, int nid)
{
+ unsigned long start_pfn, end_pfn;
int i;
- u64 start, end;
/* need to go over early_node_map to find out good range for node */
- for_each_active_range_index_in_nid(i, nid) {
- start = early_node_map[i].start_pfn;
- end = early_node_map[i].end_pfn;
- nr_range = add_range(range, az, nr_range, start, end);
- }
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
+ nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
return nr_range;
}
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
- int i;
- int ret;
-
- for_each_active_range_index_in_nid(i, nid) {
- ret = work_fn(early_node_map[i].start_pfn,
- early_node_map[i].end_pfn, data);
- if (ret)
- break;
- }
-}
/**
* sparse_memory_present_with_active_regions - Call memory_present for each active range
* @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -3952,12 +3806,11 @@ void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
*/
void __init sparse_memory_present_with_active_regions(int nid)
{
- int i;
+ unsigned long start_pfn, end_pfn;
+ int i, this_nid;
- for_each_active_range_index_in_nid(i, nid)
- memory_present(early_node_map[i].nid,
- early_node_map[i].start_pfn,
- early_node_map[i].end_pfn);
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
+ memory_present(this_nid, start_pfn, end_pfn);
}
/**
@@ -3974,13 +3827,15 @@ void __init sparse_memory_present_with_active_regions(int nid)
void __meminit get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{
+ unsigned long this_start_pfn, this_end_pfn;
int i;
+
*start_pfn = -1UL;
*end_pfn = 0;
- for_each_active_range_index_in_nid(i, nid) {
- *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
- *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
+ for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ *start_pfn = min(*start_pfn, this_start_pfn);
+ *end_pfn = max(*end_pfn, this_end_pfn);
}
if (*start_pfn == -1UL)
@@ -4083,46 +3938,16 @@ unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
- int i = 0;
- unsigned long prev_end_pfn = 0, hole_pages = 0;
- unsigned long start_pfn;
-
- /* Find the end_pfn of the first active range of pfns in the node */
- i = first_active_region_index_in_nid(nid);
- if (i == -1)
- return 0;
-
- prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
-
- /* Account for ranges before physical memory on this node */
- if (early_node_map[i].start_pfn > range_start_pfn)
- hole_pages = prev_end_pfn - range_start_pfn;
-
- /* Find all holes for the zone within the node */
- for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
-
- /* No need to continue if prev_end_pfn is outside the zone */
- if (prev_end_pfn >= range_end_pfn)
- break;
-
- /* Make sure the end of the zone is not within the hole */
- start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
- prev_end_pfn = max(prev_end_pfn, range_start_pfn);
+ unsigned long nr_absent = range_end_pfn - range_start_pfn;
+ unsigned long start_pfn, end_pfn;
+ int i;
- /* Update the hole size cound and move on */
- if (start_pfn > range_start_pfn) {
- BUG_ON(prev_end_pfn > start_pfn);
- hole_pages += start_pfn - prev_end_pfn;
- }
- prev_end_pfn = early_node_map[i].end_pfn;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+ end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+ nr_absent -= end_pfn - start_pfn;
}
-
- /* Account for ranges past physical memory on this node */
- if (range_end_pfn > prev_end_pfn)
- hole_pages += range_end_pfn -
- max(range_start_pfn, prev_end_pfn);
-
- return hole_pages;
+ return nr_absent;
}
/**
@@ -4143,14 +3968,14 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *ignored)
{
+ unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
+ unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn;
get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
- zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
- node_start_pfn);
- zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
- node_end_pfn);
+ zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
+ zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
@@ -4158,7 +3983,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
-#else
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *zones_size)
@@ -4176,7 +4001,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}
-#endif
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
@@ -4399,10 +4224,10 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
*/
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
@@ -4427,7 +4252,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat, zones_size, zholes_size);
}
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#if MAX_NUMNODES > 1
/*
@@ -4449,170 +4274,6 @@ static inline void setup_nr_node_ids(void)
#endif
/**
- * add_active_range - Register a range of PFNs backed by physical memory
- * @nid: The node ID the range resides on
- * @start_pfn: The start PFN of the available physical memory
- * @end_pfn: The end PFN of the available physical memory
- *
- * These ranges are stored in an early_node_map[] and later used by
- * free_area_init_nodes() to calculate zone sizes and holes. If the
- * range spans a memory hole, it is up to the architecture to ensure
- * the memory is not freed by the bootmem allocator. If possible
- * the range being registered will be merged with existing ranges.
- */
-void __init add_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int i;
-
- mminit_dprintk(MMINIT_TRACE, "memory_register",
- "Entering add_active_range(%d, %#lx, %#lx) "
- "%d entries of %d used\n",
- nid, start_pfn, end_pfn,
- nr_nodemap_entries, MAX_ACTIVE_REGIONS);
-
- mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
-
- /* Merge with existing active regions if possible */
- for (i = 0; i < nr_nodemap_entries; i++) {
- if (early_node_map[i].nid != nid)
- continue;
-
- /* Skip if an existing region covers this new one */
- if (start_pfn >= early_node_map[i].start_pfn &&
- end_pfn <= early_node_map[i].end_pfn)
- return;
-
- /* Merge forward if suitable */
- if (start_pfn <= early_node_map[i].end_pfn &&
- end_pfn > early_node_map[i].end_pfn) {
- early_node_map[i].end_pfn = end_pfn;
- return;
- }
-
- /* Merge backward if suitable */
- if (start_pfn < early_node_map[i].start_pfn &&
- end_pfn >= early_node_map[i].start_pfn) {
- early_node_map[i].start_pfn = start_pfn;
- return;
- }
- }
-
- /* Check that early_node_map is large enough */
- if (i >= MAX_ACTIVE_REGIONS) {
- printk(KERN_CRIT "More than %d memory regions, truncating\n",
- MAX_ACTIVE_REGIONS);
- return;
- }
-
- early_node_map[i].nid = nid;
- early_node_map[i].start_pfn = start_pfn;
- early_node_map[i].end_pfn = end_pfn;
- nr_nodemap_entries = i + 1;
-}
-
-/**
- * remove_active_range - Shrink an existing registered range of PFNs
- * @nid: The node id the range is on that should be shrunk
- * @start_pfn: The new PFN of the range
- * @end_pfn: The new PFN of the range
- *
- * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept near the end physical page range that has already been
- * registered. This function allows an arch to shrink an existing registered
- * range.
- */
-void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int i, j;
- int removed = 0;
-
- printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
- nid, start_pfn, end_pfn);
-
- /* Find the old active region end and shrink */
- for_each_active_range_index_in_nid(i, nid) {
- if (early_node_map[i].start_pfn >= start_pfn &&
- early_node_map[i].end_pfn <= end_pfn) {
- /* clear it */
- early_node_map[i].start_pfn = 0;
- early_node_map[i].end_pfn = 0;
- removed = 1;
- continue;
- }
- if (early_node_map[i].start_pfn < start_pfn &&
- early_node_map[i].end_pfn > start_pfn) {
- unsigned long temp_end_pfn = early_node_map[i].end_pfn;
- early_node_map[i].end_pfn = start_pfn;
- if (temp_end_pfn > end_pfn)
- add_active_range(nid, end_pfn, temp_end_pfn);
- continue;
- }
- if (early_node_map[i].start_pfn >= start_pfn &&
- early_node_map[i].end_pfn > end_pfn &&
- early_node_map[i].start_pfn < end_pfn) {
- early_node_map[i].start_pfn = end_pfn;
- continue;
- }
- }
-
- if (!removed)
- return;
-
- /* remove the blank ones */
- for (i = nr_nodemap_entries - 1; i > 0; i--) {
- if (early_node_map[i].nid != nid)
- continue;
- if (early_node_map[i].end_pfn)
- continue;
- /* we found it, get rid of it */
- for (j = i; j < nr_nodemap_entries - 1; j++)
- memcpy(&early_node_map[j], &early_node_map[j+1],
- sizeof(early_node_map[j]));
- j = nr_nodemap_entries - 1;
- memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
- nr_nodemap_entries--;
- }
-}
-
-/**
- * remove_all_active_ranges - Remove all currently registered regions
- *
- * During discovery, it may be found that a table like SRAT is invalid
- * and an alternative discovery method must be used. This function removes
- * all currently registered regions.
- */
-void __init remove_all_active_ranges(void)
-{
- memset(early_node_map, 0, sizeof(early_node_map));
- nr_nodemap_entries = 0;
-}
-
-/* Compare two active node_active_regions */
-static int __init cmp_node_active_region(const void *a, const void *b)
-{
- struct node_active_region *arange = (struct node_active_region *)a;
- struct node_active_region *brange = (struct node_active_region *)b;
-
- /* Done this way to avoid overflows */
- if (arange->start_pfn > brange->start_pfn)
- return 1;
- if (arange->start_pfn < brange->start_pfn)
- return -1;
-
- return 0;
-}
-
-/* sort the node_map by start_pfn */
-void __init sort_node_map(void)
-{
- sort(early_node_map, (size_t)nr_nodemap_entries,
- sizeof(struct node_active_region),
- cmp_node_active_region, NULL);
-}
-
-/**
* node_map_pfn_alignment - determine the maximum internode alignment
*
* This function should be called after node map is populated and sorted.
@@ -4634,15 +4295,11 @@ void __init sort_node_map(void)
unsigned long __init node_map_pfn_alignment(void)
{
unsigned long accl_mask = 0, last_end = 0;
+ unsigned long start, end, mask;
int last_nid = -1;
- int i;
-
- for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
- int nid = early_node_map[i].nid;
- unsigned long start = early_node_map[i].start_pfn;
- unsigned long end = early_node_map[i].end_pfn;
- unsigned long mask;
+ int i, nid;
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
if (!start || last_nid < 0 || last_nid == nid) {
last_nid = nid;
last_end = end;
@@ -4669,12 +4326,12 @@ unsigned long __init node_map_pfn_alignment(void)
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
- int i;
unsigned long min_pfn = ULONG_MAX;
+ unsigned long start_pfn;
+ int i;
- /* Assuming a sorted map, the first range found has the starting pfn */
- for_each_active_range_index_in_nid(i, nid)
- min_pfn = min(min_pfn, early_node_map[i].start_pfn);
+ for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
+ min_pfn = min(min_pfn, start_pfn);
if (min_pfn == ULONG_MAX) {
printk(KERN_WARNING
@@ -4703,15 +4360,16 @@ unsigned long __init find_min_pfn_with_active_regions(void)
*/
static unsigned long __init early_calculate_totalpages(void)
{
- int i;
unsigned long totalpages = 0;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ unsigned long pages = end_pfn - start_pfn;
- for (i = 0; i < nr_nodemap_entries; i++) {
- unsigned long pages = early_node_map[i].end_pfn -
- early_node_map[i].start_pfn;
totalpages += pages;
if (pages)
- node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
+ node_set_state(nid, N_HIGH_MEMORY);
}
return totalpages;
}
@@ -4766,6 +4424,8 @@ restart:
/* Spread kernelcore memory as evenly as possible throughout nodes */
kernelcore_node = required_kernelcore / usable_nodes;
for_each_node_state(nid, N_HIGH_MEMORY) {
+ unsigned long start_pfn, end_pfn;
+
/*
* Recalculate kernelcore_node if the division per node
* now exceeds what is necessary to satisfy the requested
@@ -4782,13 +4442,10 @@ restart:
kernelcore_remaining = kernelcore_node;
/* Go through each range of PFNs within this node */
- for_each_active_range_index_in_nid(i, nid) {
- unsigned long start_pfn, end_pfn;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
unsigned long size_pages;
- start_pfn = max(early_node_map[i].start_pfn,
- zone_movable_pfn[nid]);
- end_pfn = early_node_map[i].end_pfn;
+ start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
@@ -4890,11 +4547,8 @@ static void check_for_regular_memory(pg_data_t *pgdat)
*/
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
- unsigned long nid;
- int i;
-
- /* Sort early_node_map as initialisation assumes it is sorted */
- sort_node_map();
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
/* Record where the zone boundaries are */
memset(arch_zone_lowest_possible_pfn, 0,
@@ -4941,11 +4595,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
}
/* Print out the early_node_map[] */
- printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
- for (i = 0; i < nr_nodemap_entries; i++)
- printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
- early_node_map[i].start_pfn,
- early_node_map[i].end_pfn);
+ printk("Early memory PFN ranges\n");
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+ printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
/* Initialise every node */
mminit_verify_pageflags_layout();
@@ -4998,7 +4650,7 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* set_dma_reserve - set the specified number of pages reserved in the first zone
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a72006..716eb4acf2fc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1023,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
if (!is_vmalloc_addr(addr))
return __pa(addr);
else
- return page_to_phys(vmalloc_to_page(addr));
+ return page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
} else
- return page_to_phys(pcpu_addr_to_page(addr));
+ return page_to_phys(pcpu_addr_to_page(addr)) +
+ offset_in_page(addr);
}
/**
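
The bug being fixed above: page_to_phys() recovers only the page-aligned base, so the low bits of the virtual address were silently dropped. A standalone illustration (4K pages assumed, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;
		unsigned long addr = 0x123abc;	/* arbitrary address */
		unsigned long base = addr & ~(page_size - 1);

		printf("page base: %#lx\n", base);	/* 0x123000 */
		/* the fix adds back the intra-page offset, 0xabc */
		printf("full addr: %#lx\n", base + (addr & (page_size - 1)));
		return 0;
	}
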
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1d8b32f07139..27be2f0d4cb7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1290,7 +1290,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
- static struct vmap_area *va;
+ struct vmap_area *va;
struct vm_struct *area;
BUG_ON(in_interrupt());
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c7aafc7c5ed4..5f09a578d49d 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -245,9 +245,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (tt_global_entry) {
/* This node is probably going to update its tt table */
tt_global_entry->orig_node->tt_poss_change = true;
- /* The global entry has to be marked as PENDING and has to be
+ /* The global entry has to be marked as ROAMING and has to be
* kept for consistency purposes */
- tt_global_entry->flags |= TT_CLIENT_PENDING;
+ tt_global_entry->flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+
send_roam_adv(bat_priv, tt_global_entry->addr,
tt_global_entry->orig_node);
}
@@ -694,6 +696,7 @@ void tt_global_del(struct bat_priv *bat_priv,
const char *message, bool roaming)
{
struct tt_global_entry *tt_global_entry = NULL;
+ struct tt_local_entry *tt_local_entry = NULL;
tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
@@ -701,15 +704,29 @@ void tt_global_del(struct bat_priv *bat_priv,
if (tt_global_entry->orig_node == orig_node) {
if (roaming) {
- tt_global_entry->flags |= TT_CLIENT_ROAM;
- tt_global_entry->roam_at = jiffies;
- goto out;
+ /* if we are deleting a global entry due to a roam
+ * event, there are two possibilities:
+ * 1) the client roamed from node A to node B => we mark
+ * it with TT_CLIENT_ROAM, we start a timer and we
+ * wait for node B to claim it. In case of timeout
+ * the entry is purged.
+ * 2) the client roamed to us => we can directly delete
+ * the global entry, since it is useless now. */
+ tt_local_entry = tt_local_hash_find(bat_priv,
+ tt_global_entry->addr);
+ if (!tt_local_entry) {
+ tt_global_entry->flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ goto out;
+ }
}
_tt_global_del(bat_priv, tt_global_entry, message);
}
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
}
void tt_global_del_orig(struct bat_priv *bat_priv,
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a961ec..1eea8208b2cc 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -79,17 +79,12 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
static void __bnep_link_session(struct bnep_session *s)
{
- /* It's safe to call __module_get() here because sessions are added
- by the socket layer which has to hold the reference to this module.
- */
- __module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
}
static void __bnep_unlink_session(struct bnep_session *s)
{
list_del(&s->list);
- module_put(THIS_MODULE);
}
static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -530,6 +525,7 @@ static int bnep_session(void *arg)
up_write(&bnep_session_sem);
free_netdev(dev);
+ module_put_and_exit(0);
return 0;
}
@@ -616,9 +612,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
__bnep_link_session(s);
+ __module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
+ module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
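
BNEP here and CMTP below both move to the standard pattern for kthreads that must pin their module: take the reference before kthread_run(), drop it only if thread creation fails, and let the thread itself exit through module_put_and_exit(). A sketch of the pattern (kernel context assumed):

	__module_get(THIS_MODULE);
	task = kthread_run(session_fn, s, "ksessiond");
	if (IS_ERR(task))
		module_put(THIS_MODULE);	/* thread never started */
	/* session_fn() ends with module_put_and_exit(0), so the
	 * reference lives exactly as long as the thread does */
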
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf9e9dc..5a6e634f7fca 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -67,14 +67,12 @@ static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
static void __cmtp_link_session(struct cmtp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &cmtp_session_list);
}
static void __cmtp_unlink_session(struct cmtp_session *session)
{
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -327,6 +325,7 @@ static int cmtp_session(void *arg)
up_write(&cmtp_session_sem);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
@@ -376,9 +375,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
__cmtp_link_session(session);
+ __module_get(THIS_MODULE);
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
session->num);
if (IS_ERR(session->task)) {
+ module_put(THIS_MODULE);
err = PTR_ERR(session->task);
goto unlink;
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index e0af7237cd92..c1c597e3e198 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
goto encrypt;
auth:
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
return 0;
if (!hci_conn_auth(conn, sec_level, auth_type))
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index be84ae33ae36..b84458dcc226 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -613,7 +613,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
if (!test_bit(HCI_RAW, &hdev->flags)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ msecs_to_jiffies(250));
clear_bit(HCI_INIT, &hdev->flags);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6b1f0d..643a41b76e2e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -545,7 +545,7 @@ static void hci_setup(struct hci_dev *hdev)
{
hci_setup_event_mask(hdev);
- if (hdev->lmp_ver > 1)
+ if (hdev->hci_ver > 1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ea94a1eecf2..17b5b1cd9657 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
void *ptr = req->data;
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
}
}
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC option.
+ */
+ rfc.mode = chan->mode;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+ BT_ERR("Expected RFC option was not found, using defaults");
+
done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4e32e18211f9..2d28dfe98389 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
if (list_empty(&s->dlcs)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
+ rfcomm_session_clear_timer(s);
}
break;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index d6ec3720c77e..fa8b8f763580 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
return NULL;
}
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+ return dst->dev->mtu;
+}
+
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
+ .mtu = fake_mtu,
};
/*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM;
+ rt->dst.flags = DST_NOXFRM | DST_NOPEER;
rt->dst.ops = &fake_dst_ops;
}
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de9c79e..e318c7e98042 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
put_online_cpus();
}
+static void flow_cache_flush_task(struct work_struct *work)
+{
+ flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+ schedule_work(&flow_cache_flush_work);
+}
+
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434a4c05..385aefe53648 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
if (count) {
int i;
- if (count > 1<<30) {
+ if (count > INT_MAX)
+ return -EINVAL;
+ count = roundup_pow_of_two(count);
+ if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+ / sizeof(struct rps_dev_flow)) {
/* Enforce a limit to prevent overflow */
return -EINVAL;
}
- count = roundup_pow_of_two(count);
table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
if (!table)
return -ENOMEM;
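
The reordering is what makes the check sound: rounding count up to a power of two can itself exceed the old 1<<30 bound, so the wrap guard must run on the rounded value before the multiplication inside RPS_DEV_FLOW_TABLE_SIZE(). A standalone model of the guard (hypothetical structure sizes):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long header = 16, entry = 8;	/* made-up sizes */
		unsigned long count = (ULONG_MAX - header) / entry + 1;

		/* header + count * entry would wrap, so reject */
		if (count > (ULONG_MAX - header) / entry)
			printf("rejected: %lu entries\n", count);
		return 0;
	}
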
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5e..b23f174ab84c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
- /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
- number of warnings when compiling with -W --ANK
- */
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
atomic_inc(&sk->sk_drops);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0da2afc97f32..99ec116bef14 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
}
}
+ /* no point in waiting if we could not bring up at least one device */
+ if (!ic_first_dev)
+ goto have_carrier;
+
/* wait for a carrier on at least one device */
start = jiffies;
while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 065effd8349a..0b2e7329abda 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip_tunnel_link(ipn, nt);
return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
static int __net_init ipip_init_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
+ struct ip_tunnel *t;
int err;
ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
if ((err = register_netdev(ipn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(ipn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 46af62363b8c..94cdbc55ca7e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
@@ -120,6 +121,7 @@
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -133,6 +135,9 @@ static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
static int redirect_genid;
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
/*
* Interface to generic destination cache.
*/
@@ -830,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
return ONE;
}
+static void rt_check_expire(void)
+{
+ static unsigned int rover;
+ unsigned int i = rover, goal;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
+ unsigned long samples = 0;
+ unsigned long sum = 0, sum2 = 0;
+ unsigned long delta;
+ u64 mult;
+
+ delta = jiffies - expires_ljiffies;
+ expires_ljiffies = jiffies;
+ mult = ((u64)delta) << rt_hash_log;
+ if (ip_rt_gc_timeout > 1)
+ do_div(mult, ip_rt_gc_timeout);
+ goal = (unsigned int)mult;
+ if (goal > rt_hash_mask)
+ goal = rt_hash_mask + 1;
+ for (; goal > 0; goal--) {
+ unsigned long tmo = ip_rt_gc_timeout;
+ unsigned long length;
+
+ i = (i + 1) & rt_hash_mask;
+ rthp = &rt_hash_table[i].chain;
+
+ if (need_resched())
+ cond_resched();
+
+ samples++;
+
+ if (rcu_dereference_raw(*rthp) == NULL)
+ continue;
+ length = 0;
+ spin_lock_bh(rt_hash_lock_addr(i));
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+ prefetch(rth->dst.rt_next);
+ if (rt_is_expired(rth)) {
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ continue;
+ }
+ if (rth->dst.expires) {
+ /* Entry is expired even if it is in use */
+ if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+ tmo >>= 1;
+ rthp = &rth->dst.rt_next;
+ /*
+ * We only count entries on
+ * a chain with equal hash inputs once
+ * so that entries for different QOS
+ * levels and other non-hash-input
+ * attributes don't unfairly skew
+ * the length computation
+ */
+ length += has_noalias(rt_hash_table[i].chain, rth);
+ continue;
+ }
+ } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+ goto nofree;
+
+ /* Cleanup aged off entries. */
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ }
+ spin_unlock_bh(rt_hash_lock_addr(i));
+ sum += length;
+ sum2 += length*length;
+ }
+ if (samples) {
+ unsigned long avg = sum / samples;
+ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+ rt_chain_length_max = max_t(unsigned long,
+ ip_rt_gc_elasticity,
+ (avg + 4*sd) >> FRACT_BITS);
+ }
+ rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+ rt_check_expire();
+ schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
/*
* Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
@@ -1271,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
struct rtable *rt = (struct rtable *) dst;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
if (rt->peer == NULL)
rt_bind_peer(rt, rt->rt_dst, 1);
@@ -1282,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
iph->id = htons(inet_getid(rt->peer, more));
return;
}
- } else
+ } else if (!rt)
printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
__builtin_return_address(0));
@@ -3179,6 +3275,13 @@ static ctl_table ipv4_route_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
{
+ .procname = "gc_interval",
+ .data = &ip_rt_gc_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
@@ -3388,6 +3491,11 @@ int __init ip_rt_init(void)
devinet_init();
ip_fib_init();
+ INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+ expires_ljiffies = jiffies;
+ schedule_delayed_work(&expires_work,
+ net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
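
The rt_check_expire() code above sizes rt_chain_length_max from the observed hash-chain lengths: mean plus four standard deviations, derived from the running sum and sum of squares. A self-contained sketch of that estimate in plain C; int_sqrt_approx is a hypothetical stand-in for the kernel's int_sqrt(), and the kernel's additional FRACT_BITS fixed-point shift is omitted:

#include <stdio.h>

static unsigned long int_sqrt_approx(unsigned long x)
{
    unsigned long r = 0;
    while ((r + 1) * (r + 1) <= x)
        r++;
    return r;
}

int main(void)
{
    unsigned long lengths[] = { 3, 5, 4, 6, 20 };   /* one outlier chain */
    unsigned long sum = 0, sum2 = 0, samples = 5;

    for (int i = 0; i < 5; i++) {
        sum  += lengths[i];
        sum2 += lengths[i] * lengths[i];
    }

    unsigned long avg = sum / samples;              /* 7 */
    unsigned long sd  = int_sqrt_approx(sum2 / samples - avg * avg);
    printf("limit = %lu\n", avg + 4 * sd);          /* mean + 4 sigma = 31 */
    return 0;
}
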
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cf88df82e2c2..36806def8cfd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1805,7 +1805,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
return ERR_PTR(-EACCES);
/* Add default multicast route */
- addrconf_add_mroute(dev);
+ if (!(dev->flags & IFF_LOOPBACK))
+ addrconf_add_mroute(dev);
/* Add link local route */
addrconf_add_lroute(dev);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 84d0bd5cac93..ec562713db9b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
static atomic_t ipv6_fragmentation_id;
int old, new;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
struct inet_peer *peer;
if (!rt->rt6i_peer)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3399dd326287..b582a0a0f1c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -728,7 +728,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
int attempts = !in_softirq();
if (!(rt->rt6i_flags&RTF_GATEWAY)) {
- if (rt->rt6i_dst.plen != 128 &&
+ if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
ipv6_addr_copy(&rt->rt6i_gateway, daddr);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a7a18602a046..96f3623618e3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip6_tunnel_link(sitn, nt);
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
static int __net_init sit_init_net(struct net *net)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
+ struct ip_tunnel *t;
int err;
sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
if ((err = register_netdev(sitn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(sitn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index dfd3a648a551..a18e6c3d36e3 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
copied += used;
len -= used;
+ /* For non-stream protocols we get one packet per recvmsg call */
+ if (sk->sk_type != SOCK_STREAM)
+ goto copy_uaddr;
+
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, 0);
*seq = 0;
}
- /* For non-stream protocols we get one packet per recvmsg call */
- if (sk->sk_type != SOCK_STREAM)
- goto copy_uaddr;
-
/* Partial read */
if (used + offset < skb->len)
continue;
@@ -857,6 +857,12 @@ copy_uaddr:
}
if (llc_sk(sk)->cmsg_flags)
llc_cmsg_rcv(msg, skb);
+
+ if (!(flags & MSG_PEEK)) {
+ sk_eat_skb(sk, skb, 0);
+ *seq = 0;
+ }
+
goto out;
}
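
The af_llc.c change reorders the datagram path so the skb is consumed (sk_eat_skb()) only after copy_uaddr and llc_cmsg_rcv() have read the source address out of it; before the fix, non-stream sockets could touch the skb after it had been eaten. The hazard, as a hypothetical userspace analogue:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *skb = strdup("aa:bb:cc:dd:ee:ff");  /* stands in for the packet */
    char addr[18];

    /* Buggy order: release first, then read (use-after-free):
     *     free(skb);
     *     memcpy(addr, skb, 18);
     */
    memcpy(addr, skb, 18);   /* fixed order: copy the address out ... */
    free(skb);               /* ... then release the packet */
    printf("%s\n", addr);
    return 0;
}
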
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b064e4df12c6..2e4b961648d4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -303,6 +303,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
__release(agg_queue);
}
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+ unsigned long flags;
+
+ ieee80211_stop_queue_agg(local, tid);
+
+ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+ " from the pending queue\n", tid))
+ return;
+
+ if (!skb_queue_empty(&tid_tx->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* copy over remaining packets */
+ skb_queue_splice_tail_init(&tid_tx->pending,
+ &local->pending[queue]);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+{
+ ieee80211_wake_queue_agg(local, tid);
+}
+
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
@@ -314,19 +346,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
- * While we're asking the driver about the aggregation,
- * stop the AC queue so that we don't have to worry
- * about frames that came in while we were doing that,
- * which would require us to put them to the AC pending
- * afterwards which just makes the code more complex.
+ * Start queuing up packets for this aggregation session.
+ * We're going to release them once the driver is OK with
+ * that.
*/
- ieee80211_stop_queue_agg(local, tid);
-
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/*
- * make sure no packets are being processed to get
- * valid starting sequence number
+ * Make sure no packets are being processed. This ensures that
+ * we have a valid starting sequence number and that in-flight
+ * packets have been flushed out and no packets for this TID
+ * will go into the driver during the ampdu_action call.
*/
synchronize_net();
@@ -340,17 +370,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
" tid %d\n", tid);
#endif
spin_lock_bh(&sta->lock);
+ ieee80211_agg_splice_packets(local, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
+ ieee80211_agg_splice_finish(local, tid);
spin_unlock_bh(&sta->lock);
- ieee80211_wake_queue_agg(local, tid);
kfree_rcu(tid_tx, rcu_head);
return;
}
- /* we can take packets again now */
- ieee80211_wake_queue_agg(local, tid);
-
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -466,38 +494,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
- struct tid_ampdu_tx *tid_tx, u16 tid)
-{
- int queue = ieee80211_ac_from_tid(tid);
- unsigned long flags;
-
- ieee80211_stop_queue_agg(local, tid);
-
- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
- " from the pending queue\n", tid))
- return;
-
- if (!skb_queue_empty(&tid_tx->pending)) {
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- /* copy over remaining packets */
- skb_queue_splice_tail_init(&tid_tx->pending,
- &local->pending[queue]);
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
-{
- ieee80211_wake_queue_agg(local, tid);
-}
-
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 12571fb2881c..29fa5badde75 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -616,7 +616,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
if ((cp) && (!cp->dest)) {
dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
cp->dport, &cp->vaddr, cp->vport,
- cp->protocol, cp->fwmark);
+ cp->protocol, cp->fwmark, cp->flags);
ip_vs_bind_dest(cp, dest);
return dest;
} else
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 008bf97cc91a..e1a66cf37f9a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -619,15 +619,21 @@ struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
- __be16 vport, __u16 protocol, __u32 fwmark)
+ __be16 vport, __u16 protocol, __u32 fwmark,
+ __u32 flags)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
+ __be16 port = dport;
svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
- dest = ip_vs_lookup_dest(svc, daddr, dport);
+ if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
+ port = 0;
+ dest = ip_vs_lookup_dest(svc, daddr, port);
+ if (!dest)
+ dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
if (dest)
atomic_inc(&dest->refcnt);
ip_vs_service_put(svc);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 3cdd479f9b5d..2b6678c0ce14 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -740,7 +740,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
* but still handled.
*/
dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
- param->vport, protocol, fwmark);
+ param->vport, protocol, fwmark, flags);
/* Set the appropriate activity flag */
if (protocol == IPPROTO_TCP) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ef21b221f036..257e77256c5c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -135,7 +135,7 @@ nla_put_failure:
static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
- long timeout = (ct->timeout.expires - jiffies) / HZ;
+ long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
if (timeout < 0)
timeout = 0;
@@ -1358,12 +1358,15 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
nf_ct_protonum(ct));
if (helper == NULL) {
rcu_read_unlock();
+ spin_unlock_bh(&nf_conntrack_lock);
#ifdef CONFIG_MODULES
if (request_module("nfct-helper-%s", helpname) < 0) {
+ spin_lock_bh(&nf_conntrack_lock);
err = -EOPNOTSUPP;
goto err1;
}
+ spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname,
nf_ct_l3num(ct),
@@ -1638,7 +1641,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
{
struct nf_conn *master = exp->master;
- long timeout = (exp->timeout.expires - jiffies) / HZ;
+ long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
struct nf_conn_help *help;
if (timeout < 0)
@@ -1869,25 +1872,30 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
err = -ENOMEM;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL)
+ if (skb2 == NULL) {
+ nf_ct_expect_put(exp);
goto out;
+ }
rcu_read_lock();
err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
rcu_read_unlock();
+ nf_ct_expect_put(exp);
if (err <= 0)
goto free;
- nf_ct_expect_put(exp);
+ err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (err < 0)
+ goto out;
- return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ return 0;
free:
kfree_skb(skb2);
out:
- nf_ct_expect_put(exp);
- return err;
+ /* this avoids a loop in nfnetlink. */
+ return err == -EAGAIN ? -ENOBUFS : err;
}
static int
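
Both ctnetlink timeout hunks replace an unsigned subtraction with a signed one: once expires has already passed, (expires - jiffies) computed unsigned wraps to a huge value instead of going negative, so the `timeout < 0` clamp never fires. Illustrative C, with small numbers standing in for real jiffies values:

#include <stdio.h>

int main(void)
{
    unsigned long jiffies = 4000;      /* "now" */
    unsigned long expires = 1000;      /* deadline already passed */

    unsigned long bogus = (expires - jiffies) / 100;        /* wraps huge */
    long timeout = ((long)expires - (long)jiffies) / 100;   /* -30 */

    if (timeout < 0)
        timeout = 0;                   /* the clamp now actually fires */
    printf("%lu vs %ld\n", bogus, timeout);
    return 0;
}
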
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 5b138506690e..9ddf1c3bfb39 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
break;
}
- if (sinfo->count.to)
+ if (sinfo->count.to >= sinfo->count.from)
return what <= sinfo->count.to && what >= sinfo->count.from;
- else
- return what >= sinfo->count.from;
+ else /* inverted */
+ return what < sinfo->count.to || what > sinfo->count.from;
}
static int connbytes_mt_check(const struct xt_mtchk_param *par)
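
The connbytes hunk gives `to < from` a meaning: an inverted range matching everything outside (to, from), rather than silently ignoring `to`. A direct sketch of both cases:

#include <stdio.h>

static int connbytes_match(unsigned long long what,
                           unsigned long long from, unsigned long long to)
{
    if (to >= from)                        /* ordinary inclusive range */
        return what <= to && what >= from;
    else                                   /* inverted: outside (to, from) */
        return what < to || what > from;
}

int main(void)
{
    printf("%d\n", connbytes_match(50, 10, 100));   /* 1: inside 10..100 */
    printf("%d\n", connbytes_match(50, 100, 10));   /* 0: inside the hole */
    printf("%d\n", connbytes_match(5, 100, 10));    /* 1: below the hole */
    return 0;
}
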
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 3925c6578767..ea66034499ce 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
__u32 timeout)
{
int rc = 0;
- unsigned long completion_rc;
+ long completion_rc;
ndev->req_status = NCI_REQ_PEND;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34d39d0..d9d4970b9b07 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
if (snaplen > res)
snaplen = res;
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
- atomic_read(&sk->sk_rmem_alloc) + skb->truesize
- < (unsigned)sk->sk_rcvbuf) {
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
@@ -2450,8 +2448,12 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
{
struct packet_sock *po = pkt_sk(sk);
- if (po->fanout)
+ if (po->fanout) {
+ if (dev)
+ dev_put(dev);
+
return -EINVAL;
+ }
lock_sock(sk);
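
The packet_do_bind() caller path has already taken a device reference when this check runs; returning -EINVAL for fanout sockets without dropping it leaked that reference. The shape of the fix, with a hypothetical refcount stand-in for dev_hold()/dev_put():

#include <stdio.h>

static int refcount = 1;

static void dev_put_mock(void) { refcount--; }

static int do_bind(int have_dev, int fanout)
{
    if (have_dev)
        refcount++;               /* reference taken before the check */
    if (fanout) {
        if (have_dev)
            dev_put_mock();       /* the fix: drop it on the early return */
        return -1;                /* -EINVAL */
    }
    if (have_dev)
        dev_put_mock();           /* normal path releases it too */
    return 0;
}

int main(void)
{
    do_bind(1, 1);
    printf("refcount = %d\n", refcount);   /* back to 1: no leak */
    return 0;
}
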
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index b9493a09a870..6cd8ddfb512d 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct gred_sched_data *q;
if (table->tab[dp] == NULL) {
- table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
+ table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
if (table->tab[dp] == NULL)
return -ENOMEM;
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index f88256cbacbf..28de43092330 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
if (!netif_is_multiqueue(dev))
return -EOPNOTSUPP;
- if (nla_len(opt) < sizeof(*qopt))
+ if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
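
mqprio_init() can be invoked with opt == NULL, so the length check must be preceded by a NULL test; since && short-circuits, nla_len() is never reached for a NULL attribute. A sketch with hypothetical stand-ins for the netlink helpers:

#include <stddef.h>
#include <stdio.h>

struct nlattr_mock { int len; };

static int nla_len_mock(const struct nlattr_mock *a) { return a->len; }

static int init(const struct nlattr_mock *opt)
{
    if (!opt || nla_len_mock(opt) < 8)   /* NULL checked first */
        return -1;                       /* -EINVAL */
    return 0;
}

int main(void)
{
    printf("%d\n", init(NULL));          /* -1, no NULL dereference */
    return 0;
}
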
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index eb3b9a86c6ed..a4ab207cdc59 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -488,7 +488,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
return -EINVAL;
s = sizeof(struct disttable) + n * sizeof(s16);
- d = kmalloc(s, GFP_KERNEL);
+ d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
if (!d)
d = vmalloc(s);
if (!d)
@@ -501,9 +501,10 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
root_lock = qdisc_root_sleeping_lock(sch);
spin_lock_bh(root_lock);
- dist_free(q->delay_dist);
- q->delay_dist = d;
+ swap(q->delay_dist, d);
spin_unlock_bh(root_lock);
+
+ dist_free(d);
return 0;
}
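
The netem hunk swaps the new distribution table in under the qdisc root lock and frees the old one only after the lock is dropped, since dist_free() may vfree() and sleep. The pattern in userspace form, with a pthread mutex standing in for the root lock; a sketch, not the kernel locking API:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static void *delay_dist;              /* stands in for q->delay_dist */

static void replace_dist(void *d)
{
    pthread_mutex_lock(&root_lock);
    void *old = delay_dist;           /* swap(q->delay_dist, d) ... */
    delay_dist = d;
    pthread_mutex_unlock(&root_lock);
    free(old);                        /* ... free outside the lock */
}

int main(void)
{
    replace_dist(malloc(32));
    replace_dist(malloc(32));
    free(delay_dist);
    return 0;
}
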
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 103343408593..7b0325459e71 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -817,11 +817,11 @@ skip_unblock:
static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
unsigned long mask;
- uint32_t limit, roundedF;
+ u64 limit, roundedF;
int slot_shift = cl->grp->slot_shift;
roundedF = qfq_round_down(cl->F, slot_shift);
- limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
+ limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
/* timestamp was stale */
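
qfq timestamps (cl->F, q->V) are u64, so computing `limit` in 32 bits truncates once the virtual time passes 2^32, and `1UL << slot_shift` can itself overflow on 32-bit builds. The truncation, demonstrated with an illustrative value:

#include <stdio.h>
#include <stdint.h>

static uint64_t round_down(uint64_t ts, int shift)
{
    return (ts >> shift) << shift;
}

int main(void)
{
    uint64_t V = 0x123456789ULL;   /* virtual time past 2^32 */
    int shift = 10;

    uint32_t limit32 = (uint32_t)round_down(V, shift) + (1U << shift);
    uint64_t limit64 = round_down(V, shift) + (1ULL << shift);

    printf("%u\n%llu\n", limit32, (unsigned long long)limit64);
    return 0;
}
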
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 152b5b3c3fff..acd2edbc073e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- (unsigned long)sp->autoclose * HZ;
+ min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 08b3cead6503..817174eb5f41 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
/* Keep track of how many bytes are in flight to the receiver. */
asoc->outqueue.outstanding_bytes += datasize;
- /* Update our view of the receiver's rwnd. Include sk_buff overhead
- * while updating peer.rwnd so that it reduces the chances of a
- * receiver running out of receive buffer space even when receive
- * window is still open. This can happen when a sender is sending
- * sending small messages.
- */
- datasize += sizeof(struct sk_buff);
+ /* Update our view of the receiver's rwnd. */
if (datasize < rwnd)
rwnd -= datasize;
else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 14c2b06028ff..cfeb1d4a1ee6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
chunk->transport->flight_size -=
sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
}
continue;
}
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
* (Section 7.2.4)), add the data size of those
* chunks to the rwnd.
*/
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
if (chunk->transport)
transport->flight_size -= sctp_data_size(chunk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 61b9fca5a173..6f6ad8686833 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
+ /* Initialize maximum autoclose timeout. */
+ sctp_max_autoclose = INT_MAX / HZ;
+
/* Initialize handle used for association ids. */
idr_init(&sctp_assocs_id);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 13bf5fcdbff1..54a7cd2fdd7a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
return 0;
}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b3952961b85..60ffbd067ff7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
static int sack_timer_max = 500;
static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
.extra1 = &one,
.extra2 = &rwnd_scale_max,
},
+ {
+ .procname = "max_autoclose",
+ .data = &sctp_max_autoclose,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .extra1 = &max_autoclose_min,
+ .extra2 = &max_autoclose_max,
+ },
{ /* sentinel */ }
};
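
Across the sctp hunks, the per-socket MAX_SCHEDULE_TIMEOUT clamp is replaced by a tunable sctp_max_autoclose (default INT_MAX / HZ, exposed via the max_autoclose sysctl above): the association timeout is autoclose * HZ in jiffies, so unclamped user input can overflow the multiplication on 32-bit. A sketch of the clamp; the HZ value here is an assumption for illustration:

#include <stdio.h>
#include <limits.h>

#define HZ 250UL

int main(void)
{
    unsigned long max_autoclose = INT_MAX / HZ;   /* sctp_init() default */
    unsigned long autoclose = 4000000000UL;       /* hostile setsockopt value */

    unsigned long clamped = (autoclose < max_autoclose ?
                             autoclose : max_autoclose) * HZ;  /* min_t(...) * HZ */

    /* bounded to INT_MAX jiffies; unclamped, autoclose * HZ would wrap
     * on a 32-bit unsigned long */
    printf("%lu\n", clamped);
    return 0;
}
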
diff --git a/net/socket.c b/net/socket.c
index 2877647f347b..a0053750e37a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2883,7 +2883,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
return dev_ioctl(net, cmd, uifr);
default:
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
}
@@ -3210,20 +3210,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
return sock_do_ioctl(net, sock, cmd, arg);
}
- /* Prevent warning from compat_sys_ioctl, these always
- * result in -EINVAL in the native case anyway. */
- switch (cmd) {
- case SIOCRTMSG:
- case SIOCGIFCOUNT:
- case SIOCSRARP:
- case SIOCGRARP:
- case SIOCDRARP:
- case SIOCSIFLINK:
- case SIOCGIFSLAVE:
- case SIOCSIFSLAVE:
- return -EINVAL;
- }
-
return -ENOIOCTLCMD;
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d12ffa545811..00a1a2acd587 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+ /* Initialize retry counters */
+ task->tk_garb_retry = 2;
+ task->tk_cred_retry = 2;
+ task->tk_rebind_retry = 2;
+
+ /* starting timestamp */
+ task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+ task->tk_timeouts = 0;
+ task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+ rpc_init_task_statistics(task);
+}
+
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
+ rpc_reset_task_statistics(task);
}
}
}
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
- /* Initialize retry counters */
- task->tk_garb_retry = 2;
- task->tk_cred_retry = 2;
- task->tk_rebind_retry = 2;
-
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
- /* starting timestamp */
- task->tk_start = ktime_get();
+ rpc_init_task_statistics(task);
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e45a5fc..c64c0ef519b5 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@ out_init_req:
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (xprt_dynamic_free_slot(xprt, req))
- return;
-
- memset(req, 0, sizeof(*req)); /* mark unused */
-
spin_lock(&xprt->reserve_lock);
- list_add(&req->rq_list, &xprt->free);
+ if (!xprt_dynamic_free_slot(xprt, req)) {
+ memset(req, 0, sizeof(*req)); /* mark unused */
+ list_add(&req->rq_list, &xprt->free);
+ }
rpc_wake_up_next(&xprt->backlog);
spin_unlock(&xprt->reserve_lock);
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2118d6446630..9049a5caeb25 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
{
struct dst_entry *head, *next;
- flow_cache_flush();
-
spin_lock_bh(&xfrm_policy_sk_bundle_lock);
head = xfrm_policy_sk_bundles;
xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
}
}
+static void xfrm_garbage_collect(struct net *net)
+{
+ flow_cache_flush();
+ __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+ flow_cache_flush_deferred();
+ __xfrm_garbage_collect(net);
+}
+
static void xfrm_init_pmtu(struct dst_entry *dst)
{
do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
if (likely(dst_ops->neigh_lookup == NULL))
dst_ops->neigh_lookup = xfrm_neigh_lookup;
if (likely(afinfo->garbage_collect == NULL))
- afinfo->garbage_collect = __xfrm_garbage_collect;
+ afinfo->garbage_collect = xfrm_garbage_collect_deferred;
xfrm_policy_afinfo[afinfo->family] = afinfo;
}
write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
switch (event) {
case NETDEV_DOWN:
- __xfrm_garbage_collect(dev_net(dev));
+ xfrm_garbage_collect(dev_net(dev));
}
return NOTIFY_DONE;
}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ba573fe7c74d..914833d99b06 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
--directory=$(srctree) --directory=$(objtree) \
--output $(obj)/config.pot
$(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
- $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
- $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \
+ $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \
+ $(srctree)/arch/*/um/Kconfig`; \
do \
echo " GEN $$i"; \
$(obj)/kxgettext $$i \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
done )
$(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
--output $(obj)/linux.pot
- $(Q)rm -f $(srctree)/arch/um/Kconfig
$(Q)rm -f $(obj)/config.pot
PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 5dd5b140242c..8738deff26fa 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -27,20 +27,35 @@ static int evmkey_len = MAX_KEY_SIZE;
struct crypto_shash *hmac_tfm;
+static DEFINE_MUTEX(mutex);
+
static struct shash_desc *init_desc(void)
{
int rc;
struct shash_desc *desc;
if (hmac_tfm == NULL) {
+ mutex_lock(&mutex);
+ if (hmac_tfm)
+ goto out;
hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac_tfm)) {
pr_err("Can not allocate %s (reason: %ld)\n",
evm_hmac, PTR_ERR(hmac_tfm));
rc = PTR_ERR(hmac_tfm);
hmac_tfm = NULL;
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+ if (rc) {
+ crypto_free_shash(hmac_tfm);
+ hmac_tfm = NULL;
+ mutex_unlock(&mutex);
return ERR_PTR(rc);
}
+out:
+ mutex_unlock(&mutex);
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@ static struct shash_desc *init_desc(void)
desc->tfm = hmac_tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
- if (rc)
- goto out;
rc = crypto_shash_init(desc);
-out:
if (rc) {
kfree(desc);
return ERR_PTR(rc);
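
init_desc() now allocates and keys the HMAC transform exactly once, re-checking hmac_tfm under a mutex so concurrent first callers don't race crypto_alloc_shash()/crypto_shash_setkey(). The shape of that double-checked initialization as a pthread sketch; the kernel version relies on a single writer ever publishing the pointer:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *hmac_tfm;              /* allocated and keyed exactly once */

static void *get_tfm(void)
{
    if (!hmac_tfm) {
        pthread_mutex_lock(&lock);
        if (!hmac_tfm)              /* re-check: another thread may have won */
            hmac_tfm = calloc(1, 64);   /* alloc + setkey happen here */
        pthread_mutex_unlock(&lock);
    }
    return hmac_tfm;
}

int main(void)
{
    return get_tfm() ? 0 : 1;
}
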
diff --git a/security/security.c b/security/security.c
index 0c6cc69c8f86..e2f684aeb70c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -381,7 +381,7 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
void **value, size_t *len)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_init_security(inode, dir, qstr, name, value,
len);
}
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 0b62bd112461..7b9eb1faf68b 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
struct sel_netport *tail;
tail = list_entry(
- rcu_dereference(sel_netport_hash[idx].list.prev),
+ rcu_dereference_protected(
+ sel_netport_hash[idx].list.prev,
+ lockdep_is_held(&sel_netport_lock)),
struct sel_netport, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6e5addeb236b..73516f69ac7c 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
/* AC97 v2.2 specifications says minimum 1 us. */
udelay(2);
gpio_set_value(chip->reset_pin, 1);
+ } else {
+ ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+ udelay(2);
+ ac97c_writel(chip, MR, AC97C_MR_ENA);
}
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7d98240def0b..c2f79e63124d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2507,6 +2507,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
@@ -2970,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+ /* ICH */
{ PCI_DEVICE(0x8086, 0x2668),
.driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
AZX_DCAPS_BUFSIZE }, /* ICH6 */
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index eeb25d529e30..616678fde486 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4929,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
set_hp_led_gpio(codec);
return 1;
}
+ /* BIOS bug: unfilled OEM string */
+ if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+ set_hp_led_gpio(codec);
+ spec->gpio_led_polarity = 1;
+ return 1;
+ }
}
/*
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4584514d93d4..fa787d45d74a 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CX20442
select SND_SOC_DA7210 if I2C
select SND_SOC_DFBMCS320
- select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+ select SND_SOC_JZ4740_CODEC
select SND_SOC_LM4857 if I2C
select SND_SOC_MAX98088 if I2C
select SND_SOC_MAX98095 if I2C
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index e373f8f06907..3e1f4e172bfb 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/delay.h>
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index bfdc52370ad0..d3b0a20744f1 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -235,6 +235,7 @@ static int wm8776_hw_params(struct snd_pcm_substream *substream,
switch (snd_pcm_format_width(params_format(params))) {
case 16:
iface = 0;
+ break;
case 20:
iface = 0x10;
break;
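
The wm8776 hunk adds the break that keeps a 16-bit format from falling through and picking up the 20-bit interface value. The classic bug, reduced to a runnable sketch:

#include <stdio.h>

static int iface_for_width(int width)
{
    int iface = -1;

    switch (width) {
    case 16:
        iface = 0;
        break;          /* without this, 16 falls through to 0x10 */
    case 20:
        iface = 0x10;
        break;
    }
    return iface;
}

int main(void)
{
    printf("0x%x\n", iface_for_width(16));   /* 0x0, not 0x10 */
    return 0;
}
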
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 0293763debe5..5a14d5c0e0e1 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
}
if (memcmp(fw->data, "WMFW", 4) != 0) {
+ memcpy(&data32, fw->data, sizeof(data32));
+ data32 = be32_to_cpu(data32);
dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
name, data32);
goto err;
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 645c980d6b80..a33b04d17195 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
break;
case 24576000:
ratediv = WM8996_SYSCLK_DIV;
+ wm8996->sysclk /= 2;
case 12288000:
snd_soc_update_bits(codec, WM8996_AIF_RATE,
WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index dea5aa4aa647..f39d7dd9fbcb 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
platform_driver_unregister(&mxs_pcm_driver);
}
module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 7fbeaec06eb4..1c57f6630a48 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 65c124831a00..c664e33fb6d7 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
snd_soc_card_hx4700.dev = &pdev->dev;
ret = snd_soc_register_card(&snd_soc_card_hx4700);
if (ret)
- return ret;
+ gpio_free_array(hx4700_audio_gpios,
+ ARRAY_SIZE(hx4700_audio_gpios));
- return 0;
+ return ret;
}
static int __devexit hx4700_audio_remove(struct platform_device *pdev)
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 1826acf20f7c..8e523fd9189e 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int err;
/* These endpoints are not being used. */
snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
.dai_link = &jive_dai,
.num_links = 1,
- .dapm_widgtets = wm8750_dapm_widgets,
+ .dapm_widgets = wm8750_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
index 3a0dbfc793f0..8bd1dc5706bf 100644
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/module.h>
#include <sound/soc.h>
static struct snd_soc_card smdk2443;
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index fe6762ed56bd..c89f9e1453f7 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -22,7 +22,7 @@ OPTIONS
-------
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-d::
--dsos=<dso[,dso...]>::
@@ -66,7 +66,7 @@ OPTIONS
used. This interfaces starts by centering on the line with more
samples, TAB/UNTAB cycles through the lines with more samples.
--c::
+-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
be provided as a comma-separated list with no space: 0,1. Ranges of
CPUs are specified with -: 0-2. Default is to report samples on all
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt
index cc22325ffd1b..25c52efcc7f0 100644
--- a/tools/perf/Documentation/perf-buildid-list.txt
+++ b/tools/perf/Documentation/perf-buildid-list.txt
@@ -26,7 +26,7 @@ OPTIONS
Show only DSOs with hits.
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-f::
--force::
Don't do ownership validation.
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 0cada9e053dc..0507ec7bad71 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -18,7 +18,7 @@ OPTIONS
-------
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index a52fcde894c7..7c8fbbf3f61c 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -23,7 +23,7 @@ OPTIONS
-------
-i <file>::
--input=<file>::
- Select the input file (default: perf.data)
+ Select the input file (default: perf.data unless stdin is a fifo)
--caller::
Show per-callsite statistics
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 4a26a2f3a6a3..d6b2a4f2108b 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -29,7 +29,7 @@ COMMON OPTIONS
-i::
--input=<file>::
- Input file name.
+ Input file name. (default: perf.data unless stdin is a fifo)
-v::
--verbose::
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 5a520f825295..2937f7e14bb7 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -89,7 +89,7 @@ OPTIONS
-m::
--mmap-pages=::
- Number of mmap data pages.
+ Number of mmap data pages. Must be a power of two.
-g::
--call-graph::
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 212f24d672e1..9b430e98712e 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -19,7 +19,7 @@ OPTIONS
-------
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-v::
--verbose::
@@ -39,7 +39,7 @@ OPTIONS
-T::
--threads::
Show per-thread event counters
--C::
+-c::
--comms=::
Only consider symbols in these comms. CSV that understands
file://filename entries.
@@ -80,9 +80,10 @@ OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
--g [type,min,order]::
+-g [type,min[,limit],order]::
--call-graph::
- Display call chains using type, min percent threshold and order.
+ Display call chains using type, min percent threshold, optional print
+ limit and order.
type can be either:
- flat: single column, linear exposure of call chains.
- graph: use a graph tree, displaying absolute overhead rates.
@@ -128,7 +129,7 @@ OPTIONS
--symfs=<directory>::
Look for files with symbols relative to this directory.
--c::
+-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
be provided as a comma-separated list with no space: 0,1. Ranges of
CPUs are specified with -: 0-2. Default is to report samples on all
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 5b212b57f70b..8ff4df956951 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -40,7 +40,7 @@ OPTIONS
-------
-i::
--input=<file>::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-v::
--verbose::
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index dec87ecb530e..2f6cef43da25 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -106,7 +106,7 @@ OPTIONS
-i::
--input=::
- Input file name.
+ Input file name. (default: perf.data unless stdin is a fifo)
-d::
--debug-mode::
@@ -182,12 +182,17 @@ OPTIONS
--hide-call-graph::
When printing symbols do not display call chain.
--c::
+-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
be provided as a comma-separated list with no space: 0,1. Ranges of
CPUs are specified with -: 0-2. Default is to report samples on all
CPUs.
+-c::
+--comms=::
+ Only display events for these comms. CSV that understands
+ file://filename entries.
+
-I::
--show-info::
Display extended information about the perf.data file. This adds
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index 2c3b462f64b0..b24ac40fcd58 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -8,13 +8,19 @@ perf-test - Runs sanity tests.
SYNOPSIS
--------
[verse]
-'perf test <options>'
+'perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]'
DESCRIPTION
-----------
This command does assorted sanity tests, initially through linked routines but
also will look for a directory with more tests in the form of scripts.
+To get a list of available tests use 'perf test list'; specifying a test name
+fragment will show all tests that have it.
+
+To run just specific tests, specify test name fragments or the numbers obtained
+from 'perf test list'.
+
OPTIONS
-------
-v::
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index d7b79e2ba2ad..1632b0efc757 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -27,7 +27,7 @@ OPTIONS
Select the output file (default: output.svg)
-i::
--input=::
- Select the input file (default: perf.data)
+ Select the input file (default: perf.data unless stdin is a fifo)
-w::
--width=::
Select the width of the SVG file (default: 1000)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b98e3075646b..ac86d67b636e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -278,6 +278,7 @@ LIB_H += util/strbuf.h
LIB_H += util/strlist.h
LIB_H += util/strfilter.h
LIB_H += util/svghelper.h
+LIB_H += util/tool.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 46b4c24f338e..214ba7f9f577 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,32 +27,32 @@
#include "util/sort.h"
#include "util/hist.h"
#include "util/session.h"
+#include "util/tool.h"
#include <linux/bitmap.h>
-static char const *input_name = "perf.data";
-
-static bool force, use_tui, use_stdio;
-
-static bool full_paths;
-
-static bool print_line;
-
-static const char *sym_hist_filter;
-
-static const char *cpu_list;
-static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+struct perf_annotate {
+ struct perf_tool tool;
+ char const *input_name;
+ bool force, use_tui, use_stdio;
+ bool full_paths;
+ bool print_line;
+ const char *sym_hist_filter;
+ const char *cpu_list;
+ DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+};
-static int perf_evlist__add_sample(struct perf_evlist *evlist,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct addr_location *al)
+static int perf_evsel__add_sample(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct addr_location *al,
+ struct perf_annotate *ann)
{
struct hist_entry *he;
int ret;
- if (sym_hist_filter != NULL &&
- (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
+ if (ann->sym_hist_filter != NULL &&
+ (al->sym == NULL ||
+ strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
/* We're only interested in a symbol named sym_hist_filter */
if (al->sym != NULL) {
rb_erase(&al->sym->rb_node,
@@ -69,8 +69,7 @@ static int perf_evlist__add_sample(struct perf_evlist *evlist,
ret = 0;
if (he->ms.sym != NULL) {
struct annotation *notes = symbol__annotation(he->ms.sym);
- if (notes->src == NULL &&
- symbol__alloc_hist(he->ms.sym, evlist->nr_entries) < 0)
+ if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
return -ENOMEM;
ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
@@ -81,25 +80,26 @@ static int perf_evlist__add_sample(struct perf_evlist *evlist,
return ret;
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session)
+ struct machine *machine)
{
+ struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
- if (perf_event__preprocess_sample(event, session, &al, sample,
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
symbol__annotate_init) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+ if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
return 0;
- if (!al.filtered &&
- perf_evlist__add_sample(session->evlist, sample, evsel, &al)) {
+ if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
return -1;
@@ -108,14 +108,15 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static int hist_entry__tty_annotate(struct hist_entry *he, int evidx)
+static int hist_entry__tty_annotate(struct hist_entry *he, int evidx,
+ struct perf_annotate *ann)
{
return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx,
- print_line, full_paths, 0, 0);
+ ann->print_line, ann->full_paths, 0, 0);
}
static void hists__find_annotations(struct hists *self, int evidx,
- int nr_events)
+ struct perf_annotate *ann)
{
struct rb_node *nd = rb_first(&self->entries), *next;
int key = K_RIGHT;
@@ -138,8 +139,7 @@ find_next:
}
if (use_browser > 0) {
- key = hist_entry__tui_annotate(he, evidx, nr_events,
- NULL, NULL, 0);
+ key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0);
switch (key) {
case K_RIGHT:
next = rb_next(nd);
@@ -154,7 +154,7 @@ find_next:
if (next != NULL)
nd = next;
} else {
- hist_entry__tty_annotate(he, evidx);
+ hist_entry__tty_annotate(he, evidx, ann);
nd = rb_next(nd);
/*
* Since we have a hist_entry per IP for the same
@@ -167,33 +167,26 @@ find_next:
}
}
-static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .fork = perf_event__process_task,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
-};
-
-static int __cmd_annotate(void)
+static int __cmd_annotate(struct perf_annotate *ann)
{
int ret;
struct perf_session *session;
struct perf_evsel *pos;
u64 total_nr_samples;
- session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops);
+ session = perf_session__new(ann->input_name, O_RDONLY,
+ ann->force, false, &ann->tool);
if (session == NULL)
return -ENOMEM;
- if (cpu_list) {
- ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+ if (ann->cpu_list) {
+ ret = perf_session__cpu_bitmap(session, ann->cpu_list,
+ ann->cpu_bitmap);
if (ret)
goto out_delete;
}
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &ann->tool);
if (ret)
goto out_delete;
@@ -217,13 +210,12 @@ static int __cmd_annotate(void)
total_nr_samples += nr_samples;
hists__collapse_resort(hists);
hists__output_resort(hists);
- hists__find_annotations(hists, pos->idx,
- session->evlist->nr_entries);
+ hists__find_annotations(hists, pos->idx, ann);
}
}
if (total_nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", input_name);
+ ui__warning("The %s file has no samples!\n", session->filename);
goto out_delete;
}
out_delete:
@@ -247,29 +239,41 @@ static const char * const annotate_usage[] = {
NULL
};
-static const struct option options[] = {
- OPT_STRING('i', "input", &input_name, "file",
+int cmd_annotate(int argc, const char **argv, const char *prefix __used)
+{
+ struct perf_annotate annotate = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .fork = perf_event__process_task,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
+ },
+ };
+ const struct option options[] = {
+ OPT_STRING('i', "input", &annotate.input_name, "file",
"input file name"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
- OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
+ OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
"symbol to annotate"),
- OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
- OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+ OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
- OPT_BOOLEAN('l', "print-line", &print_line,
+ OPT_BOOLEAN('l', "print-line", &annotate.print_line,
"print matching source lines (may be slow)"),
- OPT_BOOLEAN('P', "full-paths", &full_paths,
+ OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
"Don't shorten the displayed pathnames"),
- OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
@@ -279,15 +283,13 @@ static const struct option options[] = {
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_END()
-};
+ };
-int cmd_annotate(int argc, const char **argv, const char *prefix __used)
-{
argc = parse_options(argc, argv, options, annotate_usage, 0);
- if (use_stdio)
+ if (annotate.use_stdio)
use_browser = 0;
- else if (use_tui)
+ else if (annotate.use_tui)
use_browser = 1;
setup_browser(true);
@@ -308,7 +310,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
if (argc > 1)
usage_with_options(annotate_usage, options);
- sym_hist_filter = argv[0];
+ annotate.sym_hist_filter = argv[0];
}
if (field_sep && *field_sep == '.') {
@@ -316,5 +318,5 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
return -1;
}
- return __cmd_annotate();
+ return __cmd_annotate(&annotate);
}
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index cb690a65bf02..52480467e9ff 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -18,7 +18,7 @@
#include <libelf.h>
-static char const *input_name = "perf.data";
+static const char *input_name;
static bool force;
static bool show_kernel;
static bool with_hits;
@@ -39,24 +39,6 @@ static const struct option options[] = {
OPT_END()
};
-static int perf_session__list_build_ids(void)
-{
- struct perf_session *session;
-
- session = perf_session__new(input_name, O_RDONLY, force, false,
- &build_id__mark_dso_hit_ops);
- if (session == NULL)
- return -1;
-
- if (with_hits)
- perf_session__process_events(session, &build_id__mark_dso_hit_ops);
-
- perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
-
- perf_session__delete(session);
- return 0;
-}
-
static int sysfs__fprintf_build_id(FILE *fp)
{
u8 kallsyms_build_id[BUILD_ID_SIZE];
@@ -85,17 +67,36 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
return fprintf(fp, "%s\n", sbuild_id);
}
-static int __cmd_buildid_list(void)
+static int perf_session__list_build_ids(void)
{
- if (show_kernel)
- return sysfs__fprintf_build_id(stdout);
+ struct perf_session *session;
elf_version(EV_CURRENT);
+
+ session = perf_session__new(input_name, O_RDONLY, force, false,
+ &build_id__mark_dso_hit_ops);
+ if (session == NULL)
+ return -1;
+
/*
- * See if this is an ELF file first:
- */
- if (filename__fprintf_build_id(input_name, stdout))
- return 0;
+ * See if this is an ELF file first:
+ */
+ if (filename__fprintf_build_id(session->filename, stdout))
+ goto out;
+
+ if (with_hits)
+ perf_session__process_events(session, &build_id__mark_dso_hit_ops);
+
+ perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
+out:
+ perf_session__delete(session);
+ return 0;
+}
+
+static int __cmd_buildid_list(void)
+{
+ if (show_kernel)
+ return sysfs__fprintf_build_id(stdout);
return perf_session__list_build_ids();
}
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index b39f3a1ee7dc..4f19513d7dda 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -9,7 +9,9 @@
#include "util/debug.h"
#include "util/event.h"
#include "util/hist.h"
+#include "util/evsel.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/sort.h"
#include "util/symbol.h"
#include "util/util.h"
@@ -30,14 +32,15 @@ static int hists__add_entry(struct hists *self,
return -ENOMEM;
}
-static int diff__process_sample_event(union perf_event *event,
+static int diff__process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
struct addr_location al;
- if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -46,16 +49,16 @@ static int diff__process_sample_event(union perf_event *event,
if (al.filtered || al.sym == NULL)
return 0;
- if (hists__add_entry(&session->hists, &al, sample->period)) {
+ if (hists__add_entry(&evsel->hists, &al, sample->period)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_diff = {
.sample = diff__process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
@@ -145,13 +148,13 @@ static int __cmd_diff(void)
int ret, i;
struct perf_session *session[2];
- session[0] = perf_session__new(input_old, O_RDONLY, force, false, &event_ops);
- session[1] = perf_session__new(input_new, O_RDONLY, force, false, &event_ops);
+ session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff);
+ session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
for (i = 0; i < 2; ++i) {
- ret = perf_session__process_events(session[i], &event_ops);
+ ret = perf_session__process_events(session[i], &perf_diff);
if (ret)
goto out_delete;
}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 4c5e9e04a41f..26760322c4f4 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -15,7 +15,7 @@
#include "util/parse-options.h"
#include "util/session.h"
-static char const *input_name = "perf.data";
+static const char *input_name;
static int __cmd_evlist(void)
{
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 8dfc12bb119b..09c106193e65 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -9,6 +9,7 @@
#include "perf.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/debug.h"
#include "util/parse-options.h"
@@ -16,8 +17,9 @@
static char const *input_name = "-";
static bool inject_build_ids;
-static int perf_event__repipe_synth(union perf_event *event,
- struct perf_session *session __used)
+static int perf_event__repipe_synth(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct machine *machine __used)
{
uint32_t size;
void *buf = event;
@@ -36,41 +38,70 @@ static int perf_event__repipe_synth(union perf_event *event,
return 0;
}
-static int perf_event__repipe(union perf_event *event,
+static int perf_event__repipe_op2_synth(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session __used)
+{
+ return perf_event__repipe_synth(tool, event, NULL);
+}
+
+static int perf_event__repipe_event_type_synth(struct perf_tool *tool,
+ union perf_event *event)
+{
+ return perf_event__repipe_synth(tool, event, NULL);
+}
+
+static int perf_event__repipe_tracing_data_synth(union perf_event *event,
+ struct perf_session *session __used)
+{
+ return perf_event__repipe_synth(NULL, event, NULL);
+}
+
+static int perf_event__repipe_attr(union perf_event *event,
+ struct perf_evlist **pevlist __used)
+{
+ return perf_event__repipe_synth(NULL, event, NULL);
+}
+
+static int perf_event__repipe(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- return perf_event__repipe_synth(event, session);
+ return perf_event__repipe_synth(tool, event, machine);
}
-static int perf_event__repipe_sample(union perf_event *event,
+static int perf_event__repipe_sample(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
- return perf_event__repipe_synth(event, session);
+ return perf_event__repipe_synth(tool, event, machine);
}
-static int perf_event__repipe_mmap(union perf_event *event,
+static int perf_event__repipe_mmap(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session)
+ struct machine *machine)
{
int err;
- err = perf_event__process_mmap(event, sample, session);
- perf_event__repipe(event, sample, session);
+ err = perf_event__process_mmap(tool, event, sample, machine);
+ perf_event__repipe(tool, event, sample, machine);
return err;
}
-static int perf_event__repipe_task(union perf_event *event,
+static int perf_event__repipe_task(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session)
+ struct machine *machine)
{
int err;
- err = perf_event__process_task(event, sample, session);
- perf_event__repipe(event, sample, session);
+ err = perf_event__process_task(tool, event, sample, machine);
+ perf_event__repipe(tool, event, sample, machine);
return err;
}
@@ -80,7 +111,7 @@ static int perf_event__repipe_tracing_data(union perf_event *event,
{
int err;
- perf_event__repipe_synth(event, session);
+ perf_event__repipe_synth(NULL, event, NULL);
err = perf_event__process_tracing_data(event, session);
return err;
@@ -100,10 +131,10 @@ static int dso__read_build_id(struct dso *self)
return -1;
}
-static int dso__inject_build_id(struct dso *self, struct perf_session *session)
+static int dso__inject_build_id(struct dso *self, struct perf_tool *tool,
+ struct machine *machine)
{
u16 misc = PERF_RECORD_MISC_USER;
- struct machine *machine;
int err;
if (dso__read_build_id(self) < 0) {
@@ -111,17 +142,11 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
return -1;
}
- machine = perf_session__find_host_machine(session);
- if (machine == NULL) {
- pr_err("Can't find machine for session\n");
- return -1;
- }
-
if (self->kernel)
misc = PERF_RECORD_MISC_KERNEL;
- err = perf_event__synthesize_build_id(self, misc, perf_event__repipe,
- machine, session);
+ err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe,
+ machine);
if (err) {
pr_err("Can't synthesize build_id event for %s\n", self->long_name);
return -1;
@@ -130,10 +155,11 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
return 0;
}
-static int perf_event__inject_buildid(union perf_event *event,
+static int perf_event__inject_buildid(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
struct addr_location al;
struct thread *thread;
@@ -141,21 +167,21 @@ static int perf_event__inject_buildid(union perf_event *event,
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- thread = perf_session__findnew(session, event->ip.pid);
+ thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
goto repipe;
}
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, event->ip.ip, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, &al);
if (al.map != NULL) {
if (!al.map->dso->hit) {
al.map->dso->hit = 1;
if (map__load(al.map, NULL) >= 0) {
- dso__inject_build_id(al.map->dso, session);
+ dso__inject_build_id(al.map->dso, tool, machine);
/*
* If this fails, too bad, let the other side
* account this as unresolved.
@@ -168,24 +194,24 @@ static int perf_event__inject_buildid(union perf_event *event,
}
repipe:
- perf_event__repipe(event, sample, session);
+ perf_event__repipe(tool, event, sample, machine);
return 0;
}
-struct perf_event_ops inject_ops = {
+struct perf_tool perf_inject = {
.sample = perf_event__repipe_sample,
.mmap = perf_event__repipe,
.comm = perf_event__repipe,
.fork = perf_event__repipe,
.exit = perf_event__repipe,
.lost = perf_event__repipe,
- .read = perf_event__repipe,
+ .read = perf_event__repipe_sample,
.throttle = perf_event__repipe,
.unthrottle = perf_event__repipe,
- .attr = perf_event__repipe_synth,
- .event_type = perf_event__repipe_synth,
- .tracing_data = perf_event__repipe_synth,
- .build_id = perf_event__repipe_synth,
+ .attr = perf_event__repipe_attr,
+ .event_type = perf_event__repipe_event_type_synth,
+ .tracing_data = perf_event__repipe_tracing_data_synth,
+ .build_id = perf_event__repipe_op2_synth,
};
extern volatile int session_done;
@@ -203,17 +229,17 @@ static int __cmd_inject(void)
signal(SIGINT, sig_handler);
if (inject_build_ids) {
- inject_ops.sample = perf_event__inject_buildid;
- inject_ops.mmap = perf_event__repipe_mmap;
- inject_ops.fork = perf_event__repipe_task;
- inject_ops.tracing_data = perf_event__repipe_tracing_data;
+ perf_inject.sample = perf_event__inject_buildid;
+ perf_inject.mmap = perf_event__repipe_mmap;
+ perf_inject.fork = perf_event__repipe_task;
+ perf_inject.tracing_data = perf_event__repipe_tracing_data;
}
- session = perf_session__new(input_name, O_RDONLY, false, true, &inject_ops);
+ session = perf_session__new(input_name, O_RDONLY, false, true, &perf_inject);
if (session == NULL)
return -ENOMEM;
- ret = perf_session__process_events(session, &inject_ops);
+ ret = perf_session__process_events(session, &perf_inject);
perf_session__delete(session);
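
'perf inject' works as a filter: every handler may process an event for its own purposes, but always re-emits the raw record on stdout, and the build-id path additionally synthesizes one build-id event per newly hit DSO. The body of perf_event__repipe_synth is elided by this diff; a plausible sketch of its short-write loop, assuming the uint32_t size / void *buf locals visible above and the event->header.size length used elsewhere in the series:

	/* Sketch only: forward the raw event bytes to stdout, handling
	 * short writes; the real function's error policy may differ. */
	static int repipe_event_sketch(union perf_event *event)
	{
		uint32_t size = event->header.size;
		const char *buf = (const char *)event;

		while (size) {
			ssize_t ret = write(STDOUT_FILENO, buf, size);
			if (ret < 0)
				return -errno;
			size -= ret;
			buf  += ret;
		}
		return 0;
	}
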
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 225e963df105..fe1ad8f21961 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -7,6 +7,7 @@
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
@@ -18,7 +19,7 @@
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
-static char const *input_name = "perf.data";
+static const char *input_name;
static int alloc_flag;
static int caller_flag;
@@ -303,12 +304,13 @@ static void process_raw_event(union perf_event *raw_event __used, void *data,
}
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -324,7 +326,7 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_kmem = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.ordered_samples = true,
@@ -483,7 +485,7 @@ static int __cmd_kmem(void)
{
int err = -EINVAL;
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &event_ops);
+ 0, false, &perf_kmem);
if (session == NULL)
return -ENOMEM;
@@ -494,7 +496,7 @@ static int __cmd_kmem(void)
goto out_delete;
setup_pager();
- err = perf_session__process_events(session, &event_ops);
+ err = perf_session__process_events(session, &perf_kmem);
if (err != 0)
goto out_delete;
sort_result();
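
kmem (like lock below and sched further down) keeps its accounting untouched; the only mechanical change is that the per-event thread is resolved through the machine, which owns the thread tree, rather than through the session. Isolated as a small helper — a sketch with names from this diff, assuming perf's internal headers:

	static struct thread *sketch_resolve_thread(struct machine *machine,
						    pid_t pid)
	{
		/* machine__findnew_thread replaces perf_session__findnew. */
		struct thread *thread = machine__findnew_thread(machine, pid);

		if (thread == NULL)
			pr_debug("could not find or create thread %d\n", pid);
		return thread;
	}
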
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 34d1e853829d..032324a76b87 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -38,7 +38,7 @@ static const struct option kvm_options[] = {
OPT_BOOLEAN(0, "guest", &perf_guest,
"Collect guest os data"),
OPT_BOOLEAN(0, "host", &perf_host,
- "Collect guest os data"),
+ "Collect host os data"),
OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
"guest mount directory under which every guest os"
" instance has a subdir"),
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 899080ace267..2296c391d0f5 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -12,6 +12,7 @@
#include "util/debug.h"
#include "util/session.h"
+#include "util/tool.h"
#include <sys/types.h>
#include <sys/prctl.h>
@@ -325,7 +326,7 @@ alloc_failed:
die("memory allocation failed\n");
}
-static char const *input_name = "perf.data";
+static const char *input_name;
struct raw_event_sample {
u32 size;
@@ -845,12 +846,13 @@ static void dump_info(void)
die("Unknown type of information\n");
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *s)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(s, sample->tid);
+ struct thread *thread = machine__findnew_thread(machine, sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -863,7 +865,7 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static struct perf_event_ops eops = {
+static struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.ordered_samples = true,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 710ae3d0a489..59d43abfbfec 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -46,7 +46,6 @@
#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
#define DEFAULT_FUNC_FILTER "!_*"
-#define MAX_PATH_LEN 256
/* Session management structure */
static struct {
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6ab58cc99d53..0abfb18b911f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -22,6 +22,7 @@
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
@@ -35,55 +36,36 @@ enum write_mode_t {
WRITE_APPEND
};
-static u64 user_interval = ULLONG_MAX;
-static u64 default_interval = 0;
-
-static unsigned int page_size;
-static unsigned int mmap_pages = UINT_MAX;
-static unsigned int user_freq = UINT_MAX;
-static int freq = 1000;
-static int output;
-static int pipe_output = 0;
-static const char *output_name = NULL;
-static bool group = false;
-static int realtime_prio = 0;
-static bool nodelay = false;
-static bool raw_samples = false;
-static bool sample_id_all_avail = true;
-static bool system_wide = false;
-static pid_t target_pid = -1;
-static pid_t target_tid = -1;
-static pid_t child_pid = -1;
-static bool no_inherit = false;
-static enum write_mode_t write_mode = WRITE_FORCE;
-static bool call_graph = false;
-static bool inherit_stat = false;
-static bool no_samples = false;
-static bool sample_address = false;
-static bool sample_time = false;
-static bool no_buildid = false;
-static bool no_buildid_cache = false;
-static struct perf_evlist *evsel_list;
-
-static long samples = 0;
-static u64 bytes_written = 0;
-
-static int file_new = 1;
-static off_t post_processing_offset;
-
-static struct perf_session *session;
-static const char *cpu_list;
-static const char *progname;
-
-static void advance_output(size_t size)
+struct perf_record {
+ struct perf_tool tool;
+ struct perf_record_opts opts;
+ u64 bytes_written;
+ const char *output_name;
+ struct perf_evlist *evlist;
+ struct perf_session *session;
+ const char *progname;
+ int output;
+ unsigned int page_size;
+ int realtime_prio;
+ enum write_mode_t write_mode;
+ bool no_buildid;
+ bool no_buildid_cache;
+ bool force;
+ bool file_new;
+ bool append_file;
+ long samples;
+ off_t post_processing_offset;
+};
+
+static void advance_output(struct perf_record *rec, size_t size)
{
- bytes_written += size;
+ rec->bytes_written += size;
}
-static void write_output(void *buf, size_t size)
+static void write_output(struct perf_record *rec, void *buf, size_t size)
{
while (size) {
- int ret = write(output, buf, size);
+ int ret = write(rec->output, buf, size);
if (ret < 0)
die("failed to write");
@@ -91,30 +73,33 @@ static void write_output(void *buf, size_t size)
size -= ret;
buf += ret;
- bytes_written += ret;
+ rec->bytes_written += ret;
}
}
-static int process_synthesized_event(union perf_event *event,
+static int process_synthesized_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *self __used)
+ struct machine *machine __used)
{
- write_output(event, event->header.size);
+ struct perf_record *rec = container_of(tool, struct perf_record, tool);
+ write_output(rec, event, event->header.size);
return 0;
}
-static void mmap_read(struct perf_mmap *md)
+static void perf_record__mmap_read(struct perf_record *rec,
+ struct perf_mmap *md)
{
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
- unsigned char *data = md->base + page_size;
+ unsigned char *data = md->base + rec->page_size;
unsigned long size;
void *buf;
if (old == head)
return;
- samples++;
+ rec->samples++;
size = head - old;
@@ -123,14 +108,14 @@ static void mmap_read(struct perf_mmap *md)
size = md->mask + 1 - (old & md->mask);
old += size;
- write_output(buf, size);
+ write_output(rec, buf, size);
}
buf = &data[old & md->mask];
size = head - old;
old += size;
- write_output(buf, size);
+ write_output(rec, buf, size);
md->prev = old;
perf_mmap__write_tail(md, old);
@@ -149,17 +134,18 @@ static void sig_handler(int sig)
signr = sig;
}
-static void sig_atexit(void)
+static void perf_record__sig_exit(int exit_status __used, void *arg)
{
+ struct perf_record *rec = arg;
int status;
- if (child_pid > 0) {
+ if (rec->evlist->workload.pid > 0) {
if (!child_finished)
- kill(child_pid, SIGTERM);
+ kill(rec->evlist->workload.pid, SIGTERM);
wait(&status);
if (WIFSIGNALED(status))
- psignal(WTERMSIG(status), progname);
+ psignal(WTERMSIG(status), rec->progname);
}
if (signr == -1 || signr == SIGUSR1)
@@ -169,78 +155,6 @@ static void sig_atexit(void)
kill(getpid(), signr);
}
-static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
-{
- struct perf_event_attr *attr = &evsel->attr;
- int track = !evsel->idx; /* only the first counter needs these */
-
- attr->disabled = 1;
- attr->inherit = !no_inherit;
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING |
- PERF_FORMAT_ID;
-
- attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
-
- if (evlist->nr_entries > 1)
- attr->sample_type |= PERF_SAMPLE_ID;
-
- /*
- * We default some events to a 1 default interval. But keep
- * it a weak assumption overridable by the user.
- */
- if (!attr->sample_period || (user_freq != UINT_MAX &&
- user_interval != ULLONG_MAX)) {
- if (freq) {
- attr->sample_type |= PERF_SAMPLE_PERIOD;
- attr->freq = 1;
- attr->sample_freq = freq;
- } else {
- attr->sample_period = default_interval;
- }
- }
-
- if (no_samples)
- attr->sample_freq = 0;
-
- if (inherit_stat)
- attr->inherit_stat = 1;
-
- if (sample_address) {
- attr->sample_type |= PERF_SAMPLE_ADDR;
- attr->mmap_data = track;
- }
-
- if (call_graph)
- attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
-
- if (system_wide)
- attr->sample_type |= PERF_SAMPLE_CPU;
-
- if (sample_id_all_avail &&
- (sample_time || system_wide || !no_inherit || cpu_list))
- attr->sample_type |= PERF_SAMPLE_TIME;
-
- if (raw_samples) {
- attr->sample_type |= PERF_SAMPLE_TIME;
- attr->sample_type |= PERF_SAMPLE_RAW;
- attr->sample_type |= PERF_SAMPLE_CPU;
- }
-
- if (nodelay) {
- attr->watermark = 0;
- attr->wakeup_events = 1;
- }
-
- attr->mmap = track;
- attr->comm = track;
-
- if (target_pid == -1 && target_tid == -1 && !system_wide) {
- attr->disabled = 1;
- attr->enable_on_exec = 1;
- }
-}
-
static bool perf_evlist__equal(struct perf_evlist *evlist,
struct perf_evlist *other)
{
@@ -260,15 +174,17 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
return true;
}
-static void open_counters(struct perf_evlist *evlist)
+static void perf_record__open(struct perf_record *rec)
{
struct perf_evsel *pos, *first;
-
- if (evlist->cpus->map[0] < 0)
- no_inherit = true;
+ struct perf_evlist *evlist = rec->evlist;
+ struct perf_session *session = rec->session;
+ struct perf_record_opts *opts = &rec->opts;
first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ perf_evlist__config_attrs(evlist, opts);
+
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
struct xyarray *group_fd = NULL;
@@ -286,29 +202,27 @@ static void open_counters(struct perf_evlist *evlist)
*/
bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
- if (group && pos != first)
+ if (opts->group && pos != first)
group_fd = first->fd;
-
- config_attr(pos, evlist);
retry_sample_id:
- attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+ attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
try_again:
- if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
- group_fd) < 0) {
+ if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
+ opts->group, group_fd) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
ui__error_paranoid();
exit(EXIT_FAILURE);
- } else if (err == ENODEV && cpu_list) {
+ } else if (err == ENODEV && opts->cpu_list) {
die("No such device - did you specify"
" an out-of-range profile CPU?\n");
- } else if (err == EINVAL && sample_id_all_avail) {
+ } else if (err == EINVAL && opts->sample_id_all_avail) {
/*
* Old kernel, no attr->sample_id_type_all field
*/
- sample_id_all_avail = false;
- if (!sample_time && !raw_samples && !time_needed)
+ opts->sample_id_all_avail = false;
+ if (!opts->sample_time && !opts->raw_samples && !time_needed)
attr->sample_type &= ~PERF_SAMPLE_TIME;
goto retry_sample_id;
@@ -358,10 +272,20 @@ try_again:
exit(-1);
}
- if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
+ if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
+ if (errno == EPERM)
+ die("Permission error mapping pages.\n"
+ "Consider increasing "
+ "/proc/sys/kernel/perf_event_mlock_kb,\n"
+ "or try again with a smaller value of -m/--mmap_pages.\n"
+ "(current value: %d)\n", opts->mmap_pages);
+ else if (!is_power_of_2(opts->mmap_pages))
+ die("--mmap_pages/-m value must be a power of two.");
+
die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ }
- if (file_new)
+ if (rec->file_new)
session->evlist = evlist;
else {
if (!perf_evlist__equal(session->evlist, evlist)) {
@@ -373,29 +297,32 @@ try_again:
perf_session__update_sample_type(session);
}
-static int process_buildids(void)
+static int process_buildids(struct perf_record *rec)
{
- u64 size = lseek(output, 0, SEEK_CUR);
+ u64 size = lseek(rec->output, 0, SEEK_CUR);
if (size == 0)
return 0;
- session->fd = output;
- return __perf_session__process_events(session, post_processing_offset,
- size - post_processing_offset,
+ rec->session->fd = rec->output;
+ return __perf_session__process_events(rec->session, rec->post_processing_offset,
+ size - rec->post_processing_offset,
size, &build_id__mark_dso_hit_ops);
}
-static void atexit_header(void)
+static void perf_record__exit(int status __used, void *arg)
{
- if (!pipe_output) {
- session->header.data_size += bytes_written;
-
- if (!no_buildid)
- process_buildids();
- perf_session__write_header(session, evsel_list, output, true);
- perf_session__delete(session);
- perf_evlist__delete(evsel_list);
+ struct perf_record *rec = arg;
+
+ if (!rec->opts.pipe_output) {
+ rec->session->header.data_size += rec->bytes_written;
+
+ if (!rec->no_buildid)
+ process_buildids(rec);
+ perf_session__write_header(rec->session, rec->evlist,
+ rec->output, true);
+ perf_session__delete(rec->session);
+ perf_evlist__delete(rec->evlist);
symbol__exit();
}
}
@@ -403,7 +330,7 @@ static void atexit_header(void)
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
int err;
- struct perf_session *psession = data;
+ struct perf_tool *tool = data;
if (machine__is_host(machine))
return;
@@ -416,8 +343,8 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
*method is used to avoid symbol missing when the first addr is
*in module instead of in guest kernel.
*/
- err = perf_event__synthesize_modules(process_synthesized_event,
- psession, machine);
+ err = perf_event__synthesize_modules(tool, process_synthesized_event,
+ machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -426,12 +353,11 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
* We use _stext for guest kernel because guest kernel's /proc/kallsyms
* have no _text sometimes.
*/
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- psession, machine, "_text");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_text");
if (err < 0)
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- psession, machine,
- "_stext");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_stext");
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -442,73 +368,71 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
-static void mmap_read_all(void)
+static void perf_record__mmap_read_all(struct perf_record *rec)
{
int i;
- for (i = 0; i < evsel_list->nr_mmaps; i++) {
- if (evsel_list->mmap[i].base)
- mmap_read(&evsel_list->mmap[i]);
+ for (i = 0; i < rec->evlist->nr_mmaps; i++) {
+ if (rec->evlist->mmap[i].base)
+ perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
}
- if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
- write_output(&finished_round_event, sizeof(finished_round_event));
+ if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO))
+ write_output(rec, &finished_round_event, sizeof(finished_round_event));
}
-static int __cmd_record(int argc, const char **argv)
+static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
struct stat st;
int flags;
- int err;
+ int err, output;
unsigned long waking = 0;
- int child_ready_pipe[2], go_pipe[2];
const bool forks = argc > 0;
- char buf;
struct machine *machine;
+ struct perf_tool *tool = &rec->tool;
+ struct perf_record_opts *opts = &rec->opts;
+ struct perf_evlist *evsel_list = rec->evlist;
+ const char *output_name = rec->output_name;
+ struct perf_session *session;
- progname = argv[0];
+ rec->progname = argv[0];
- page_size = sysconf(_SC_PAGE_SIZE);
+ rec->page_size = sysconf(_SC_PAGE_SIZE);
- atexit(sig_atexit);
+ on_exit(perf_record__sig_exit, rec);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
signal(SIGUSR1, sig_handler);
- if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
- perror("failed to create pipes");
- exit(-1);
- }
-
if (!output_name) {
if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
- pipe_output = 1;
+ opts->pipe_output = true;
else
- output_name = "perf.data";
+ rec->output_name = output_name = "perf.data";
}
if (output_name) {
if (!strcmp(output_name, "-"))
- pipe_output = 1;
+ opts->pipe_output = true;
else if (!stat(output_name, &st) && st.st_size) {
- if (write_mode == WRITE_FORCE) {
+ if (rec->write_mode == WRITE_FORCE) {
char oldname[PATH_MAX];
snprintf(oldname, sizeof(oldname), "%s.old",
output_name);
unlink(oldname);
rename(output_name, oldname);
}
- } else if (write_mode == WRITE_APPEND) {
- write_mode = WRITE_FORCE;
+ } else if (rec->write_mode == WRITE_APPEND) {
+ rec->write_mode = WRITE_FORCE;
}
}
flags = O_CREAT|O_RDWR;
- if (write_mode == WRITE_APPEND)
- file_new = 0;
+ if (rec->write_mode == WRITE_APPEND)
+ rec->file_new = 0;
else
flags |= O_TRUNC;
- if (pipe_output)
+ if (opts->pipe_output)
output = STDOUT_FILENO;
else
output = open(output_name, flags, S_IRUSR | S_IWUSR);
@@ -517,17 +441,21 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
+ rec->output = output;
+
session = perf_session__new(output_name, O_WRONLY,
- write_mode == WRITE_FORCE, false, NULL);
+ rec->write_mode == WRITE_FORCE, false, NULL);
if (session == NULL) {
pr_err("Not enough memory for reading perf file header\n");
return -1;
}
- if (!no_buildid)
+ rec->session = session;
+
+ if (!rec->no_buildid)
perf_header__set_feat(&session->header, HEADER_BUILD_ID);
- if (!file_new) {
+ if (!rec->file_new) {
err = perf_session__read_header(session, output);
if (err < 0)
goto out_delete_session;
@@ -549,94 +477,57 @@ static int __cmd_record(int argc, const char **argv)
perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_CPUID);
- /* 512 kiB: default amount of unprivileged mlocked memory */
- if (mmap_pages == UINT_MAX)
- mmap_pages = (512 * 1024) / page_size;
-
if (forks) {
- child_pid = fork();
- if (child_pid < 0) {
- perror("failed to fork");
- exit(-1);
- }
-
- if (!child_pid) {
- if (pipe_output)
- dup2(2, 1);
- close(child_ready_pipe[0]);
- close(go_pipe[1]);
- fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
-
- /*
- * Do a dummy execvp to get the PLT entry resolved,
- * so we avoid the resolver overhead on the real
- * execvp call.
- */
- execvp("", (char **)argv);
-
- /*
- * Tell the parent we're ready to go
- */
- close(child_ready_pipe[1]);
-
- /*
- * Wait until the parent tells us to go.
- */
- if (read(go_pipe[0], &buf, 1) == -1)
- perror("unable to read pipe");
-
- execvp(argv[0], (char **)argv);
-
- perror(argv[0]);
- kill(getppid(), SIGUSR1);
- exit(-1);
- }
-
- if (!system_wide && target_tid == -1 && target_pid == -1)
- evsel_list->threads->map[0] = child_pid;
-
- close(child_ready_pipe[1]);
- close(go_pipe[0]);
- /*
- * wait for child to settle
- */
- if (read(child_ready_pipe[0], &buf, 1) == -1) {
- perror("unable to read pipe");
- exit(-1);
+ err = perf_evlist__prepare_workload(evsel_list, opts, argv);
+ if (err < 0) {
+ pr_err("Couldn't run the workload!\n");
+ goto out_delete_session;
}
- close(child_ready_pipe[0]);
}
- open_counters(evsel_list);
+ perf_record__open(rec);
/*
- * perf_session__delete(session) will be called at atexit_header()
+ * perf_session__delete(session) will be called at perf_record__exit()
*/
- atexit(atexit_header);
+ on_exit(perf_record__exit, rec);
- if (pipe_output) {
+ if (opts->pipe_output) {
err = perf_header__write_pipe(output);
if (err < 0)
return err;
- } else if (file_new) {
+ } else if (rec->file_new) {
err = perf_session__write_header(session, evsel_list,
output, false);
if (err < 0)
return err;
}
- post_processing_offset = lseek(output, 0, SEEK_CUR);
+ if (!!rec->no_buildid
+ && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
+ pr_err("Couldn't generating buildids. "
+ "Use --no-buildid to profile anyway.\n");
+ return -1;
+ }
- if (pipe_output) {
- err = perf_session__synthesize_attrs(session,
- process_synthesized_event);
+ rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
+
+ machine = perf_session__find_host_machine(session);
+ if (!machine) {
+ pr_err("Couldn't find native kernel information.\n");
+ return -1;
+ }
+
+ if (opts->pipe_output) {
+ err = perf_event__synthesize_attrs(tool, session,
+ process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
return err;
}
- err = perf_event__synthesize_event_types(process_synthesized_event,
- session);
+ err = perf_event__synthesize_event_types(tool, process_synthesized_event,
+ machine);
if (err < 0) {
pr_err("Couldn't synthesize event_types.\n");
return err;
@@ -651,56 +542,49 @@ static int __cmd_record(int argc, const char **argv)
* return this more properly and also
* propagate errors that now are calling die()
*/
- err = perf_event__synthesize_tracing_data(output, evsel_list,
- process_synthesized_event,
- session);
+ err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
+ process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
return err;
}
- advance_output(err);
+ advance_output(rec, err);
}
}
- machine = perf_session__find_host_machine(session);
- if (!machine) {
- pr_err("Couldn't find native kernel information.\n");
- return -1;
- }
-
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- session, machine, "_text");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_text");
if (err < 0)
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- session, machine, "_stext");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_stext");
if (err < 0)
pr_err("Couldn't record kernel reference relocation symbol\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/kallsyms permission or run as root.\n");
- err = perf_event__synthesize_modules(process_synthesized_event,
- session, machine);
+ err = perf_event__synthesize_modules(tool, process_synthesized_event,
+ machine);
if (err < 0)
pr_err("Couldn't record kernel module information.\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/modules permission or run as root.\n");
if (perf_guest)
- perf_session__process_machines(session,
+ perf_session__process_machines(session, tool,
perf_event__synthesize_guest_os);
- if (!system_wide)
- perf_event__synthesize_thread_map(evsel_list->threads,
+ if (!opts->system_wide)
+ perf_event__synthesize_thread_map(tool, evsel_list->threads,
process_synthesized_event,
- session);
+ machine);
else
- perf_event__synthesize_threads(process_synthesized_event,
- session);
+ perf_event__synthesize_threads(tool, process_synthesized_event,
+ machine);
- if (realtime_prio) {
+ if (rec->realtime_prio) {
struct sched_param param;
- param.sched_priority = realtime_prio;
+ param.sched_priority = rec->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
pr_err("Could not set realtime priority.\n");
exit(-1);
@@ -713,14 +597,14 @@ static int __cmd_record(int argc, const char **argv)
* Let the child rip
*/
if (forks)
- close(go_pipe[1]);
+ perf_evlist__start_workload(evsel_list);
for (;;) {
- int hits = samples;
+ int hits = rec->samples;
- mmap_read_all();
+ perf_record__mmap_read_all(rec);
- if (hits == samples) {
+ if (hits == rec->samples) {
if (done)
break;
err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
@@ -741,9 +625,9 @@ static int __cmd_record(int argc, const char **argv)
*/
fprintf(stderr,
"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
- (double)bytes_written / 1024.0 / 1024.0,
+ (double)rec->bytes_written / 1024.0 / 1024.0,
output_name,
- bytes_written / 24);
+ rec->bytes_written / 24);
return 0;
@@ -758,58 +642,89 @@ static const char * const record_usage[] = {
NULL
};
-static bool force, append_file;
+/*
+ * XXX Ideally this would be local to cmd_record() and passed to a
+ * perf_record__new, because we need to have access to it in perf_record__exit,
+ * which is called after cmd_record() exits, but since record_options need to
+ * be accessible to builtin-script, leave it here.
+ *
+ * At least we don't touch it in all the other functions here directly.
+ *
+ * Just say no to tons of global variables, sigh.
+ */
+static struct perf_record record = {
+ .opts = {
+ .target_pid = -1,
+ .target_tid = -1,
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 1000,
+ .sample_id_all_avail = true,
+ },
+ .write_mode = WRITE_FORCE,
+ .file_new = true,
+};
+/*
+ * XXX Will stay a global variable till we fix builtin-script.c to stop messing
+ * with it and switch to use the library functions in perf_evlist that came
+ * from builtin-record.c, i.e. use perf_record_opts,
+ * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
+ * using pipes, etc.
+ */
const struct option record_options[] = {
- OPT_CALLBACK('e', "event", &evsel_list, "event",
+ OPT_CALLBACK('e', "event", &record.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
- OPT_CALLBACK(0, "filter", &evsel_list, "filter",
+ OPT_CALLBACK(0, "filter", &record.evlist, "filter",
"event filter", parse_filter),
- OPT_INTEGER('p', "pid", &target_pid,
+ OPT_INTEGER('p', "pid", &record.opts.target_pid,
"record events on existing process id"),
- OPT_INTEGER('t', "tid", &target_tid,
+ OPT_INTEGER('t', "tid", &record.opts.target_tid,
"record events on existing thread id"),
- OPT_INTEGER('r', "realtime", &realtime_prio,
+ OPT_INTEGER('r', "realtime", &record.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
- OPT_BOOLEAN('D', "no-delay", &nodelay,
+ OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
"collect data without buffering"),
- OPT_BOOLEAN('R', "raw-samples", &raw_samples,
+ OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
"collect raw sample records from all opened counters"),
- OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide,
"system-wide collection from all CPUs"),
- OPT_BOOLEAN('A', "append", &append_file,
+ OPT_BOOLEAN('A', "append", &record.append_file,
"append to the output file to do incremental profiling"),
- OPT_STRING('C', "cpu", &cpu_list, "cpu",
+ OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu",
"list of cpus to monitor"),
- OPT_BOOLEAN('f', "force", &force,
+ OPT_BOOLEAN('f', "force", &record.force,
"overwrite existing data file (deprecated)"),
- OPT_U64('c', "count", &user_interval, "event period to sample"),
- OPT_STRING('o', "output", &output_name, "file",
+ OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
+ OPT_STRING('o', "output", &record.output_name, "file",
"output file name"),
- OPT_BOOLEAN('i', "no-inherit", &no_inherit,
+ OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
"child tasks do not inherit counters"),
- OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
- OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
- OPT_BOOLEAN(0, "group", &group,
+ OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
+ OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
+ "number of mmap data pages"),
+ OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
- OPT_BOOLEAN('g', "call-graph", &call_graph,
+ OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
"do call-graph (stack chain/backtrace) recording"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
- OPT_BOOLEAN('s', "stat", &inherit_stat,
+ OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
"per thread counts"),
- OPT_BOOLEAN('d', "data", &sample_address,
+ OPT_BOOLEAN('d', "data", &record.opts.sample_address,
"Sample addresses"),
- OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"),
- OPT_BOOLEAN('n', "no-samples", &no_samples,
+ OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
+ OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
+ OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
"don't sample"),
- OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
+ OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
"do not update the buildid cache"),
- OPT_BOOLEAN('B', "no-buildid", &no_buildid,
+ OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
"do not collect buildids in perf.data"),
- OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+ OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only",
parse_cgroups),
OPT_END()
@@ -819,6 +734,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
{
int err = -ENOMEM;
struct perf_evsel *pos;
+ struct perf_evlist *evsel_list;
+ struct perf_record *rec = &record;
perf_header__set_cmdline(argc, argv);
@@ -826,23 +743,25 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
if (evsel_list == NULL)
return -ENOMEM;
+ rec->evlist = evsel_list;
+
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc && target_pid == -1 && target_tid == -1 &&
- !system_wide && !cpu_list)
+ if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 &&
+ !rec->opts.system_wide && !rec->opts.cpu_list)
usage_with_options(record_usage, record_options);
- if (force && append_file) {
+ if (rec->force && rec->append_file) {
fprintf(stderr, "Can't overwrite and append at the same time."
" You need to choose between -f and -A");
usage_with_options(record_usage, record_options);
- } else if (append_file) {
- write_mode = WRITE_APPEND;
+ } else if (rec->append_file) {
+ rec->write_mode = WRITE_APPEND;
} else {
- write_mode = WRITE_FORCE;
+ rec->write_mode = WRITE_FORCE;
}
- if (nr_cgroups && !system_wide) {
+ if (nr_cgroups && !rec->opts.system_wide) {
fprintf(stderr, "cgroup monitoring only available in"
" system-wide mode\n");
usage_with_options(record_usage, record_options);
@@ -860,7 +779,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
- if (no_buildid_cache || no_buildid)
+ if (rec->no_buildid_cache || rec->no_buildid)
disable_buildid_cache();
if (evsel_list->nr_entries == 0 &&
@@ -869,43 +788,37 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
goto out_symbol_exit;
}
- if (target_pid != -1)
- target_tid = target_pid;
+ if (rec->opts.target_pid != -1)
+ rec->opts.target_tid = rec->opts.target_pid;
- if (perf_evlist__create_maps(evsel_list, target_pid,
- target_tid, cpu_list) < 0)
+ if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
+ rec->opts.target_tid, rec->opts.cpu_list) < 0)
usage_with_options(record_usage, record_options);
list_for_each_entry(pos, &evsel_list->entries, node) {
- if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
- evsel_list->threads->nr) < 0)
- goto out_free_fd;
if (perf_header__push_event(pos->attr.config, event_name(pos)))
goto out_free_fd;
}
- if (perf_evlist__alloc_pollfd(evsel_list) < 0)
- goto out_free_fd;
-
- if (user_interval != ULLONG_MAX)
- default_interval = user_interval;
- if (user_freq != UINT_MAX)
- freq = user_freq;
+ if (rec->opts.user_interval != ULLONG_MAX)
+ rec->opts.default_interval = rec->opts.user_interval;
+ if (rec->opts.user_freq != UINT_MAX)
+ rec->opts.freq = rec->opts.user_freq;
/*
* User specified count overrides default frequency.
*/
- if (default_interval)
- freq = 0;
- else if (freq) {
- default_interval = freq;
+ if (rec->opts.default_interval)
+ rec->opts.freq = 0;
+ else if (rec->opts.freq) {
+ rec->opts.default_interval = rec->opts.freq;
} else {
fprintf(stderr, "frequency and count are zero, aborting\n");
err = -EINVAL;
goto out_free_fd;
}
- err = __cmd_record(argc, argv);
+ err = __cmd_record(&record, argc, argv);
out_free_fd:
perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
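
The record rewrite is the largest in the series: some thirty file-scope globals collapse into struct perf_record, which embeds its perf_tool, so every callback can recover the full state via container_of; for the same reason cleanup moves from atexit() to on_exit(), whose handlers receive an argument. A minimal sketch of the recovery idiom used by process_synthesized_event above (assuming perf's internal headers; the struct is trimmed to two fields):

	struct perf_record_sketch {
		struct perf_tool tool;		/* embedded, so container_of works */
		u64		 bytes_written;
	};

	static int synth_cb(struct perf_tool *tool, union perf_event *event,
			    struct perf_sample *sample __used,
			    struct machine *machine __used)
	{
		struct perf_record_sketch *rec;

		/* Walk back from the embedded tool to the enclosing state. */
		rec = container_of(tool, struct perf_record_sketch, tool);
		rec->bytes_written += event->header.size;	/* simplified accounting */
		return 0;
	}
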
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4d7c8340c326..25d34d483e49 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -25,6 +25,7 @@
#include "util/evsel.h"
#include "util/header.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -35,38 +36,35 @@
#include <linux/bitmap.h>
-static char const *input_name = "perf.data";
-
-static bool force, use_tui, use_stdio;
-static bool hide_unresolved;
-static bool dont_use_callchains;
-static bool show_full_info;
-
-static bool show_threads;
-static struct perf_read_values show_threads_values;
-
-static const char default_pretty_printing_style[] = "normal";
-static const char *pretty_printing_style = default_pretty_printing_style;
-
-static char callchain_default_opt[] = "fractal,0.5,callee";
-static bool inverted_callchain;
-static symbol_filter_t annotate_init;
-
-static const char *cpu_list;
-static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+struct perf_report {
+ struct perf_tool tool;
+ struct perf_session *session;
+ char const *input_name;
+ bool force, use_tui, use_stdio;
+ bool hide_unresolved;
+ bool dont_use_callchains;
+ bool show_full_info;
+ bool show_threads;
+ bool inverted_callchain;
+ struct perf_read_values show_threads_values;
+ const char *pretty_printing_style;
+ symbol_filter_t annotate_init;
+ const char *cpu_list;
+ DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+};
-static int perf_session__add_hist_entry(struct perf_session *session,
- struct addr_location *al,
- struct perf_sample *sample,
- struct perf_evsel *evsel)
+static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+ struct addr_location *al,
+ struct perf_sample *sample,
+ struct machine *machine)
{
struct symbol *parent = NULL;
int err = 0;
struct hist_entry *he;
if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
- err = perf_session__resolve_callchain(session, al->thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al->thread,
+ sample->callchain, &parent);
if (err)
return err;
}
@@ -76,7 +74,8 @@ static int perf_session__add_hist_entry(struct perf_session *session,
return -ENOMEM;
if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain, &session->callchain_cursor,
+ err = callchain_append(he->callchain,
+ &evsel->hists.callchain_cursor,
sample->period);
if (err)
return err;
@@ -92,8 +91,7 @@ static int perf_session__add_hist_entry(struct perf_session *session,
assert(evsel != NULL);
err = -ENOMEM;
- if (notes->src == NULL &&
- symbol__alloc_hist(he->ms.sym, session->evlist->nr_entries) < 0)
+ if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
goto out;
err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
@@ -106,30 +104,32 @@ out:
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session)
+ struct machine *machine)
{
+ struct perf_report *rep = container_of(tool, struct perf_report, tool);
struct addr_location al;
- if (perf_event__preprocess_sample(event, session, &al, sample,
- annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
+ rep->annotate_init) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- if (al.filtered || (hide_unresolved && al.sym == NULL))
+ if (al.filtered || (rep->hide_unresolved && al.sym == NULL))
return 0;
- if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+ if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
return 0;
if (al.map != NULL)
al.map->dso->hit = 1;
- if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
+ if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) {
pr_debug("problem incrementing symbol period, skipping event\n");
return -1;
}
@@ -137,15 +137,17 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static int process_read_event(union perf_event *event,
+static int process_read_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct perf_evsel *evsel,
+ struct machine *machine __used)
{
- struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist,
- event->read.id);
- if (show_threads) {
+ struct perf_report *rep = container_of(tool, struct perf_report, tool);
+
+ if (rep->show_threads) {
const char *name = evsel ? event_name(evsel) : "unknown";
- perf_read_values_add_value(&show_threads_values,
+ perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
event->read.id,
name,
@@ -159,8 +161,10 @@ static int process_read_event(union perf_event *event,
return 0;
}
-static int perf_session__setup_sample_type(struct perf_session *self)
+static int perf_report__setup_sample_type(struct perf_report *rep)
{
+ struct perf_session *self = rep->session;
+
if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
ui__warning("Selected --sort parent, but no "
@@ -173,7 +177,8 @@ static int perf_session__setup_sample_type(struct perf_session *self)
"you call 'perf record' without -g?\n");
return -1;
}
- } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
+ } else if (!rep->dont_use_callchains &&
+ callchain_param.mode != CHAIN_NONE &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
@@ -186,22 +191,6 @@ static int perf_session__setup_sample_type(struct perf_session *self)
return 0;
}
-static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .lost = perf_event__process_lost,
- .read = process_read_event,
- .attr = perf_event__process_attr,
- .event_type = perf_event__process_event_type,
- .tracing_data = perf_event__process_tracing_data,
- .build_id = perf_event__process_build_id,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
-};
-
extern volatile int session_done;
static void sig_handler(int sig __used)
@@ -224,6 +213,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
}
static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
+ struct perf_report *rep,
const char *help)
{
struct perf_evsel *pos;
@@ -241,18 +231,18 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
parent_pattern == default_parent_pattern) {
fprintf(stdout, "#\n# (%s)\n#\n", help);
- if (show_threads) {
- bool style = !strcmp(pretty_printing_style, "raw");
- perf_read_values_display(stdout, &show_threads_values,
+ if (rep->show_threads) {
+ bool style = !strcmp(rep->pretty_printing_style, "raw");
+ perf_read_values_display(stdout, &rep->show_threads_values,
style);
- perf_read_values_destroy(&show_threads_values);
+ perf_read_values_destroy(&rep->show_threads_values);
}
}
return 0;
}
-static int __cmd_report(void)
+static int __cmd_report(struct perf_report *rep)
{
int ret = -EINVAL;
u64 nr_samples;
@@ -264,27 +254,31 @@ static int __cmd_report(void)
signal(SIGINT, sig_handler);
- session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops);
+ session = perf_session__new(rep->input_name, O_RDONLY,
+ rep->force, false, &rep->tool);
if (session == NULL)
return -ENOMEM;
- if (cpu_list) {
- ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+ rep->session = session;
+
+ if (rep->cpu_list) {
+ ret = perf_session__cpu_bitmap(session, rep->cpu_list,
+ rep->cpu_bitmap);
if (ret)
goto out_delete;
}
if (use_browser <= 0)
- perf_session__fprintf_info(session, stdout, show_full_info);
+ perf_session__fprintf_info(session, stdout, rep->show_full_info);
- if (show_threads)
- perf_read_values_init(&show_threads_values);
+ if (rep->show_threads)
+ perf_read_values_init(&rep->show_threads_values);
- ret = perf_session__setup_sample_type(session);
+ ret = perf_report__setup_sample_type(rep);
if (ret)
goto out_delete;
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &rep->tool);
if (ret)
goto out_delete;
@@ -327,7 +321,7 @@ static int __cmd_report(void)
}
if (nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", input_name);
+ ui__warning("The %s file has no samples!\n", session->filename);
goto out_delete;
}
@@ -335,7 +329,7 @@ static int __cmd_report(void)
perf_evlist__tui_browse_hists(session->evlist, help,
NULL, NULL, 0);
} else
- perf_evlist__tty_browse_hists(session->evlist, help);
+ perf_evlist__tty_browse_hists(session->evlist, rep, help);
out_delete:
/*
@@ -354,9 +348,9 @@ out_delete:
}
static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
- int unset)
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
+ struct perf_report *rep = (struct perf_report *)opt->value;
char *tok, *tok2;
char *endptr;
@@ -364,7 +358,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
* --no-call-graph
*/
if (unset) {
- dont_use_callchains = true;
+ rep->dont_use_callchains = true;
return 0;
}
@@ -412,7 +406,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
goto setup;
if (tok2[0] != 'c') {
- callchain_param.print_limit = strtod(tok2, &endptr);
+ callchain_param.print_limit = strtoul(tok2, &endptr, 0);
tok2 = strtok(NULL, ",");
if (!tok2)
goto setup;
@@ -433,13 +427,34 @@ setup:
return 0;
}
-static const char * const report_usage[] = {
- "perf report [<options>] <command>",
- NULL
-};
-
-static const struct option options[] = {
- OPT_STRING('i', "input", &input_name, "file",
+int cmd_report(int argc, const char **argv, const char *prefix __used)
+{
+ struct stat st;
+ char callchain_default_opt[] = "fractal,0.5,callee";
+ const char * const report_usage[] = {
+ "perf report [<options>]",
+ NULL
+ };
+ struct perf_report report = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .lost = perf_event__process_lost,
+ .read = process_read_event,
+ .attr = perf_event__process_attr,
+ .event_type = perf_event__process_event_type,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
+ },
+ .pretty_printing_style = "normal",
+ };
+ const struct option options[] = {
+ OPT_STRING('i', "input", &report.input_name, "file",
"input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
@@ -449,17 +464,18 @@ static const struct option options[] = {
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
- OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
- OPT_BOOLEAN('T', "threads", &show_threads,
+ OPT_BOOLEAN('T', "threads", &report.show_threads,
"Show per-thread event counters"),
- OPT_STRING(0, "pretty", &pretty_printing_style, "key",
+ OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
"pretty printing style key: normal raw"),
- OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
- OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+ OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &report.use_stdio,
+ "Use the stdio interface"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -468,13 +484,14 @@ static const struct option options[] = {
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
- OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent, call_order",
- "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold and callchain order. "
+ OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order",
+ "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. "
"Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt),
- OPT_BOOLEAN('G', "inverted", &inverted_callchain, "alias for inverted call graph"),
+ OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
+ "alias for inverted call graph"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
- OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+ OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
@@ -484,12 +501,13 @@ static const struct option options[] = {
OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
- OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved,
+ OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved,
"Only display entries resolved to a symbol"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
- OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
- OPT_BOOLEAN('I', "show-info", &show_full_info,
+ OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_BOOLEAN('I', "show-info", &report.show_full_info,
"Display extended information about perf.data file"),
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
"Interleave source code with assembly code (default)"),
@@ -500,24 +518,30 @@ static const struct option options[] = {
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_END()
-};
+ };
-int cmd_report(int argc, const char **argv, const char *prefix __used)
-{
argc = parse_options(argc, argv, options, report_usage, 0);
- if (use_stdio)
+ if (report.use_stdio)
use_browser = 0;
- else if (use_tui)
+ else if (report.use_tui)
use_browser = 1;
- if (inverted_callchain)
+ if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
- if (strcmp(input_name, "-") != 0)
+ if (!report.input_name || !strlen(report.input_name)) {
+ if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+ report.input_name = "-";
+ else
+ report.input_name = "perf.data";
+ }
+
+ if (strcmp(report.input_name, "-") != 0)
setup_browser(true);
else
use_browser = 0;
+
/*
* Only in the newt browser we are doing integrated annotation,
* so don't allocate extra space that won't be used in the stdio
@@ -525,7 +549,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
*/
if (use_browser > 0) {
symbol_conf.priv_size = sizeof(struct annotation);
- annotate_init = symbol__annotate_init;
+ report.annotate_init = symbol__annotate_init;
/*
* For searching by name on the "Browse map details".
* providing it only in verbose mode not to bloat too
@@ -572,5 +596,5 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);
- return __cmd_report();
+ return __cmd_report(&report);
}
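
The input_name defaulting added to cmd_report() above is a small, reusable pattern: prefer an explicit name, fall back to "-" when stdin is a pipe, otherwise use the conventional perf.data. A minimal standalone sketch under those assumptions (default_input_name is a hypothetical helper, not a perf API):

#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* Hypothetical helper mirroring the defaulting logic in cmd_report(). */
static const char *default_input_name(const char *arg)
{
	struct stat st;

	if (arg && strlen(arg))
		return arg;		/* explicit -i wins */
	if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
		return "-";		/* data is being piped in */
	return "perf.data";		/* conventional default */
}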
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5177964943e7..fb8b5f83b4a0 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2,11 +2,14 @@
#include "perf.h"
#include "util/util.h"
+#include "util/evlist.h"
#include "util/cache.h"
+#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
@@ -19,7 +22,7 @@
#include <pthread.h>
#include <math.h>
-static char const *input_name = "perf.data";
+static const char *input_name;
static char default_sort_order[] = "avg, max, switch, runtime";
static const char *sort_order = default_sort_order;
@@ -723,21 +726,21 @@ struct trace_migrate_task_event {
struct trace_sched_handler {
void (*switch_event)(struct trace_switch_event *,
- struct perf_session *,
+ struct machine *,
struct event *,
int cpu,
u64 timestamp,
struct thread *thread);
void (*runtime_event)(struct trace_runtime_event *,
- struct perf_session *,
+ struct machine *,
struct event *,
int cpu,
u64 timestamp,
struct thread *thread);
void (*wakeup_event)(struct trace_wakeup_event *,
- struct perf_session *,
+ struct machine *,
struct event *,
int cpu,
u64 timestamp,
@@ -750,7 +753,7 @@ struct trace_sched_handler {
struct thread *thread);
void (*migrate_task_event)(struct trace_migrate_task_event *,
- struct perf_session *session,
+ struct machine *machine,
struct event *,
int cpu,
u64 timestamp,
@@ -760,7 +763,7 @@ struct trace_sched_handler {
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct perf_session *session __used,
+ struct machine *machine __used,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -787,7 +790,7 @@ static u64 cpu_last_switched[MAX_CPUS];
static void
replay_switch_event(struct trace_switch_event *switch_event,
- struct perf_session *session __used,
+ struct machine *machine __used,
struct event *event,
int cpu,
u64 timestamp,
@@ -1021,7 +1024,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
static void
latency_switch_event(struct trace_switch_event *switch_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *event __used,
int cpu,
u64 timestamp,
@@ -1045,8 +1048,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
- sched_out = perf_session__findnew(session, switch_event->prev_pid);
- sched_in = perf_session__findnew(session, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+ sched_in = machine__findnew_thread(machine, switch_event->next_pid);
out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_events) {
@@ -1074,13 +1077,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *event __used,
int cpu,
u64 timestamp,
struct thread *this_thread __used)
{
- struct thread *thread = perf_session__findnew(session, runtime_event->pid);
+ struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1097,7 +1100,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -1111,7 +1114,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
if (!wakeup_event->success)
return;
- wakee = perf_session__findnew(session, wakeup_event->pid);
+ wakee = machine__findnew_thread(machine, wakeup_event->pid);
atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
if (!atoms) {
thread_atoms_insert(wakee);
@@ -1145,7 +1148,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -1161,7 +1164,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
if (profile_cpu == -1)
return;
- migrant = perf_session__findnew(session, migrate_task_event->pid);
+ migrant = machine__findnew_thread(machine, migrate_task_event->pid);
atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
if (!atoms) {
thread_atoms_insert(migrant);
@@ -1356,12 +1359,13 @@ static void sort_lat(void)
static struct trace_sched_handler *trace_handler;
static void
-process_sched_wakeup_event(void *data, struct perf_session *session,
+process_sched_wakeup_event(struct perf_tool *tool __used,
struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_wakeup_event wakeup_event;
FILL_COMMON_FIELDS(wakeup_event, event, data);
@@ -1373,8 +1377,8 @@ process_sched_wakeup_event(void *data, struct perf_session *session,
FILL_FIELD(wakeup_event, cpu, event, data);
if (trace_handler->wakeup_event)
- trace_handler->wakeup_event(&wakeup_event, session, event,
- cpu, timestamp, thread);
+ trace_handler->wakeup_event(&wakeup_event, machine, event,
+ sample->cpu, sample->time, thread);
}
/*
@@ -1392,7 +1396,7 @@ static char next_shortname2 = '0';
static void
map_switch_event(struct trace_switch_event *switch_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *event __used,
int this_cpu,
u64 timestamp,
@@ -1420,8 +1424,8 @@ map_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
- sched_out = perf_session__findnew(session, switch_event->prev_pid);
- sched_in = perf_session__findnew(session, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+ sched_in = machine__findnew_thread(machine, switch_event->next_pid);
curr_thread[this_cpu] = sched_in;
@@ -1469,14 +1473,15 @@ map_switch_event(struct trace_switch_event *switch_event,
}
}
-
static void
-process_sched_switch_event(void *data, struct perf_session *session,
+process_sched_switch_event(struct perf_tool *tool __used,
struct event *event,
- int this_cpu,
- u64 timestamp __used,
- struct thread *thread __used)
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ int this_cpu = sample->cpu;
+ void *data = sample->raw_data;
struct trace_switch_event switch_event;
FILL_COMMON_FIELDS(switch_event, event, data);
@@ -1498,19 +1503,20 @@ process_sched_switch_event(void *data, struct perf_session *session,
nr_context_switch_bugs++;
}
if (trace_handler->switch_event)
- trace_handler->switch_event(&switch_event, session, event,
- this_cpu, timestamp, thread);
+ trace_handler->switch_event(&switch_event, machine, event,
+ this_cpu, sample->time, thread);
curr_pid[this_cpu] = switch_event.next_pid;
}
static void
-process_sched_runtime_event(void *data, struct perf_session *session,
- struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+process_sched_runtime_event(struct perf_tool *tool __used,
+ struct event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_runtime_event runtime_event;
FILL_ARRAY(runtime_event, comm, event, data);
@@ -1519,16 +1525,18 @@ process_sched_runtime_event(void *data, struct perf_session *session,
FILL_FIELD(runtime_event, vruntime, event, data);
if (trace_handler->runtime_event)
- trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
+ trace_handler->runtime_event(&runtime_event, machine, event,
+ sample->cpu, sample->time, thread);
}
static void
-process_sched_fork_event(void *data,
+process_sched_fork_event(struct perf_tool *tool __used,
struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+ struct perf_sample *sample,
+ struct machine *machine __used,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_fork_event fork_event;
FILL_COMMON_FIELDS(fork_event, event, data);
@@ -1540,13 +1548,14 @@ process_sched_fork_event(void *data,
if (trace_handler->fork_event)
trace_handler->fork_event(&fork_event, event,
- cpu, timestamp, thread);
+ sample->cpu, sample->time, thread);
}
static void
-process_sched_exit_event(struct event *event,
- int cpu __used,
- u64 timestamp __used,
+process_sched_exit_event(struct perf_tool *tool __used,
+ struct event *event,
+ struct perf_sample *sample __used,
+ struct machine *machine __used,
struct thread *thread __used)
{
if (verbose)
@@ -1554,12 +1563,13 @@ process_sched_exit_event(struct event *event,
}
static void
-process_sched_migrate_task_event(void *data, struct perf_session *session,
- struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+process_sched_migrate_task_event(struct perf_tool *tool __used,
+ struct event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_migrate_task_event migrate_task_event;
FILL_COMMON_FIELDS(migrate_task_event, event, data);
@@ -1570,67 +1580,47 @@ process_sched_migrate_task_event(void *data, struct perf_session *session,
FILL_FIELD(migrate_task_event, cpu, event, data);
if (trace_handler->migrate_task_event)
- trace_handler->migrate_task_event(&migrate_task_event, session,
- event, cpu, timestamp, thread);
+ trace_handler->migrate_task_event(&migrate_task_event, machine,
+ event, sample->cpu,
+ sample->time, thread);
}
-static void process_raw_event(union perf_event *raw_event __used,
- struct perf_session *session, void *data, int cpu,
- u64 timestamp, struct thread *thread)
-{
- struct event *event;
- int type;
-
-
- type = trace_parse_common_type(data);
- event = trace_find_event(type);
-
- if (!strcmp(event->name, "sched_switch"))
- process_sched_switch_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_stat_runtime"))
- process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_wakeup"))
- process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_wakeup_new"))
- process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_process_fork"))
- process_sched_fork_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_process_exit"))
- process_sched_exit_event(event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_migrate_task"))
- process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
-}
+typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread);
-static int process_sample_event(union perf_event *event,
- struct perf_sample *sample,
- struct perf_evsel *evsel __used,
- struct perf_session *session)
+static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
+ union perf_event *event __used,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
{
- struct thread *thread;
-
- if (!(session->sample_type & PERF_SAMPLE_RAW))
- return 0;
+ struct thread *thread = machine__findnew_thread(machine, sample->pid);
- thread = perf_session__findnew(session, sample->pid);
if (thread == NULL) {
- pr_debug("problem processing %d event, skipping it.\n",
- event->header.type);
+ pr_debug("problem processing %s event, skipping it.\n",
+ evsel->name);
return -1;
}
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ evsel->hists.stats.total_period += sample->period;
+ hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
- if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
- return 0;
+ if (evsel->handler.func != NULL) {
+ tracepoint_handler f = evsel->handler.func;
- process_raw_event(event, session, sample->raw_data, sample->cpu,
- sample->time, thread);
+ if (evsel->handler.data == NULL)
+ evsel->handler.data = trace_find_event(evsel->attr.config);
+
+ f(tool, evsel->handler.data, sample, machine, thread);
+ }
return 0;
}
-static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
+static struct perf_tool perf_sched = {
+ .sample = perf_sched__process_tracepoint_sample,
.comm = perf_event__process_comm,
.lost = perf_event__process_lost,
.fork = perf_event__process_task,
@@ -1640,13 +1630,25 @@ static struct perf_event_ops event_ops = {
static void read_events(bool destroy, struct perf_session **psession)
{
int err = -EINVAL;
+ const struct perf_evsel_str_handler handlers[] = {
+ { "sched:sched_switch", process_sched_switch_event, },
+ { "sched:sched_stat_runtime", process_sched_runtime_event, },
+ { "sched:sched_wakeup", process_sched_wakeup_event, },
+ { "sched:sched_wakeup_new", process_sched_wakeup_event, },
+ { "sched:sched_process_fork", process_sched_fork_event, },
+ { "sched:sched_process_exit", process_sched_exit_event, },
+ { "sched:sched_migrate_task", process_sched_migrate_task_event, },
+ };
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &event_ops);
+ 0, false, &perf_sched);
if (session == NULL)
die("No Memory");
+ err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers);
+ assert(err == 0);
+
if (perf_session__has_traces(session, "record -R")) {
- err = perf_session__process_events(session, &event_ops);
+ err = perf_session__process_events(session, &perf_sched);
if (err)
die("Failed to process events, error %d", err);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 2f62a2952269..fd1909afcfd6 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -7,6 +7,7 @@
#include "util/header.h"
#include "util/parse-options.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
@@ -23,6 +24,7 @@ static u64 nr_unordered;
extern const struct option record_options[];
static bool no_callchain;
static bool show_full_info;
+static bool system_wide;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -315,7 +317,7 @@ static bool sample_addr_correlates_sym(struct perf_event_attr *attr)
static void print_sample_addr(union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session,
+ struct machine *machine,
struct thread *thread,
struct perf_event_attr *attr)
{
@@ -328,11 +330,11 @@ static void print_sample_addr(union perf_event *event,
if (!sample_addr_correlates_sym(attr))
return;
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, sample->addr, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ sample->addr, &al);
if (!al.map)
- thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE,
- event->ip.pid, sample->addr, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
+ sample->addr, &al);
al.cpu = sample->cpu;
al.sym = NULL;
@@ -362,7 +364,7 @@ static void print_sample_addr(union perf_event *event,
static void process_event(union perf_event *event __unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session,
+ struct machine *machine,
struct thread *thread)
{
struct perf_event_attr *attr = &evsel->attr;
@@ -377,15 +379,15 @@ static void process_event(union perf_event *event __unused,
sample->raw_size);
if (PRINT_FIELD(ADDR))
- print_sample_addr(event, sample, session, thread, attr);
+ print_sample_addr(event, sample, machine, thread, attr);
if (PRINT_FIELD(IP)) {
if (!symbol_conf.use_callchain)
printf(" ");
else
printf("\n");
- perf_session__print_ip(event, sample, session,
- PRINT_FIELD(SYM), PRINT_FIELD(DSO));
+ perf_event__print_ip(event, sample, machine, evsel,
+ PRINT_FIELD(SYM), PRINT_FIELD(DSO));
}
printf("\n");
@@ -432,14 +434,16 @@ static int cleanup_scripting(void)
return scripting_ops->stop_script();
}
-static char const *input_name = "perf.data";
+static const char *input_name;
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct addr_location al;
+ struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -458,16 +462,25 @@ static int process_sample_event(union perf_event *event,
return 0;
}
+ if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (al.filtered)
+ return 0;
+
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, sample, evsel, session, thread);
+ scripting_ops->process_event(event, sample, evsel, machine, thread);
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_script = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
@@ -494,7 +507,7 @@ static int __cmd_script(struct perf_session *session)
signal(SIGINT, sig_handler);
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &perf_script);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -523,12 +536,6 @@ static struct script_spec *script_spec__new(const char *spec,
return s;
}
-static void script_spec__delete(struct script_spec *s)
-{
- free(s->spec);
- free(s);
-}
-
static void script_spec__add(struct script_spec *s)
{
list_add_tail(&s->node, &script_specs);
@@ -554,16 +561,11 @@ static struct script_spec *script_spec__findnew(const char *spec,
s = script_spec__new(spec, ops);
if (!s)
- goto out_delete_spec;
+ return NULL;
script_spec__add(s);
return s;
-
-out_delete_spec:
- script_spec__delete(s);
-
- return NULL;
}
int script_spec_register(const char *spec, struct scripting_ops *ops)
@@ -681,7 +683,8 @@ static int parse_output_fields(const struct option *opt __used,
type = PERF_TYPE_RAW;
else {
fprintf(stderr, "Invalid event type in field string.\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
if (output[type].user_set)
@@ -923,6 +926,24 @@ static int read_script_info(struct script_desc *desc, const char *filename)
return 0;
}
+static char *get_script_root(struct dirent *script_dirent, const char *suffix)
+{
+ char *script_root, *str;
+
+ script_root = strdup(script_dirent->d_name);
+ if (!script_root)
+ return NULL;
+
+ str = (char *)ends_with(script_root, suffix);
+ if (!str) {
+ free(script_root);
+ return NULL;
+ }
+
+ *str = '\0';
+ return script_root;
+}
+
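
get_script_root() above factors out a strdup-then-truncate idiom that was previously open-coded in two places. A self-contained sketch of the same helper; ends_with() here is a local stand-in for perf's utility of the same name:

#include <stdlib.h>
#include <string.h>

/* Return a pointer to the suffix inside str, or NULL if absent. */
static const char *ends_with(const char *str, const char *suffix)
{
	size_t len = strlen(str), slen = strlen(suffix);

	if (len < slen || strcmp(str + len - slen, suffix))
		return NULL;
	return str + len - slen;
}

/* Duplicate name and chop the suffix off the copy; caller frees. */
static char *strip_suffix(const char *name, const char *suffix)
{
	char *copy = strdup(name);
	char *at;

	if (!copy)
		return NULL;
	at = (char *)ends_with(copy, suffix);
	if (!at) {
		free(copy);
		return NULL;
	}
	*at = '\0';
	return copy;
}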
static int list_available_scripts(const struct option *opt __used,
const char *s __used, int unset __used)
{
@@ -934,7 +955,6 @@ static int list_available_scripts(const struct option *opt __used,
struct script_desc *desc;
char first_half[BUFSIZ];
char *script_root;
- char *str;
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
@@ -950,16 +970,14 @@ static int list_available_scripts(const struct option *opt __used,
continue;
for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- script_root = strdup(script_dirent.d_name);
- str = (char *)ends_with(script_root, REPORT_SUFFIX);
- if (str) {
- *str = '\0';
+ script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+ if (script_root) {
desc = script_desc__findnew(script_root);
snprintf(script_path, MAXPATHLEN, "%s/%s",
lang_path, script_dirent.d_name);
read_script_info(desc, script_path);
+ free(script_root);
}
- free(script_root);
}
}
@@ -981,8 +999,7 @@ static char *get_script_path(const char *script_root, const char *suffix)
char script_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
char lang_path[MAXPATHLEN];
- char *str, *__script_root;
- char *path = NULL;
+ char *__script_root;
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
@@ -998,23 +1015,18 @@ static char *get_script_path(const char *script_root, const char *suffix)
continue;
for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- __script_root = strdup(script_dirent.d_name);
- str = (char *)ends_with(__script_root, suffix);
- if (str) {
- *str = '\0';
- if (strcmp(__script_root, script_root))
- continue;
+ __script_root = get_script_root(&script_dirent, suffix);
+ if (__script_root && !strcmp(script_root, __script_root)) {
+ free(__script_root);
snprintf(script_path, MAXPATHLEN, "%s/%s",
lang_path, script_dirent.d_name);
- path = strdup(script_path);
- free(__script_root);
- break;
+ return strdup(script_path);
}
free(__script_root);
}
}
- return path;
+ return NULL;
}
static bool is_top_script(const char *script_path)
@@ -1083,7 +1095,11 @@ static const struct option options[] = {
OPT_CALLBACK('f', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr",
parse_output_fields),
- OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+ "only display events for these comms"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_END()
@@ -1110,7 +1126,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
struct perf_session *session;
char *script_path = NULL;
const char **__argv;
- bool system_wide;
int i, j, err;
setup_scripting();
@@ -1178,15 +1193,17 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
}
if (!pid) {
- system_wide = true;
j = 0;
dup2(live_pipe[1], 1);
close(live_pipe[0]);
- if (!is_top_script(argv[0]))
+ if (is_top_script(argv[0])) {
+ system_wide = true;
+ } else if (!system_wide) {
system_wide = !have_cmd(argc - rep_args,
&argv[rep_args]);
+ }
__argv = malloc((argc + 6) * sizeof(const char *));
if (!__argv)
@@ -1234,10 +1251,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
script_path = rep_script_path;
if (script_path) {
- system_wide = false;
j = 0;
- if (rec_script_path)
+ if (!rec_script_path)
+ system_wide = false;
+ else if (!system_wide)
system_wide = !have_cmd(argc - 1, &argv[1]);
__argv = malloc((argc + 2) * sizeof(const char *));
@@ -1261,7 +1279,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (!script_name)
setup_pager();
- session = perf_session__new(input_name, O_RDONLY, 0, false, &event_ops);
+ session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script);
if (session == NULL)
return -ENOMEM;
@@ -1287,7 +1305,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
return -1;
}
- input = open(input_name, O_RDONLY);
+ input = open(session->filename, O_RDONLY); /* input_name */
if (input < 0) {
perror("failed to open file");
exit(-1);
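
The system_wide reshuffle above boils down to one rule on the live-pipe branch: the new -a/--all-cpus flag is authoritative, top scripts force wide collection, and otherwise wide collection is only the default when no target command was given. A condensed sketch of that decision (function and parameter names are illustrative):

#include <stdbool.h>

static bool decide_system_wide(bool user_set_wide, bool top_script,
			       bool have_target_cmd)
{
	if (top_script)
		return true;		/* top scripts always run wide */
	if (user_set_wide)
		return true;		/* respect an explicit -a */
	return !have_target_cmd;	/* default: wide iff no workload */
}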
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 955930e0a5c3..f5d2a63eba66 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -578,6 +578,33 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
avg / avg_stats(&walltime_nsecs_stats));
}
+/* used for get_ratio_color() */
+enum grc_type {
+ GRC_STALLED_CYCLES_FE,
+ GRC_STALLED_CYCLES_BE,
+ GRC_CACHE_MISSES,
+ GRC_MAX_NR
+};
+
+static const char *get_ratio_color(enum grc_type type, double ratio)
+{
+ static const double grc_table[GRC_MAX_NR][3] = {
+ [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
+ [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
+ [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 },
+ };
+ const char *color = PERF_COLOR_NORMAL;
+
+ if (ratio > grc_table[type][0])
+ color = PERF_COLOR_RED;
+ else if (ratio > grc_table[type][1])
+ color = PERF_COLOR_MAGENTA;
+ else if (ratio > grc_table[type][2])
+ color = PERF_COLOR_YELLOW;
+
+ return color;
+}
+
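
Seven print_*() functions below collapse their identical red/magenta/yellow ladders into calls to this helper; each grc_table row is ordered {red, magenta, yellow} by descending threshold. A call site then shrinks to a sketch like:

	/* ratio computed as avg / total * 100.0, as in the callers below */
	const char *color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " # ");
	color_fprintf(output, color, "%6.2f%%", ratio);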
static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
@@ -588,13 +615,7 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 50.0)
- color = PERF_COLOR_RED;
- else if (ratio > 30.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 10.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -611,13 +632,7 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 75.0)
- color = PERF_COLOR_RED;
- else if (ratio > 50.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 20.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -634,13 +649,7 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -657,13 +666,7 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -680,13 +683,7 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -703,13 +700,7 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -726,13 +717,7 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -749,13 +734,7 @@ static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, doub
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -1108,22 +1087,13 @@ static const struct option options[] = {
*/
static int add_default_attributes(void)
{
- struct perf_evsel *pos;
- size_t attr_nr = 0;
- size_t c;
-
/* Set attrs if no event is selected and !null_run: */
if (null_run)
return 0;
if (!evsel_list->nr_entries) {
- for (c = 0; c < ARRAY_SIZE(default_attrs); c++) {
- pos = perf_evsel__new(default_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
- attr_nr += c;
+ if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0)
+ return -1;
}
/* Detailed events get appended to the event list: */
@@ -1132,38 +1102,21 @@ static int add_default_attributes(void)
return 0;
/* Append detailed run extra attributes: */
- for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) {
- pos = perf_evsel__new(detailed_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
- attr_nr += c;
+ if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0)
+ return -1;
if (detailed_run < 2)
return 0;
/* Append very detailed run extra attributes: */
- for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) {
- pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
+ if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0)
+ return -1;
if (detailed_run < 3)
return 0;
/* Append very, very detailed run extra attributes: */
- for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) {
- pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
-
-
- return 0;
+ return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs);
}
int cmd_stat(int argc, const char **argv, const char *prefix __used)
@@ -1267,8 +1220,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
- perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
- perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
+ perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0)
goto out_free_fd;
}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 831d1baeac37..2b9a7f497a20 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -7,6 +7,7 @@
#include "util/cache.h"
#include "util/debug.h"
+#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -14,8 +15,6 @@
#include "util/thread_map.h"
#include "../../include/linux/hw_breakpoint.h"
-static long page_size;
-
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
bool *visited = symbol__priv(sym);
@@ -31,6 +30,7 @@ static int test__vmlinux_matches_kallsyms(void)
struct map *kallsyms_map, *vmlinux_map;
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
+ long page_size = sysconf(_SC_PAGE_SIZE);
struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
/*
@@ -247,7 +247,7 @@ static int trace_event__id(const char *evname)
if (asprintf(&filename,
"%s/syscalls/%s/id",
- debugfs_path, evname) < 0)
+ tracing_events_path, evname) < 0)
return -1;
fd = open(filename, O_RDONLY);
@@ -603,7 +603,7 @@ out_free_threads:
#define TEST_ASSERT_VAL(text, cond) \
do { \
- if (!cond) { \
+ if (!(cond)) { \
pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
return -1; \
} \
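
The added parentheses matter because ! binds tighter than the binary operators a caller may pass in cond. A tiny standalone demonstration of the difference (OLD_NEG/NEW_NEG are illustrative macros, not perf code):

#include <stdio.h>

#define OLD_NEG(cond) (!cond)		/* expands with wrong binding */
#define NEW_NEG(cond) (!(cond))		/* negates the full expression */

int main(void)
{
	int a = 0, b = 0;

	/* cond = "a && b" is false, so its negation should be 1: */
	printf("new: %d\n", NEW_NEG(a && b));	/* prints 1 */
	/* old form expands to "!a && b" == "1 && 0" == 0: */
	printf("old: %d\n", OLD_NEG(a && b));	/* prints 0 */
	return 0;
}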
@@ -759,6 +759,103 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_tracepoint(evlist);
+}
+
+static int
+test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ TEST_ASSERT_VAL("wrong exclude_user",
+ !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel",
+ evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ }
+
+ return test__checkevent_tracepoint_multi(evlist);
+}
+
+static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_raw(evlist);
+}
+
+static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_numeric(evlist);
+}
+
+static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_symbolic_alias(evlist);
+}
+
+static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_genhw(evlist);
+}
+
static struct test__event_st {
const char *name;
__u32 type;
@@ -808,6 +905,34 @@ static struct test__event_st {
.name = "mem:0:w",
.check = test__checkevent_breakpoint_w,
},
+ {
+ .name = "syscalls:sys_enter_open:k",
+ .check = test__checkevent_tracepoint_modifier,
+ },
+ {
+ .name = "syscalls:*:u",
+ .check = test__checkevent_tracepoint_multi_modifier,
+ },
+ {
+ .name = "r1:kp",
+ .check = test__checkevent_raw_modifier,
+ },
+ {
+ .name = "1:1:hp",
+ .check = test__checkevent_numeric_modifier,
+ },
+ {
+ .name = "instructions:h",
+ .check = test__checkevent_symbolic_name_modifier,
+ },
+ {
+ .name = "faults:u",
+ .check = test__checkevent_symbolic_alias_modifier,
+ },
+ {
+ .name = "L1-dcache-load-miss:kp",
+ .check = test__checkevent_genhw_modifier,
+ },
};
#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@ -841,6 +966,336 @@ static int test__parse_events(void)
return ret;
}
+
+static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
+ size_t *sizep)
+{
+ cpu_set_t *mask;
+ size_t size;
+ int i, cpu = -1, nrcpus = 1024;
+realloc:
+ mask = CPU_ALLOC(nrcpus);
+ size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(size, mask);
+
+ if (sched_getaffinity(pid, size, mask) == -1) {
+ CPU_FREE(mask);
+ if (errno == EINVAL && nrcpus < (1024 << 8)) {
+ nrcpus = nrcpus << 2;
+ goto realloc;
+ }
+ perror("sched_getaffinity");
+ return -1;
+ }
+
+ for (i = 0; i < nrcpus; i++) {
+ if (CPU_ISSET_S(i, size, mask)) {
+ if (cpu == -1) {
+ cpu = i;
+ *maskp = mask;
+ *sizep = size;
+ } else
+ CPU_CLR_S(i, size, mask);
+ }
+ }
+
+ if (cpu == -1)
+ CPU_FREE(mask);
+
+ return cpu;
+}
+
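
sched__get_first_possible_cpu() above has to cope with machines whose CPU count exceeds a static cpu_set_t, hence the CPU_ALLOC/EINVAL-retry dance. The retry core in isolation, as a sketch using only the glibc CPU_* macros:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <sys/types.h>

/* Grow the dynamically sized mask until the kernel accepts it. */
static cpu_set_t *get_affinity(pid_t pid, size_t *sizep)
{
	int nrcpus = 1024;

	for (;;) {
		cpu_set_t *mask = CPU_ALLOC(nrcpus);
		size_t size = CPU_ALLOC_SIZE(nrcpus);

		if (!mask)
			return NULL;
		CPU_ZERO_S(size, mask);
		if (!sched_getaffinity(pid, size, mask)) {
			*sizep = size;
			return mask;	/* caller must CPU_FREE(mask) */
		}
		CPU_FREE(mask);
		if (errno != EINVAL || nrcpus >= (1024 << 8))
			return NULL;	/* real error, or give up */
		nrcpus <<= 2;		/* mask too small: retry bigger */
	}
}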
+static int test__PERF_RECORD(void)
+{
+ struct perf_record_opts opts = {
+ .target_pid = -1,
+ .target_tid = -1,
+ .no_delay = true,
+ .freq = 10,
+ .mmap_pages = 256,
+ .sample_id_all_avail = true,
+ };
+ cpu_set_t *cpu_mask = NULL;
+ size_t cpu_mask_size = 0;
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+ struct perf_evsel *evsel;
+ struct perf_sample sample;
+ const char *cmd = "sleep";
+ const char *argv[] = { cmd, "1", NULL, };
+ char *bname;
+ u64 sample_type, prev_time = 0;
+ bool found_cmd_mmap = false,
+ found_libc_mmap = false,
+ found_vdso_mmap = false,
+ found_ld_mmap = false;
+ int err = -1, errs = 0, i, wakeups = 0, sample_size;
+ u32 cpu;
+ int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
+
+ if (evlist == NULL || argv == NULL) {
+ pr_debug("Not enough memory to create evlist\n");
+ goto out;
+ }
+
+ /*
+ * We need at least one evsel in the evlist, use the default
+ * one: "cycles".
+ */
+ err = perf_evlist__add_default(evlist);
+ if (err < 0) {
+ pr_debug("Not enough memory to create evsel\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Create maps of threads and cpus to monitor. In this case
+ * we start with all threads and cpus (-1, -1) but then in
+ * perf_evlist__prepare_workload we'll fill in the only thread
+ * we're monitoring, the one forked there.
+ */
+ err = perf_evlist__create_maps(evlist, opts.target_pid,
+ opts.target_tid, opts.cpu_list);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Prepare the workload in argv[] to run, it'll fork it, and then wait
+ * for perf_evlist__start_workload() to exec it. This is done this way
+ * so that we have time to open the evlist (calling sys_perf_event_open
+ * on all the fds) and then mmap them.
+ */
+ err = perf_evlist__prepare_workload(evlist, &opts, argv);
+ if (err < 0) {
+ pr_debug("Couldn't run the workload!\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Config the evsels, setting attr->comm on the first one, etc.
+ */
+ evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+ evsel->attr.sample_type |= PERF_SAMPLE_CPU;
+ evsel->attr.sample_type |= PERF_SAMPLE_TID;
+ evsel->attr.sample_type |= PERF_SAMPLE_TIME;
+ perf_evlist__config_attrs(evlist, &opts);
+
+ err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
+ &cpu_mask_size);
+ if (err < 0) {
+ pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ cpu = err;
+
+ /*
+ * So that we can check perf_sample.cpu on all the samples.
+ */
+ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
+ pr_debug("sched_setaffinity: %s\n", strerror(errno));
+ goto out_free_cpu_mask;
+ }
+
+ /*
+ * Call sys_perf_event_open on all the fds on all the evsels,
+ * grouping them if asked to.
+ */
+ err = perf_evlist__open(evlist, opts.group);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ /*
+ * mmap the first fd on a given CPU and ask for events for the other
+ * fds in the same CPU to be injected in the same mmap ring buffer
+ * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
+ */
+ err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ if (err < 0) {
+ pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ /*
+ * We'll need these two to parse the PERF_SAMPLE_* fields in each
+ * event.
+ */
+ sample_type = perf_evlist__sample_type(evlist);
+ sample_size = __perf_evsel__sample_size(sample_type);
+
+ /*
+ * Now that all is properly set up, enable the events, they will
+ * count just on workload.pid, which will start...
+ */
+ perf_evlist__enable(evlist);
+
+ /*
+ * Now!
+ */
+ perf_evlist__start_workload(evlist);
+
+ while (1) {
+ int before = total_events;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ union perf_event *event;
+
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ const u32 type = event->header.type;
+ const char *name = perf_event__name(type);
+
+ ++total_events;
+ if (type < PERF_RECORD_MAX)
+ nr_events[type]++;
+
+ err = perf_event__parse_sample(event, sample_type,
+ sample_size, true,
+ &sample, false);
+ if (err < 0) {
+ if (verbose)
+ perf_event__fprintf(event, stderr);
+ pr_debug("Couldn't parse sample\n");
+ goto out_err;
+ }
+
+ if (verbose) {
+ pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
+ perf_event__fprintf(event, stderr);
+ }
+
+ if (prev_time > sample.time) {
+ pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
+ name, prev_time, sample.time);
+ ++errs;
+ }
+
+ prev_time = sample.time;
+
+ if (sample.cpu != cpu) {
+ pr_debug("%s with unexpected cpu, expected %d, got %d\n",
+ name, cpu, sample.cpu);
+ ++errs;
+ }
+
+ if ((pid_t)sample.pid != evlist->workload.pid) {
+ pr_debug("%s with unexpected pid, expected %d, got %d\n",
+ name, evlist->workload.pid, sample.pid);
+ ++errs;
+ }
+
+ if ((pid_t)sample.tid != evlist->workload.pid) {
+ pr_debug("%s with unexpected tid, expected %d, got %d\n",
+ name, evlist->workload.pid, sample.tid);
+ ++errs;
+ }
+
+ if ((type == PERF_RECORD_COMM ||
+ type == PERF_RECORD_MMAP ||
+ type == PERF_RECORD_FORK ||
+ type == PERF_RECORD_EXIT) &&
+ (pid_t)event->comm.pid != evlist->workload.pid) {
+ pr_debug("%s with unexpected pid/tid\n", name);
+ ++errs;
+ }
+
+ if ((type == PERF_RECORD_COMM ||
+ type == PERF_RECORD_MMAP) &&
+ event->comm.pid != event->comm.tid) {
+ pr_debug("%s with different pid/tid!\n", name);
+ ++errs;
+ }
+
+ switch (type) {
+ case PERF_RECORD_COMM:
+ if (strcmp(event->comm.comm, cmd)) {
+ pr_debug("%s with unexpected comm!\n", name);
+ ++errs;
+ }
+ break;
+ case PERF_RECORD_EXIT:
+ goto found_exit;
+ case PERF_RECORD_MMAP:
+ bname = strrchr(event->mmap.filename, '/');
+ if (bname != NULL) {
+ if (!found_cmd_mmap)
+ found_cmd_mmap = !strcmp(bname + 1, cmd);
+ if (!found_libc_mmap)
+ found_libc_mmap = !strncmp(bname + 1, "libc", 4);
+ if (!found_ld_mmap)
+ found_ld_mmap = !strncmp(bname + 1, "ld", 2);
+ } else if (!found_vdso_mmap)
+ found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
+ break;
+
+ case PERF_RECORD_SAMPLE:
+ /* Just ignore samples for now */
+ break;
+ default:
+ pr_debug("Unexpected perf_event->header.type %d!\n",
+ type);
+ ++errs;
+ }
+ }
+ }
+
+ /*
+ * We don't use poll here because at least at 3.1 times the
+ * PERF_RECORD_{!SAMPLE} events don't honour
+ * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
+ */
+ if (total_events == before && false)
+ poll(evlist->pollfd, evlist->nr_fds, -1);
+
+ sleep(1);
+ if (++wakeups > 5) {
+ pr_debug("No PERF_RECORD_EXIT event!\n");
+ break;
+ }
+ }
+
+found_exit:
+ if (nr_events[PERF_RECORD_COMM] > 1) {
+ pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
+ ++errs;
+ }
+
+ if (nr_events[PERF_RECORD_COMM] == 0) {
+ pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
+ ++errs;
+ }
+
+ if (!found_cmd_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
+ ++errs;
+ }
+
+ if (!found_libc_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
+ ++errs;
+ }
+
+ if (!found_ld_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
+ ++errs;
+ }
+
+ if (!found_vdso_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
+ ++errs;
+ }
+out_err:
+ perf_evlist__munmap(evlist);
+out_free_cpu_mask:
+ CPU_FREE(cpu_mask);
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+out:
+ return (err < 0 || errs > 0) ? -1 : 0;
+}
+
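
The heart of test__PERF_RECORD is its drain loop: sweep every mmap ring, pull events until each ring is empty, and only then decide whether to keep waiting. Compressed to a sketch using the same evlist API as above; saw_exit stands in for the goto found_exit, and parsing, bookkeeping and error paths are elided:

	for (;;) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				++total_events;
				/* classify event->header.type, parse the
				 * sample, validate cpu/pid/tid ... */
			}
		}

		if (saw_exit)			/* PERF_RECORD_EXIT seen */
			break;
		if (total_events == before && ++wakeups > 5)
			break;			/* nothing new, stop waiting */
		sleep(1);
	}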
static struct test {
const char *desc;
int (*func)(void);
@@ -866,45 +1321,89 @@ static struct test {
.func = test__parse_events,
},
{
+ .desc = "Validate PERF_RECORD_* events & perf_sample fields",
+ .func = test__PERF_RECORD,
+ },
+ {
.func = NULL,
},
};
-static int __cmd_test(void)
+static bool perf_test__matches(int curr, int argc, const char *argv[])
{
- int i = 0;
+ int i;
+
+ if (argc == 0)
+ return true;
- page_size = sysconf(_SC_PAGE_SIZE);
+ for (i = 0; i < argc; ++i) {
+ char *end;
+ long nr = strtoul(argv[i], &end, 10);
+
+ if (*end == '\0') {
+ if (nr == curr + 1)
+ return true;
+ continue;
+ }
+
+ if (strstr(tests[curr].desc, argv[i]))
+ return true;
+ }
+
+ return false;
+}
+
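
perf_test__matches() accepts either a 1-based test number or a fragment of the description, telling them apart by whether strtoul() consumed the whole argument. The same idiom as a standalone sketch (run_test() is a hypothetical stand-in):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void run_test(int idx) { printf("running test %d\n", idx + 1); }

static void select_test(const char *arg, const char *desc, int curr)
{
	char *end;
	long nr = strtoul(arg, &end, 10);

	if (*end == '\0') {		/* whole argument was numeric */
		if (nr == curr + 1)
			run_test(curr);
	} else if (strstr(desc, arg)) {	/* otherwise: substring match */
		run_test(curr);
	}
}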
+static int __cmd_test(int argc, const char *argv[])
+{
+ int i = 0;
while (tests[i].func) {
- int err;
- pr_info("%2d: %s:", i + 1, tests[i].desc);
+ int curr = i++, err;
+
+ if (!perf_test__matches(curr, argc, argv))
+ continue;
+
+ pr_info("%2d: %s:", i, tests[curr].desc);
pr_debug("\n--- start ---\n");
- err = tests[i].func();
- pr_debug("---- end ----\n%s:", tests[i].desc);
+ err = tests[curr].func();
+ pr_debug("---- end ----\n%s:", tests[curr].desc);
pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
- ++i;
}
return 0;
}
-static const char * const test_usage[] = {
- "perf test [<options>]",
- NULL,
-};
+static int perf_test__list(int argc, const char **argv)
+{
+ int i = 0;
+
+ while (tests[i].func) {
+ int curr = i++;
-static const struct option test_options[] = {
+ if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
+ continue;
+
+ pr_info("%2d: %s\n", i, tests[curr].desc);
+ }
+
+ return 0;
+}
+
+int cmd_test(int argc, const char **argv, const char *prefix __used)
+{
+ const char * const test_usage[] = {
+ "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
+ NULL,
+ };
+ const struct option test_options[] = {
OPT_INTEGER('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_END()
-};
+ };
-int cmd_test(int argc, const char **argv, const char *prefix __used)
-{
argc = parse_options(argc, argv, test_options, test_usage, 0);
- if (argc)
- usage_with_options(test_usage, test_options);
+ if (argc >= 1 && !strcmp(argv[0], "list"))
+ return perf_test__list(argc, argv);
symbol_conf.priv_size = sizeof(int);
symbol_conf.sort_by_name = true;
@@ -915,5 +1414,5 @@ int cmd_test(int argc, const char **argv, const char *prefix __used)
setup_pager();
- return __cmd_test();
+ return __cmd_test(argc, argv);
}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index aa26f4d66d10..3b75b2e21ea5 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -19,6 +19,7 @@
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
+#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
@@ -31,13 +32,14 @@
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
+#include "util/tool.h"
#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1
-static char const *input_name = "perf.data";
-static char const *output_name = "output.svg";
+static const char *input_name;
+static const char *output_name = "output.svg";
static unsigned int numcpus;
static u64 min_freq; /* Lowest CPU frequency seen */
@@ -273,25 +275,28 @@ static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
-static int process_comm_event(union perf_event *event,
+static int process_comm_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
pid_set_comm(event->comm.tid, event->comm.comm);
return 0;
}
-static int process_fork_event(union perf_event *event,
+static int process_fork_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
-static int process_exit_event(union perf_event *event,
+static int process_exit_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
pid_exit(event->fork.pid, event->fork.time);
return 0;
@@ -486,14 +491,15 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
}
-static int process_sample_event(union perf_event *event __used,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event __used,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct perf_evsel *evsel,
+ struct machine *machine __used)
{
struct trace_entry *te;
- if (session->sample_type & PERF_SAMPLE_TIME) {
+ if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
if (!first_time || first_time > sample->time)
first_time = sample->time;
if (last_time < sample->time)
@@ -501,7 +507,7 @@ static int process_sample_event(union perf_event *event __used,
}
te = (void *)sample->raw_data;
- if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
struct power_entry_old *peo;
@@ -974,7 +980,7 @@ static void write_svg_file(const char *filename)
svg_close();
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_timechart = {
.comm = process_comm_event,
.fork = process_fork_event,
.exit = process_exit_event,
@@ -985,7 +991,7 @@ static struct perf_event_ops event_ops = {
static int __cmd_timechart(void)
{
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &event_ops);
+ 0, false, &perf_timechart);
int ret = -EINVAL;
if (session == NULL)
@@ -994,7 +1000,7 @@ static int __cmd_timechart(void)
if (!perf_session__has_traces(session, "timechart record"))
goto out_delete;
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &perf_timechart);
if (ret)
goto out_delete;
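
builtin-timechart.c shows the mechanical shape of the perf_event_ops -> perf_tool conversion: every callback gains a struct perf_tool * first parameter and receives a struct machine * where it previously got the session. A minimal post-conversion tool might look like the sketch below (signatures copied from the code above; treat this as illustrative, not a stable API):

static int my_comm(struct perf_tool *tool __used,
		   union perf_event *event,
		   struct perf_sample *sample __used,
		   struct machine *machine __used)
{
	printf("comm: %s (tid %d)\n", event->comm.comm, event->comm.tid);
	return 0;
}

static struct perf_tool my_tool = {
	.comm = my_comm,
};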
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c9cdedb58134..4f81eeb99875 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -64,44 +64,6 @@
#include <linux/unistd.h>
#include <linux/types.h>
-static struct perf_top top = {
- .count_filter = 5,
- .delay_secs = 2,
- .target_pid = -1,
- .target_tid = -1,
- .freq = 1000, /* 1 KHz */
-};
-
-static bool system_wide = false;
-
-static bool use_tui, use_stdio;
-
-static bool sort_has_symbols;
-
-static bool dont_use_callchains;
-static char callchain_default_opt[] = "fractal,0.5,callee";
-
-
-static int default_interval = 0;
-
-static bool kptr_restrict_warned;
-static bool vmlinux_warned;
-static bool inherit = false;
-static int realtime_prio = 0;
-static bool group = false;
-static bool sample_id_all_avail = true;
-static unsigned int mmap_pages = 128;
-
-static bool dump_symtab = false;
-
-static struct winsize winsize;
-
-static const char *sym_filter = NULL;
-static int sym_pcnt_filter = 5;
-
-/*
- * Source functions
- */
void get_term_dimensions(struct winsize *ws)
{
@@ -125,21 +87,23 @@ void get_term_dimensions(struct winsize *ws)
ws->ws_col = 80;
}
-static void update_print_entries(struct winsize *ws)
+static void perf_top__update_print_entries(struct perf_top *top)
{
- top.print_entries = ws->ws_row;
+ top->print_entries = top->winsize.ws_row;
- if (top.print_entries > 9)
- top.print_entries -= 9;
+ if (top->print_entries > 9)
+ top->print_entries -= 9;
}
-static void sig_winch_handler(int sig __used)
+static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
{
- get_term_dimensions(&winsize);
- update_print_entries(&winsize);
+ struct perf_top *top = arg;
+
+ get_term_dimensions(&top->winsize);
+ perf_top__update_print_entries(top);
}
-static int parse_source(struct hist_entry *he)
+static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
struct symbol *sym;
struct annotation *notes;
@@ -170,7 +134,7 @@ static int parse_source(struct hist_entry *he)
pthread_mutex_lock(&notes->lock);
- if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+ if (symbol__alloc_hist(sym) < 0) {
pthread_mutex_unlock(&notes->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
@@ -181,7 +145,7 @@ static int parse_source(struct hist_entry *he)
err = symbol__annotate(sym, map, 0);
if (err == 0) {
out_assign:
- top.sym_filter_entry = he;
+ top->sym_filter_entry = he;
}
pthread_mutex_unlock(&notes->lock);
@@ -194,14 +158,16 @@ static void __zero_source_counters(struct hist_entry *he)
symbol__annotate_zero_histograms(sym);
}
-static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
+static void perf_top__record_precise_ip(struct perf_top *top,
+ struct hist_entry *he,
+ int counter, u64 ip)
{
struct annotation *notes;
struct symbol *sym;
if (he == NULL || he->ms.sym == NULL ||
- ((top.sym_filter_entry == NULL ||
- top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
+ ((top->sym_filter_entry == NULL ||
+ top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
return;
sym = he->ms.sym;
@@ -210,8 +176,7 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
if (pthread_mutex_trylock(&notes->lock))
return;
- if (notes->src == NULL &&
- symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+ if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
pthread_mutex_unlock(&notes->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
@@ -225,8 +190,9 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
pthread_mutex_unlock(&notes->lock);
}
-static void show_details(struct hist_entry *he)
+static void perf_top__show_details(struct perf_top *top)
{
+ struct hist_entry *he = top->sym_filter_entry;
struct annotation *notes;
struct symbol *symbol;
int more;
@@ -242,15 +208,15 @@ static void show_details(struct hist_entry *he)
if (notes->src == NULL)
goto out_unlock;
- printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
- printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
+ printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
+ printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
- more = symbol__annotate_printf(symbol, he->ms.map, top.sym_evsel->idx,
- 0, sym_pcnt_filter, top.print_entries, 4);
- if (top.zero)
- symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
+ more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
+ 0, top->sym_pcnt_filter, top->print_entries, 4);
+ if (top->zero)
+ symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
else
- symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
+ symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
if (more != 0)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
@@ -259,11 +225,9 @@ out_unlock:
static const char CONSOLE_CLEAR[] = "\033[H\033[2J";
-static struct hist_entry *
- perf_session__add_hist_entry(struct perf_session *session,
- struct addr_location *al,
- struct perf_sample *sample,
- struct perf_evsel *evsel)
+static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+ struct addr_location *al,
+ struct perf_sample *sample)
{
struct hist_entry *he;
@@ -271,50 +235,51 @@ static struct hist_entry *
if (he == NULL)
return NULL;
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
return he;
}
-static void print_sym_table(void)
+static void perf_top__print_sym_table(struct perf_top *top)
{
char bf[160];
int printed = 0;
- const int win_width = winsize.ws_col - 1;
+ const int win_width = top->winsize.ws_col - 1;
puts(CONSOLE_CLEAR);
- perf_top__header_snprintf(&top, bf, sizeof(bf));
+ perf_top__header_snprintf(top, bf, sizeof(bf));
printf("%s\n", bf);
- perf_top__reset_sample_counters(&top);
+ perf_top__reset_sample_counters(top);
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
- if (top.sym_evsel->hists.stats.nr_lost_warned !=
- top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
- top.sym_evsel->hists.stats.nr_lost_warned =
- top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
+ if (top->sym_evsel->hists.stats.nr_lost_warned !=
+ top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
+ top->sym_evsel->hists.stats.nr_lost_warned =
+ top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
color_fprintf(stdout, PERF_COLOR_RED,
"WARNING: LOST %d chunks, Check IO/CPU overload",
- top.sym_evsel->hists.stats.nr_lost_warned);
+ top->sym_evsel->hists.stats.nr_lost_warned);
++printed;
}
- if (top.sym_filter_entry) {
- show_details(top.sym_filter_entry);
+ if (top->sym_filter_entry) {
+ perf_top__show_details(top);
return;
}
- hists__collapse_resort_threaded(&top.sym_evsel->hists);
- hists__output_resort_threaded(&top.sym_evsel->hists);
- hists__decay_entries_threaded(&top.sym_evsel->hists,
- top.hide_user_symbols,
- top.hide_kernel_symbols);
- hists__output_recalc_col_len(&top.sym_evsel->hists, winsize.ws_row - 3);
+ hists__collapse_resort_threaded(&top->sym_evsel->hists);
+ hists__output_resort_threaded(&top->sym_evsel->hists);
+ hists__decay_entries_threaded(&top->sym_evsel->hists,
+ top->hide_user_symbols,
+ top->hide_kernel_symbols);
+ hists__output_recalc_col_len(&top->sym_evsel->hists,
+ top->winsize.ws_row - 3);
putchar('\n');
- hists__fprintf(&top.sym_evsel->hists, NULL, false, false,
- winsize.ws_row - 4 - printed, win_width, stdout);
+ hists__fprintf(&top->sym_evsel->hists, NULL, false, false,
+ top->winsize.ws_row - 4 - printed, win_width, stdout);
}
static void prompt_integer(int *target, const char *msg)
@@ -352,17 +317,17 @@ static void prompt_percent(int *target, const char *msg)
*target = tmp;
}
-static void prompt_symbol(struct hist_entry **target, const char *msg)
+static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
char *buf = malloc(0), *p;
- struct hist_entry *syme = *target, *n, *found = NULL;
+ struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
struct rb_node *next;
size_t dummy = 0;
/* zero counters of active symbol */
if (syme) {
__zero_source_counters(syme);
- *target = NULL;
+ top->sym_filter_entry = NULL;
}
fprintf(stdout, "\n%s: ", msg);
@@ -373,7 +338,7 @@ static void prompt_symbol(struct hist_entry **target, const char *msg)
if (p)
*p = 0;
- next = rb_first(&top.sym_evsel->hists.entries);
+ next = rb_first(&top->sym_evsel->hists.entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -386,47 +351,46 @@ static void prompt_symbol(struct hist_entry **target, const char *msg)
if (!found) {
fprintf(stderr, "Sorry, %s is not active.\n", buf);
sleep(1);
- return;
} else
- parse_source(found);
+ perf_top__parse_source(top, found);
out_free:
free(buf);
}
-static void print_mapped_keys(void)
+static void perf_top__print_mapped_keys(struct perf_top *top)
{
char *name = NULL;
- if (top.sym_filter_entry) {
- struct symbol *sym = top.sym_filter_entry->ms.sym;
+ if (top->sym_filter_entry) {
+ struct symbol *sym = top->sym_filter_entry->ms.sym;
name = sym->name;
}
fprintf(stdout, "\nMapped keys:\n");
- fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top.delay_secs);
- fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top.print_entries);
+ fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
+ fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
- if (top.evlist->nr_entries > 1)
- fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top.sym_evsel));
+ if (top->evlist->nr_entries > 1)
+ fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel));
- fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top.count_filter);
+ fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
- fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
+ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
fprintf(stdout,
"\t[K] hide kernel_symbols symbols. \t(%s)\n",
- top.hide_kernel_symbols ? "yes" : "no");
+ top->hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
- top.hide_user_symbols ? "yes" : "no");
- fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top.zero ? 1 : 0);
+ top->hide_user_symbols ? "yes" : "no");
+ fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
fprintf(stdout, "\t[qQ] quit.\n");
}
-static int key_mapped(int c)
+static int perf_top__key_mapped(struct perf_top *top, int c)
{
switch (c) {
case 'd':
@@ -442,7 +406,7 @@ static int key_mapped(int c)
case 'S':
return 1;
case 'E':
- return top.evlist->nr_entries > 1 ? 1 : 0;
+ return top->evlist->nr_entries > 1 ? 1 : 0;
default:
break;
}
@@ -450,13 +414,13 @@ static int key_mapped(int c)
return 0;
}
-static void handle_keypress(int c)
+static void perf_top__handle_keypress(struct perf_top *top, int c)
{
- if (!key_mapped(c)) {
+ if (!perf_top__key_mapped(top, c)) {
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
- print_mapped_keys();
+ perf_top__print_mapped_keys(top);
fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
fflush(stdout);
@@ -471,81 +435,86 @@ static void handle_keypress(int c)
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
- if (!key_mapped(c))
+ if (!perf_top__key_mapped(top, c))
return;
}
switch (c) {
case 'd':
- prompt_integer(&top.delay_secs, "Enter display delay");
- if (top.delay_secs < 1)
- top.delay_secs = 1;
+ prompt_integer(&top->delay_secs, "Enter display delay");
+ if (top->delay_secs < 1)
+ top->delay_secs = 1;
break;
case 'e':
- prompt_integer(&top.print_entries, "Enter display entries (lines)");
- if (top.print_entries == 0) {
- sig_winch_handler(SIGWINCH);
- signal(SIGWINCH, sig_winch_handler);
+ prompt_integer(&top->print_entries, "Enter display entries (lines)");
+ if (top->print_entries == 0) {
+ struct sigaction act = {
+ .sa_sigaction = perf_top__sig_winch,
+ .sa_flags = SA_SIGINFO,
+ };
+ perf_top__sig_winch(SIGWINCH, NULL, top);
+ sigaction(SIGWINCH, &act, NULL);
} else
signal(SIGWINCH, SIG_DFL);
break;
case 'E':
- if (top.evlist->nr_entries > 1) {
+ if (top->evlist->nr_entries > 1) {
/* Select 0 as the default event: */
int counter = 0;
fprintf(stderr, "\nAvailable events:");
- list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
- fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));
+ list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+ fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
- if (counter >= top.evlist->nr_entries) {
- top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
- fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
+ if (counter >= top->evlist->nr_entries) {
+ top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
sleep(1);
break;
}
- list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
- if (top.sym_evsel->idx == counter)
+ list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+ if (top->sym_evsel->idx == counter)
break;
} else
- top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+ top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
break;
case 'f':
- prompt_integer(&top.count_filter, "Enter display event count filter");
+ prompt_integer(&top->count_filter, "Enter display event count filter");
break;
case 'F':
- prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
+ prompt_percent(&top->sym_pcnt_filter,
+ "Enter details display event filter (percent)");
break;
case 'K':
- top.hide_kernel_symbols = !top.hide_kernel_symbols;
+ top->hide_kernel_symbols = !top->hide_kernel_symbols;
break;
case 'q':
case 'Q':
printf("exiting.\n");
- if (dump_symtab)
- perf_session__fprintf_dsos(top.session, stderr);
+ if (top->dump_symtab)
+ perf_session__fprintf_dsos(top->session, stderr);
exit(0);
case 's':
- prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
+ perf_top__prompt_symbol(top, "Enter details symbol");
break;
case 'S':
- if (!top.sym_filter_entry)
+ if (!top->sym_filter_entry)
break;
else {
- struct hist_entry *syme = top.sym_filter_entry;
+ struct hist_entry *syme = top->sym_filter_entry;
- top.sym_filter_entry = NULL;
+ top->sym_filter_entry = NULL;
__zero_source_counters(syme);
}
break;
case 'U':
- top.hide_user_symbols = !top.hide_user_symbols;
+ top->hide_user_symbols = !top->hide_user_symbols;
break;
case 'z':
- top.zero = !top.zero;
+ top->zero = !top->zero;
break;
default:
break;
@@ -563,28 +532,30 @@ static void perf_top__sort_new_samples(void *arg)
hists__collapse_resort_threaded(&t->sym_evsel->hists);
hists__output_resort_threaded(&t->sym_evsel->hists);
hists__decay_entries_threaded(&t->sym_evsel->hists,
- top.hide_user_symbols,
- top.hide_kernel_symbols);
+ t->hide_user_symbols,
+ t->hide_kernel_symbols);
}
-static void *display_thread_tui(void *arg __used)
+static void *display_thread_tui(void *arg)
{
+ struct perf_top *top = arg;
const char *help = "For a higher level overview, try: perf top --sort comm,dso";
- perf_top__sort_new_samples(&top);
- perf_evlist__tui_browse_hists(top.evlist, help,
+ perf_top__sort_new_samples(top);
+ perf_evlist__tui_browse_hists(top->evlist, help,
perf_top__sort_new_samples,
- &top, top.delay_secs);
+ top, top->delay_secs);
exit_browser(0);
exit(0);
return NULL;
}
-static void *display_thread(void *arg __used)
+static void *display_thread(void *arg)
{
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
+ struct perf_top *top = arg;
int delay_msecs, c;
tcgetattr(0, &save);
@@ -595,13 +566,13 @@ static void *display_thread(void *arg __used)
pthread__unblock_sigwinch();
repeat:
- delay_msecs = top.delay_secs * 1000;
+ delay_msecs = top->delay_secs * 1000;
tcsetattr(0, TCSANOW, &tc);
/* trash return */
getc(stdin);
while (1) {
- print_sym_table();
+ perf_top__print_sym_table(top);
/*
* Either timeout expired or we got an EINTR due to SIGWINCH,
* refresh screen in both cases.
@@ -621,7 +592,7 @@ process_hotkey:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
- handle_keypress(c);
+ perf_top__handle_keypress(top, c);
goto repeat;
return NULL;
@@ -673,47 +644,17 @@ static int symbol_filter(struct map *map __used, struct symbol *sym)
return 0;
}
-static void perf_event__process_sample(const union perf_event *event,
+static void perf_event__process_sample(struct perf_tool *tool,
+ const union perf_event *event,
struct perf_evsel *evsel,
struct perf_sample *sample,
- struct perf_session *session)
+ struct machine *machine)
{
+ struct perf_top *top = container_of(tool, struct perf_top, tool);
struct symbol *parent = NULL;
u64 ip = event->ip.ip;
struct addr_location al;
- struct machine *machine;
int err;
- u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
- ++top.samples;
-
- switch (origin) {
- case PERF_RECORD_MISC_USER:
- ++top.us_samples;
- if (top.hide_user_symbols)
- return;
- machine = perf_session__find_host_machine(session);
- break;
- case PERF_RECORD_MISC_KERNEL:
- ++top.kernel_samples;
- if (top.hide_kernel_symbols)
- return;
- machine = perf_session__find_host_machine(session);
- break;
- case PERF_RECORD_MISC_GUEST_KERNEL:
- ++top.guest_kernel_samples;
- machine = perf_session__find_machine(session, event->ip.pid);
- break;
- case PERF_RECORD_MISC_GUEST_USER:
- ++top.guest_us_samples;
- /*
- * TODO: we don't process guest user from host side
- * except simple counting.
- */
- return;
- default:
- return;
- }
if (!machine && perf_guest) {
pr_err("Can't find guest [%d]'s kernel information\n",
@@ -722,14 +663,14 @@ static void perf_event__process_sample(const union perf_event *event,
}
if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
- top.exact_samples++;
+ top->exact_samples++;
- if (perf_event__preprocess_sample(event, session, &al, sample,
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
symbol_filter) < 0 ||
al.filtered)
return;
- if (!kptr_restrict_warned &&
+ if (!top->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
ui__warning(
@@ -740,7 +681,7 @@ static void perf_event__process_sample(const union perf_event *event,
" modules" : "");
if (use_browser <= 0)
sleep(5);
- kptr_restrict_warned = true;
+ top->kptr_restrict_warned = true;
}
if (al.sym == NULL) {
@@ -756,7 +697,7 @@ static void perf_event__process_sample(const union perf_event *event,
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
- if (!kptr_restrict_warned && !vmlinux_warned &&
+ if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
if (symbol_conf.vmlinux_name) {
@@ -769,7 +710,7 @@ static void perf_event__process_sample(const union perf_event *event,
if (use_browser <= 0)
sleep(5);
- vmlinux_warned = true;
+ top->vmlinux_warned = true;
}
}
@@ -778,70 +719,109 @@ static void perf_event__process_sample(const union perf_event *event,
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
- err = perf_session__resolve_callchain(session, al.thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al.thread,
+ sample->callchain, &parent);
if (err)
return;
}
- he = perf_session__add_hist_entry(session, &al, sample, evsel);
+ he = perf_evsel__add_hist_entry(evsel, &al, sample);
if (he == NULL) {
pr_err("Problem incrementing symbol period, skipping event\n");
return;
}
if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain, &session->callchain_cursor,
+ err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
sample->period);
if (err)
return;
}
- if (sort_has_symbols)
- record_precise_ip(he, evsel->idx, ip);
+ if (top->sort_has_symbols)
+ perf_top__record_precise_ip(top, he, evsel->idx, ip);
}
return;
}
-static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
+static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
struct perf_sample sample;
struct perf_evsel *evsel;
+ struct perf_session *session = top->session;
union perf_event *event;
+ struct machine *machine;
+ u8 origin;
int ret;
- while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
- ret = perf_session__parse_sample(self, event, &sample);
+ while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
+ ret = perf_session__parse_sample(session, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
}
- evsel = perf_evlist__id2evsel(self->evlist, sample.id);
+ evsel = perf_evlist__id2evsel(session->evlist, sample.id);
assert(evsel != NULL);
+ origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
if (event->header.type == PERF_RECORD_SAMPLE)
- perf_event__process_sample(event, evsel, &sample, self);
- else if (event->header.type < PERF_RECORD_MAX) {
+ ++top->samples;
+
+ switch (origin) {
+ case PERF_RECORD_MISC_USER:
+ ++top->us_samples;
+ if (top->hide_user_symbols)
+ continue;
+ machine = perf_session__find_host_machine(session);
+ break;
+ case PERF_RECORD_MISC_KERNEL:
+ ++top->kernel_samples;
+ if (top->hide_kernel_symbols)
+ continue;
+ machine = perf_session__find_host_machine(session);
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ ++top->guest_kernel_samples;
+ machine = perf_session__find_machine(session, event->ip.pid);
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ ++top->guest_us_samples;
+ /*
+ * TODO: we don't process guest user from host side
+ * except simple counting.
+ */
+ /* Fall thru */
+ default:
+ continue;
+ }
+
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ perf_event__process_sample(&top->tool, event, evsel,
+ &sample, machine);
+ } else if (event->header.type < PERF_RECORD_MAX) {
hists__inc_nr_events(&evsel->hists, event->header.type);
- perf_event__process(event, &sample, self);
+ perf_event__process(&top->tool, event, &sample, machine);
} else
- ++self->hists.stats.nr_unknown_events;
+ ++session->hists.stats.nr_unknown_events;
}
}
-static void perf_session__mmap_read(struct perf_session *self)
+static void perf_top__mmap_read(struct perf_top *top)
{
int i;
- for (i = 0; i < top.evlist->nr_mmaps; i++)
- perf_session__mmap_read_idx(self, i);
+ for (i = 0; i < top->evlist->nr_mmaps; i++)
+ perf_top__mmap_read_idx(top, i);
}
-static void start_counters(struct perf_evlist *evlist)
+static void perf_top__start_counters(struct perf_top *top)
{
struct perf_evsel *counter, *first;
+ struct perf_evlist *evlist = top->evlist;
first = list_entry(evlist->entries.next, struct perf_evsel, node);
@@ -849,15 +829,15 @@ static void start_counters(struct perf_evlist *evlist)
struct perf_event_attr *attr = &counter->attr;
struct xyarray *group_fd = NULL;
- if (group && counter != first)
+ if (top->group && counter != first)
group_fd = first->fd;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
- if (top.freq) {
+ if (top->freq) {
attr->sample_type |= PERF_SAMPLE_PERIOD;
attr->freq = 1;
- attr->sample_freq = top.freq;
+ attr->sample_freq = top->freq;
}
if (evlist->nr_entries > 1) {
@@ -870,23 +850,23 @@ static void start_counters(struct perf_evlist *evlist)
attr->mmap = 1;
attr->comm = 1;
- attr->inherit = inherit;
+ attr->inherit = top->inherit;
retry_sample_id:
- attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+ attr->sample_id_all = top->sample_id_all_avail ? 1 : 0;
try_again:
- if (perf_evsel__open(counter, top.evlist->cpus,
- top.evlist->threads, group,
+ if (perf_evsel__open(counter, top->evlist->cpus,
+ top->evlist->threads, top->group,
group_fd) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
ui__error_paranoid();
goto out_err;
- } else if (err == EINVAL && sample_id_all_avail) {
+ } else if (err == EINVAL && top->sample_id_all_avail) {
/*
* Old kernel, no attr->sample_id_type_all field
*/
- sample_id_all_avail = false;
+ top->sample_id_all_avail = false;
goto retry_sample_id;
}
/*
@@ -920,7 +900,7 @@ try_again:
}
}
- if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
ui__warning("Failed to mmap with %d (%s)\n",
errno, strerror(errno));
goto out_err;
@@ -933,14 +913,14 @@ out_err:
exit(0);
}
-static int setup_sample_type(void)
+static int perf_top__setup_sample_type(struct perf_top *top)
{
- if (!sort_has_symbols) {
+ if (!top->sort_has_symbols) {
if (symbol_conf.use_callchain) {
ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
return -EINVAL;
}
- } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
+ } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
if (callchain_register_param(&callchain_param) < 0) {
ui__warning("Can't register callchain params.\n");
return -EINVAL;
@@ -950,7 +930,7 @@ static int setup_sample_type(void)
return 0;
}
-static int __cmd_top(void)
+static int __cmd_top(struct perf_top *top)
{
pthread_t thread;
int ret;
@@ -958,39 +938,40 @@ static int __cmd_top(void)
* FIXME: perf_session__new should allow passing a O_MMAP, so that all this
* mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
*/
- top.session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
- if (top.session == NULL)
+ top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
+ if (top->session == NULL)
return -ENOMEM;
- ret = setup_sample_type();
+ ret = perf_top__setup_sample_type(top);
if (ret)
goto out_delete;
- if (top.target_tid != -1)
- perf_event__synthesize_thread_map(top.evlist->threads,
- perf_event__process, top.session);
+ if (top->target_tid != -1)
+ perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
+ perf_event__process,
+ &top->session->host_machine);
else
- perf_event__synthesize_threads(perf_event__process, top.session);
-
- start_counters(top.evlist);
- top.session->evlist = top.evlist;
- perf_session__update_sample_type(top.session);
+ perf_event__synthesize_threads(&top->tool, perf_event__process,
+ &top->session->host_machine);
+ perf_top__start_counters(top);
+ top->session->evlist = top->evlist;
+ perf_session__update_sample_type(top->session);
/* Wait for a minimal set of events before starting the snapshot */
- poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
+ poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
- perf_session__mmap_read(top.session);
+ perf_top__mmap_read(top);
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
- display_thread), NULL)) {
+ display_thread), top)) {
printf("Could not create display thread.\n");
exit(-1);
}
- if (realtime_prio) {
+ if (top->realtime_prio) {
struct sched_param param;
- param.sched_priority = realtime_prio;
+ param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
printf("Could not set realtime priority.\n");
exit(-1);
@@ -998,25 +979,25 @@ static int __cmd_top(void)
}
while (1) {
- u64 hits = top.samples;
+ u64 hits = top->samples;
- perf_session__mmap_read(top.session);
+ perf_top__mmap_read(top);
- if (hits == top.samples)
- ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
+ if (hits == top->samples)
+ ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
}
out_delete:
- perf_session__delete(top.session);
- top.session = NULL;
+ perf_session__delete(top->session);
+ top->session = NULL;
return 0;
}
static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
- int unset)
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
+ struct perf_top *top = (struct perf_top *)opt->value;
char *tok, *tok2;
char *endptr;
@@ -1024,7 +1005,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
* --no-call-graph
*/
if (unset) {
- dont_use_callchains = true;
+ top->dont_use_callchains = true;
return 0;
}
@@ -1052,9 +1033,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
symbol_conf.use_callchain = false;
return 0;
- }
-
- else
+ } else
return -1;
/* get the min percentage */
@@ -1098,17 +1077,32 @@ static const char * const top_usage[] = {
NULL
};
-static const struct option options[] = {
+int cmd_top(int argc, const char **argv, const char *prefix __used)
+{
+ struct perf_evsel *pos;
+ int status = -ENOMEM;
+ struct perf_top top = {
+ .count_filter = 5,
+ .delay_secs = 2,
+ .target_pid = -1,
+ .target_tid = -1,
+ .freq = 1000, /* 1 KHz */
+ .sample_id_all_avail = true,
+ .mmap_pages = 128,
+ .sym_pcnt_filter = 5,
+ };
+ char callchain_default_opt[] = "fractal,0.5,callee";
+ const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
- OPT_INTEGER('c', "count", &default_interval,
+ OPT_INTEGER('c', "count", &top.default_interval,
"event period to sample"),
OPT_INTEGER('p', "pid", &top.target_pid,
"profile events on existing process id"),
OPT_INTEGER('t', "tid", &top.target_tid,
"profile events on existing thread id"),
- OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ OPT_BOOLEAN('a', "all-cpus", &top.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
"list of cpus to monitor"),
@@ -1116,20 +1110,20 @@ static const struct option options[] = {
"file", "vmlinux pathname"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
- OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
- OPT_INTEGER('r', "realtime", &realtime_prio,
+ OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
+ OPT_INTEGER('r', "realtime", &top.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_INTEGER('d', "delay", &top.delay_secs,
"number of seconds to delay between refreshes"),
- OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
+ OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
"dump the symbol table used for profiling"),
OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
- OPT_BOOLEAN('g', "group", &group,
+ OPT_BOOLEAN('g', "group", &top.group,
"put the counters into a counter group"),
- OPT_BOOLEAN('i', "inherit", &inherit,
+ OPT_BOOLEAN('i', "inherit", &top.inherit,
"child tasks inherit counters"),
- OPT_STRING(0, "sym-annotate", &sym_filter, "symbol name",
+ OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
"symbol to annotate"),
OPT_BOOLEAN('z', "zero", &top.zero,
"zero history across updates"),
@@ -1139,15 +1133,15 @@ static const struct option options[] = {
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
"hide user symbols"),
- OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
- OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+ OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
- OPT_CALLBACK_DEFAULT('G', "call-graph", NULL, "output_type,min_percent, call_order",
+ OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order",
"Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
"Default: fractal,0.5,callee", &parse_callchain_opt,
callchain_default_opt),
@@ -1166,12 +1160,7 @@ static const struct option options[] = {
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_END()
-};
-
-int cmd_top(int argc, const char **argv, const char *prefix __used)
-{
- struct perf_evsel *pos;
- int status = -ENOMEM;
+ };
top.evlist = perf_evlist__new(NULL, NULL);
if (top.evlist == NULL)
@@ -1188,9 +1177,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
setup_sorting(top_usage, options);
- if (use_stdio)
+ if (top.use_stdio)
use_browser = 0;
- else if (use_tui)
+ else if (top.use_tui)
use_browser = 1;
setup_browser(false);
@@ -1215,38 +1204,31 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
return -ENOMEM;
}
+ symbol_conf.nr_events = top.evlist->nr_entries;
+
if (top.delay_secs < 1)
top.delay_secs = 1;
/*
* User specified count overrides default frequency.
*/
- if (default_interval)
+ if (top.default_interval)
top.freq = 0;
else if (top.freq) {
- default_interval = top.freq;
+ top.default_interval = top.freq;
} else {
fprintf(stderr, "frequency and count are zero, aborting\n");
exit(EXIT_FAILURE);
}
list_for_each_entry(pos, &top.evlist->entries, node) {
- if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
- top.evlist->threads->nr) < 0)
- goto out_free_fd;
/*
* Fill in the ones not specifically initialized via -c:
*/
- if (pos->attr.sample_period)
- continue;
-
- pos->attr.sample_period = default_interval;
+ if (!pos->attr.sample_period)
+ pos->attr.sample_period = top.default_interval;
}
- if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
- perf_evlist__alloc_mmap(top.evlist) < 0)
- goto out_free_fd;
-
top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
symbol_conf.priv_size = sizeof(struct annotation);
@@ -1263,16 +1245,20 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
* Avoid annotation data structures overhead when symbols aren't on the
* sort list.
*/
- sort_has_symbols = sort_sym.list.next != NULL;
+ top.sort_has_symbols = sort_sym.list.next != NULL;
- get_term_dimensions(&winsize);
+ get_term_dimensions(&top.winsize);
if (top.print_entries == 0) {
- update_print_entries(&winsize);
- signal(SIGWINCH, sig_winch_handler);
+ struct sigaction act = {
+ .sa_sigaction = perf_top__sig_winch,
+ .sa_flags = SA_SIGINFO,
+ };
+ perf_top__update_print_entries(&top);
+ sigaction(SIGWINCH, &act, NULL);
}
- status = __cmd_top();
-out_free_fd:
+ status = __cmd_top(&top);
+
perf_evlist__delete(top.evlist);
return status;
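
The builtin-top.c diff above is one mechanical transformation applied many times: file-scope state (sym_pcnt_filter, dump_symtab, inherit, the global top object itself) moves into struct perf_top, helpers gain a perf_top__ prefix, and every former global access becomes an access through an explicit context argument. A minimal, self-contained sketch of the pattern; the struct and function names below are illustrative, not the real perf definitions:

#include <stdio.h>

struct ctx {				/* stands in for struct perf_top */
	int delay_secs;
	int print_entries;
};

/* before: static int delay_secs; static void print_sym_table(void); */
static void ctx__print_sym_table(struct ctx *c)
{
	printf("refresh every %ds, show %d entries\n",
	       c->delay_secs, c->print_entries);
}

int main(void)
{
	struct ctx top = { .delay_secs = 2, .print_entries = 15 };

	/* callers now pass &top instead of relying on a file-scope object */
	ctx__print_sym_table(&top);
	return 0;
}

The payoff is visible in display_thread()/display_thread_tui(): once the state sits behind a pointer, the same functions can receive their context through the pthread_create() argument rather than reaching for a global.
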
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 73d0cac8b67e..2b2e225a4d4c 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -29,8 +29,6 @@ struct pager_config {
int val;
};
-static char debugfs_mntpt[MAXPATHLEN];
-
static int pager_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
@@ -81,15 +79,6 @@ static void commit_pager_choice(void)
}
}
-static void set_debugfs_path(void)
-{
- char *path;
-
- path = getenv(PERF_DEBUGFS_ENVIRONMENT);
- snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt,
- "tracing/events");
-}
-
static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;
@@ -161,15 +150,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
fprintf(stderr, "No directory given for --debugfs-dir.\n");
usage(perf_usage_string);
}
- strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN);
- debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+ debugfs_set_path((*argv)[1]);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
- strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN);
- debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+ debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
+ fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
if (envchanged)
*envchanged = 1;
} else {
@@ -281,7 +269,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
if (use_pager == -1 && p->option & USE_PAGER)
use_pager = 1;
commit_pager_choice();
- set_debugfs_path();
status = p->fn(argc, argv, prefix);
exit_browser(status);
@@ -416,17 +403,6 @@ static int run_argv(int *argcp, const char ***argv)
return done_alias;
}
-/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
-static void get_debugfs_mntpt(void)
-{
- const char *path = debugfs_mount(NULL);
-
- if (path)
- strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
- else
- debugfs_mntpt[0] = '\0';
-}
-
static void pthread__block_sigwinch(void)
{
sigset_t set;
@@ -453,7 +429,7 @@ int main(int argc, const char **argv)
if (!cmd)
cmd = "perf-help";
/* get debugfs mount point from /proc/mounts */
- get_debugfs_mntpt();
+ debugfs_mount(NULL);
/*
* "perf-xxxx" is the same as "perf xxxx", but we obviously:
*
@@ -476,7 +452,6 @@ int main(int argc, const char **argv)
argc--;
handle_options(&argv, &argc, NULL);
commit_pager_choice();
- set_debugfs_path();
set_buildid_dir();
if (argc > 0) {
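
The perf.c hunks delete the command's private debugfs_mntpt buffer and its set_debugfs_path()/get_debugfs_mntpt() helpers; util/debugfs.c (further down) now owns both the mountpoint and the derived tracing/events path. Roughly, startup becomes the sequence below. This is a hedged sketch: debugfs_mount(), debugfs_set_path(), debugfs_mountpoint and tracing_events_path are the real API from the new debugfs.h, but this main() is invented for illustration:

#include <stdio.h>
#include "util/debugfs.h"	/* debugfs_mount(), debugfs_set_path(),
				   debugfs_mountpoint[], tracing_events_path[] */

int main(int argc, const char **argv)
{
	/* default: find (or mount) debugfs once, early */
	debugfs_mount(NULL);

	/* --debugfs-dir=DIR overrides it and refreshes both paths */
	if (argc > 1)
		debugfs_set_path(argv[1]);

	printf("mountpoint: %s\n", debugfs_mountpoint);
	printf("events:     %s\n", tracing_events_path);
	return 0;
}
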
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 914c895510f7..64f8bee31ced 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -185,4 +185,28 @@ extern const char perf_version_string[];
void pthread__unblock_sigwinch(void);
+struct perf_record_opts {
+ pid_t target_pid;
+ pid_t target_tid;
+ bool call_graph;
+ bool group;
+ bool inherit_stat;
+ bool no_delay;
+ bool no_inherit;
+ bool no_samples;
+ bool pipe_output;
+ bool raw_samples;
+ bool sample_address;
+ bool sample_time;
+ bool sample_id_all_avail;
+ bool system_wide;
+ bool period;
+ unsigned int freq;
+ unsigned int mmap_pages;
+ unsigned int user_freq;
+ u64 default_interval;
+ u64 user_interval;
+ const char *cpu_list;
+};
+
#endif
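
struct perf_record_opts gathers what were previously scattered globals into one options block that record-like commands can fill and hand around. Usage is plain designated-initializer C; the values below are illustrative defaults, not necessarily what perf record actually uses:

#include <stdbool.h>	/* in case perf.h does not pull it in */
#include "perf.h"	/* struct perf_record_opts */

static struct perf_record_opts record_opts = {
	.target_pid	     = -1,	/* no pid/tid targeting by default */
	.target_tid	     = -1,
	.freq		     = 1000,	/* sample at 1 kHz */
	.mmap_pages	     = 128,
	.sample_id_all_avail = true,
};
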
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 119e996035c8..011ed2676604 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -25,17 +25,17 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym)
return 0;
}
-int symbol__alloc_hist(struct symbol *sym, int nevents)
+int symbol__alloc_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
(sym->end - sym->start) * sizeof(u64));
- notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist);
+ notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
if (notes->src == NULL)
return -1;
notes->src->sizeof_sym_hist = sizeof_sym_hist;
- notes->src->nr_histograms = nevents;
+ notes->src->nr_histograms = symbol_conf.nr_events;
INIT_LIST_HEAD(&notes->src->source);
return 0;
}
@@ -334,7 +334,7 @@ fallback:
disassembler_style ? "-M " : "",
disassembler_style ? disassembler_style : "",
map__rip_2objdump(map, sym->start),
- map__rip_2objdump(map, sym->end),
+ map__rip_2objdump(map, sym->end+1),
symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
symbol_conf.annotate_src ? "-S" : "",
symfs_filename, filename);
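
symbol__alloc_hist() drops its nevents parameter and sizes the annotation buffer from symbol_conf.nr_events instead, which cmd_top() now sets once from the evlist. The allocation itself is a single flexible block: a header followed by one sym_hist per event, each sym_hist carrying one u64 slot per address in the symbol. In isolation the arithmetic looks like this (simplified types; the real struct sym_hist and annotated source carry more fields):

#include <stdlib.h>

struct sym_hist { unsigned long nr_samples; unsigned long addr[]; };
struct source   { size_t sizeof_sym_hist; int nr_histograms; char histograms[]; };

static struct source *alloc_hist(size_t sym_len, int nr_events)
{
	/* one counter slot per address covered by the symbol */
	size_t sizeof_sym_hist = sizeof(struct sym_hist) +
				 sym_len * sizeof(unsigned long);
	/* calloc stands in for zalloc: header plus nr_events histograms */
	struct source *src = calloc(1, sizeof(*src) +
				       nr_events * sizeof_sym_hist);

	if (src) {
		src->sizeof_sym_hist = sizeof_sym_hist;
		src->nr_histograms   = nr_events;	/* one per evsel */
	}
	return src;
}
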
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index d9072523d342..efa5dc82bfae 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -72,7 +72,7 @@ static inline struct annotation *symbol__annotation(struct symbol *sym)
int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
int evidx, u64 addr);
-int symbol__alloc_hist(struct symbol *sym, int nevents);
+int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
@@ -99,8 +99,7 @@ static inline int symbol__tui_annotate(struct symbol *sym __used,
}
#else
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
- int nr_events, void(*timer)(void *arg), void *arg,
- int delay_secs);
+ void(*timer)(void *arg), void *arg, int delay_secs);
#endif
extern const char *disassembler_style;
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index a91cd99f26ea..dff9c7a725f4 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -13,15 +13,18 @@
#include "symbol.h"
#include <linux/kernel.h>
#include "debug.h"
+#include "session.h"
+#include "tool.h"
-static int build_id__mark_dso_hit(union perf_event *event,
+static int build_id__mark_dso_hit(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
struct addr_location al;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
@@ -29,8 +32,8 @@ static int build_id__mark_dso_hit(union perf_event *event,
return -1;
}
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, event->ip.ip, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, &al);
if (al.map != NULL)
al.map->dso->hit = 1;
@@ -38,25 +41,26 @@ static int build_id__mark_dso_hit(union perf_event *event,
return 0;
}
-static int perf_event__exit_del_thread(union perf_event *event,
+static int perf_event__exit_del_thread(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->fork.tid);
+ struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
if (thread) {
- rb_erase(&thread->rb_node, &session->threads);
- session->last_match = NULL;
+ rb_erase(&thread->rb_node, &machine->threads);
+ machine->last_match = NULL;
thread__delete(thread);
}
return 0;
}
-struct perf_event_ops build_id__mark_dso_hit_ops = {
+struct perf_tool build_id__mark_dso_hit_ops = {
.sample = build_id__mark_dso_hit,
.mmap = perf_event__process_mmap,
.fork = perf_event__process_task,
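
The perf_event_ops to perf_tool rename is more than cosmetic: every handler now receives the struct perf_tool * itself, so a command can embed the tool inside its own state and recover that state with container_of(), exactly what perf_event__process_sample() in builtin-top.c does with struct perf_top. A standalone sketch of the idiom, with invented names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool { void (*sample)(struct tool *tool); };

struct mytop {
	struct tool tool;	/* embedded member, passed to generic code */
	int samples;
};

static void mytop_sample(struct tool *tool)
{
	/* step from the embedded member back to the enclosing object */
	struct mytop *top = container_of(tool, struct mytop, tool);

	top->samples++;
}

int main(void)
{
	struct mytop top = { .tool = { .sample = mytop_sample } };

	top.tool.sample(&top.tool);	/* generic code only sees struct tool */
	printf("%d\n", top.samples);	/* prints 1 */
	return 0;
}

This is what lets build_id__mark_dso_hit_ops above stay a plain table of callbacks while stateful users like perf top carry their context for free.
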
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 5dafb00eaa06..a993ba87d996 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -3,7 +3,7 @@
#include "session.h"
-extern struct perf_event_ops build_id__mark_dso_hit_ops;
+extern struct perf_tool build_id__mark_dso_hit_ops;
char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 9b4ff16cac96..7f9c0f1ae3a9 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -101,6 +101,9 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
+struct ip_callchain;
+union perf_event;
+
bool ip_callchain__valid(struct ip_callchain *chain,
const union perf_event *event);
/*
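
Adding "struct ip_callchain;" and "union perf_event;" lets callchain.h declare ip_callchain__valid() without including event.h: a prototype that only takes T* needs a forward declaration, never T's full definition. A minimal illustration, with invented names:

struct blob;				/* forward declaration: layout unknown here */
int widget_takes(struct blob *b);	/* pointer-only use: declaration suffices */

struct blob { int size; };		/* full definition needed only where
					   members are actually accessed */
int widget_takes(struct blob *b)
{
	return b ? b->size : 0;
}
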
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 96bee5c46008..dbe2f16b1a1a 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -3,7 +3,6 @@
#include "parse-options.h"
#include "evsel.h"
#include "cgroup.h"
-#include "debugfs.h" /* MAX_PATH, STR() */
#include "evlist.h"
int nr_cgroups;
@@ -12,7 +11,7 @@ static int
cgroupfs_find_mountpoint(char *buf, size_t maxlen)
{
FILE *fp;
- char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
+ char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
char *token, *saved_ptr = NULL;
int found = 0;
@@ -25,8 +24,8 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
* and inspect every cgroupfs mount point to find one that has
* perf_event subsystem
*/
- while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %"
- STR(MAX_PATH)"s %*d %*d\n",
+ while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %"
+ STR(PATH_MAX)"s %*d %*d\n",
mountpoint, type, tokens) == 3) {
if (!strcmp(type, "cgroup")) {
@@ -57,15 +56,15 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
static int open_cgroup(char *name)
{
- char path[MAX_PATH+1];
- char mnt[MAX_PATH+1];
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
int fd;
- if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1))
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
return -1;
- snprintf(path, MAX_PATH, "%s/%s", mnt, name);
+ snprintf(path, PATH_MAX, "%s/%s", mnt, name);
fd = open(path, O_RDONLY);
if (fd == -1)
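
The MAX_PATH to PATH_MAX switch keeps the STR() stringification trick (its definition leaves debugfs.h below): the macro turns the numeric limit into a string literal that can be pasted into a scanf field width, bounding each %s to the buffer size. A minimal demonstration; PATH_MAX is assumed to come from <limits.h>, as on Linux:

#include <stdio.h>
#include <limits.h>	/* PATH_MAX, assumed available */

#define _STR(x) #x
#define STR(x)	_STR(x)		/* expand x first, then stringify */

int main(void)
{
	char mountpoint[PATH_MAX + 1];

	/* "%" STR(PATH_MAX) "s" becomes e.g. "%4096s": a bounded read */
	if (sscanf("cgroup /sys/fs/cgroup", "%*s %" STR(PATH_MAX) "s",
		   mountpoint) == 1)
		printf("%s\n", mountpoint);
	return 0;
}
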
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 80d9598db31a..0deac6a14b65 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -1,5 +1,8 @@
/*
- * GIT - The information manager from hell
+ * config.c
+ *
+ * Helper functions for parsing config items.
+ * Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
* Copyright (C) Johannes Schindelin, 2005
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
index a88fefc0cc0a..ffc35e748e89 100644
--- a/tools/perf/util/debugfs.c
+++ b/tools/perf/util/debugfs.c
@@ -2,8 +2,12 @@
#include "debugfs.h"
#include "cache.h"
+#include <linux/kernel.h>
+#include <sys/mount.h>
+
static int debugfs_premounted;
-static char debugfs_mountpoint[MAX_PATH+1];
+char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
+char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
static const char *debugfs_known_mountpoints[] = {
"/sys/kernel/debug/",
@@ -62,11 +66,9 @@ const char *debugfs_find_mountpoint(void)
/* give up and parse /proc/mounts */
fp = fopen("/proc/mounts", "r");
if (fp == NULL)
- die("Can't open /proc/mounts for read");
+ return NULL;
- while (fscanf(fp, "%*s %"
- STR(MAX_PATH)
- "s %99s %*s %*d %*d\n",
+ while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
debugfs_mountpoint, type) == 2) {
if (strcmp(type, "debugfs") == 0)
break;
@@ -106,6 +108,12 @@ int debugfs_valid_entry(const char *path)
return 0;
}
+static void debugfs_set_tracing_events_path(const char *mountpoint)
+{
+ snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
+ mountpoint, "tracing/events");
+}
+
/* mount the debugfs somewhere if it's not mounted */
char *debugfs_mount(const char *mountpoint)
@@ -113,7 +121,7 @@ char *debugfs_mount(const char *mountpoint)
/* see if it's already mounted */
if (debugfs_find_mountpoint()) {
debugfs_premounted = 1;
- return debugfs_mountpoint;
+ goto out;
}
/* if not mounted and no argument */
@@ -129,12 +137,19 @@ char *debugfs_mount(const char *mountpoint)
return NULL;
/* save the mountpoint */
- strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
debugfs_found = 1;
-
+ strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+out:
+ debugfs_set_tracing_events_path(debugfs_mountpoint);
return debugfs_mountpoint;
}
+void debugfs_set_path(const char *mountpoint)
+{
+ snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint);
+ debugfs_set_tracing_events_path(mountpoint);
+}
+
/* umount the debugfs */
int debugfs_umount(void)
@@ -158,7 +173,7 @@ int debugfs_umount(void)
int debugfs_write(const char *entry, const char *value)
{
- char path[MAX_PATH+1];
+ char path[PATH_MAX + 1];
int ret, count;
int fd;
@@ -203,7 +218,7 @@ int debugfs_write(const char *entry, const char *value)
*/
int debugfs_read(const char *entry, char *buffer, size_t size)
{
- char path[MAX_PATH+1];
+ char path[PATH_MAX + 1];
int ret;
int fd;
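
The invariant these debugfs.c changes establish: tracing_events_path is always <mountpoint>/tracing/events, recomputed on every route that can change the mountpoint (the static defaults, debugfs_mount(), debugfs_set_path()). A condensed model of the two entry points, not the actual code:

#include <stdio.h>
#include <limits.h>

static char mountpoint[PATH_MAX + 1]  = "/sys/kernel/debug";
static char events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";

static void set_tracing_events_path(const char *mp)
{
	snprintf(events_path, sizeof(events_path), "%s/tracing/events", mp);
}

void set_path(const char *mp)		/* models debugfs_set_path() */
{
	snprintf(mountpoint, sizeof(mountpoint), "%s", mp);
	set_tracing_events_path(mp);	/* never update one without the other */
}
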
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
index 83a02879745f..4a878f735eb0 100644
--- a/tools/perf/util/debugfs.h
+++ b/tools/perf/util/debugfs.h
@@ -1,25 +1,18 @@
#ifndef __DEBUGFS_H__
#define __DEBUGFS_H__
-#include <sys/mount.h>
+const char *debugfs_find_mountpoint(void);
+int debugfs_valid_mountpoint(const char *debugfs);
+int debugfs_valid_entry(const char *path);
+char *debugfs_mount(const char *mountpoint);
+int debugfs_umount(void);
+void debugfs_set_path(const char *mountpoint);
+int debugfs_write(const char *entry, const char *value);
+int debugfs_read(const char *entry, char *buffer, size_t size);
+void debugfs_force_cleanup(void);
+int debugfs_make_path(const char *element, char *buffer, int size);
-#ifndef MAX_PATH
-# define MAX_PATH 256
-#endif
-
-#ifndef STR
-# define _STR(x) #x
-# define STR(x) _STR(x)
-#endif
-
-extern const char *debugfs_find_mountpoint(void);
-extern int debugfs_valid_mountpoint(const char *debugfs);
-extern int debugfs_valid_entry(const char *path);
-extern char *debugfs_mount(const char *mountpoint);
-extern int debugfs_umount(void);
-extern int debugfs_write(const char *entry, const char *value);
-extern int debugfs_read(const char *entry, char *buffer, size_t size);
-extern void debugfs_force_cleanup(void);
-extern int debugfs_make_path(const char *element, char *buffer, int size);
+extern char debugfs_mountpoint[];
+extern char tracing_events_path[];
#endif /* __DEBUGFS_H__ */
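
The event.c diff that follows splits perf_event__synthesize_comm() so the /proc/<pid>/status parsing lives in its own helper, perf_event__get_comm_tgid(), which scans for the Name: and Tgid: lines and returns the thread group id (or -1). A simplified standalone model of that parsing; error handling and the raced-task corner cases are elided:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

/* models perf_event__get_comm_tgid(): fill comm, return tgid or -1 */
static int get_comm_tgid(int pid, char *comm, size_t len)
{
	char filename[64], bf[256];
	int tgid = -1;
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
	fp = fopen(filename, "r");
	if (fp == NULL)
		return -1;

	comm[0] = '\0';
	while ((!comm[0] || tgid < 0) && fgets(bf, sizeof(bf), fp)) {
		if (!memcmp(bf, "Name:", 5)) {
			char *name = bf + 5;

			while (*name && isspace((unsigned char)*name))
				++name;
			name[strcspn(name, "\n")] = '\0';
			snprintf(comm, len, "%s", name);	/* bounded copy */
		} else if (!memcmp(bf, "Tgid:", 5)) {
			tgid = atoi(bf + 5);	/* atoi skips the whitespace */
		}
	}
	fclose(fp);
	return tgid;
}
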
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 437f8ca679a0..73ddaf06b8e7 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,7 +1,6 @@
#include <linux/types.h>
#include "event.h"
#include "debug.h"
-#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
@@ -44,36 +43,27 @@ static struct perf_sample synth_sample = {
.period = 1,
};
-static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
- int full, perf_event__handler_t process,
- struct perf_session *session)
+static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
char filename[PATH_MAX];
char bf[BUFSIZ];
FILE *fp;
size_t size = 0;
- DIR *tasks;
- struct dirent dirent, *next;
- pid_t tgid = 0;
+ pid_t tgid = -1;
snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
fp = fopen(filename, "r");
if (fp == NULL) {
-out_race:
- /*
- * We raced with a task exiting - just return:
- */
pr_debug("couldn't open %s\n", filename);
return 0;
}
- memset(&event->comm, 0, sizeof(event->comm));
-
- while (!event->comm.comm[0] || !event->comm.pid) {
+ while (!comm[0] || (tgid < 0)) {
if (fgets(bf, sizeof(bf), fp) == NULL) {
- pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
- goto out;
+ pr_warning("couldn't get COMM and pgid, malformed %s\n",
+ filename);
+ break;
}
if (memcmp(bf, "Name:", 5) == 0) {
@@ -81,33 +71,65 @@ out_race:
while (*name && isspace(*name))
++name;
size = strlen(name) - 1;
- memcpy(event->comm.comm, name, size++);
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+
} else if (memcmp(bf, "Tgid:", 5) == 0) {
char *tgids = bf + 5;
while (*tgids && isspace(*tgids))
++tgids;
- tgid = event->comm.pid = atoi(tgids);
+ tgid = atoi(tgids);
}
}
+ fclose(fp);
+
+ return tgid;
+}
+
+static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ int full,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ char filename[PATH_MAX];
+ size_t size;
+ DIR *tasks;
+ struct dirent dirent, *next;
+ pid_t tgid;
+
+ memset(&event->comm, 0, sizeof(event->comm));
+
+ tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+ if (tgid < 0)
+ goto out;
+
+ event->comm.pid = tgid;
event->comm.header.type = PERF_RECORD_COMM;
+
+ size = strlen(event->comm.comm) + 1;
size = ALIGN(size, sizeof(u64));
- memset(event->comm.comm + size, 0, session->id_hdr_size);
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
- session->id_hdr_size);
+ machine->id_hdr_size);
if (!full) {
event->comm.tid = pid;
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
goto out;
}
snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
tasks = opendir(filename);
- if (tasks == NULL)
- goto out_race;
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
while (!readdir_r(tasks, &dirent, &next) && next) {
char *end;
@@ -115,22 +137,32 @@ out_race:
if (*end)
continue;
+ /* already have tgid; just want to update the comm */
+ (void) perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+
+ size = strlen(event->comm.comm) + 1;
+ size = ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+
event->comm.tid = pid;
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
}
closedir(tasks);
out:
- fclose(fp);
-
return tgid;
}
-static int perf_event__synthesize_mmap_events(union perf_event *event,
+static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
- struct perf_session *session)
+ struct machine *machine)
{
char filename[PATH_MAX];
FILE *fp;
@@ -193,12 +225,12 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
event->mmap.len -= event->mmap.start;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, session->id_hdr_size);
- event->mmap.header.size += session->id_hdr_size;
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
event->mmap.pid = tgid;
event->mmap.tid = pid;
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
}
}
@@ -206,14 +238,14 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
return 0;
}
-int perf_event__synthesize_modules(perf_event__handler_t process,
- struct perf_session *session,
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine)
{
struct rb_node *nd;
struct map_groups *kmaps = &machine->kmaps;
union perf_event *event = zalloc((sizeof(event->mmap) +
- session->id_hdr_size));
+ machine->id_hdr_size));
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -243,15 +275,15 @@ int perf_event__synthesize_modules(perf_event__handler_t process,
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, session->id_hdr_size);
- event->mmap.header.size += session->id_hdr_size;
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
event->mmap.start = pos->start;
event->mmap.len = pos->end - pos->start;
event->mmap.pid = machine->pid;
memcpy(event->mmap.filename, pos->dso->long_name,
pos->dso->long_name_len + 1);
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
}
free(event);
@@ -260,40 +292,69 @@ int perf_event__synthesize_modules(perf_event__handler_t process,
static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *mmap_event,
- pid_t pid, perf_event__handler_t process,
- struct perf_session *session)
+ pid_t pid, int full,
+ perf_event__handler_t process,
+ struct perf_tool *tool,
+ struct machine *machine)
{
- pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
- session);
+ pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
+ process, machine);
if (tgid == -1)
return -1;
- return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
- process, session);
+ return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine);
}
-int perf_event__synthesize_thread_map(struct thread_map *threads,
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
perf_event__handler_t process,
- struct perf_session *session)
+ struct machine *machine)
{
union perf_event *comm_event, *mmap_event;
- int err = -1, thread;
+ int err = -1, thread, j;
- comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
goto out;
- mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
err = 0;
for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event,
- threads->map[thread],
- process, session)) {
+ threads->map[thread], 0,
+ process, tool, machine)) {
err = -1;
break;
}
+
+ /*
+ * comm.pid is set to thread group id by
+ * perf_event__synthesize_comm
+ */
+ if ((int) comm_event->comm.pid != threads->map[thread]) {
+ bool need_leader = true;
+
+ /* is thread group leader in thread_map? */
+ for (j = 0; j < threads->nr; ++j) {
+ if ((int) comm_event->comm.pid == threads->map[j]) {
+ need_leader = false;
+ break;
+ }
+ }
+
+ /* if not, generate events for it */
+ if (need_leader &&
+ __event__synthesize_thread(comm_event,
+ mmap_event,
+ comm_event->comm.pid, 0,
+ process, tool, machine)) {
+ err = -1;
+ break;
+ }
+ }
}
free(mmap_event);
out_free_comm:
@@ -302,19 +363,20 @@ out:
return err;
}
-int perf_event__synthesize_threads(perf_event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
{
DIR *proc;
struct dirent dirent, *next;
union perf_event *comm_event, *mmap_event;
int err = -1;
- comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
goto out;
- mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
@@ -329,8 +391,8 @@ int perf_event__synthesize_threads(perf_event__handler_t process,
if (*end) /* only interested in proper numerical dirents */
continue;
- __event__synthesize_thread(comm_event, mmap_event, pid,
- process, session);
+ __event__synthesize_thread(comm_event, mmap_event, pid, 1,
+ process, tool, machine);
}
closedir(proc);
@@ -365,8 +427,8 @@ static int find_symbol_cb(void *arg, const char *name, char type,
return 1;
}
-int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
- struct perf_session *session,
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine,
const char *symbol_name)
{
@@ -383,7 +445,7 @@ int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
*/
struct process_symbol_args args = { .name = symbol_name, };
union perf_event *event = zalloc((sizeof(event->mmap) +
- session->id_hdr_size));
+ machine->id_hdr_size));
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -417,25 +479,32 @@ int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
size = ALIGN(size, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size) + session->id_hdr_size);
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
event->mmap.pgoff = args.start;
event->mmap.start = map->start;
event->mmap.len = map->end - event->mmap.start;
event->mmap.pid = machine->pid;
- err = process(event, &synth_sample, session);
+ err = process(tool, event, &synth_sample, machine);
free(event);
return err;
}
-int perf_event__process_comm(union perf_event *event,
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
+}
+
+int perf_event__process_comm(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->comm.tid);
+ struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
- dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);
+ if (dump_trace)
+ perf_event__fprintf_comm(event, stdout);
if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
@@ -445,13 +514,13 @@ int perf_event__process_comm(union perf_event *event,
return 0;
}
-int perf_event__process_lost(union perf_event *event,
+int perf_event__process_lost(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine __used)
{
dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
event->lost.id, event->lost.lost);
- session->hists.stats.total_lost += event->lost.lost;
return 0;
}
@@ -468,21 +537,15 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event,
maps[MAP__FUNCTION]->end = ~0ULL;
}
-static int perf_event__process_kernel_mmap(union perf_event *event,
- struct perf_session *session)
+static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct machine *machine)
{
struct map *map;
char kmmap_prefix[PATH_MAX];
- struct machine *machine;
enum dso_kernel_type kernel_type;
bool is_kernel_mmap;
- machine = perf_session__findnew_machine(session, event->mmap.pid);
- if (!machine) {
- pr_err("Can't find id %d's machine\n", event->mmap.pid);
- goto out_problem;
- }
-
machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
if (machine__is_host(machine))
kernel_type = DSO_TYPE_KERNEL;
@@ -549,9 +612,9 @@ static int perf_event__process_kernel_mmap(union perf_event *event,
* time /proc/sys/kernel/kptr_restrict was non zero.
*/
if (event->mmap.pgoff != 0) {
- perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
- symbol_name,
- event->mmap.pgoff);
+ maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ event->mmap.pgoff);
}
if (machine__is_default_guest(machine)) {
@@ -567,32 +630,35 @@ out_problem:
return -1;
}
-int perf_event__process_mmap(union perf_event *event,
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
+ event->mmap.pid, event->mmap.tid, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+}
+
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct machine *machine;
struct thread *thread;
struct map *map;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
int ret = 0;
- dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
- event->mmap.pid, event->mmap.tid, event->mmap.start,
- event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+ if (dump_trace)
+ perf_event__fprintf_mmap(event, stdout);
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
cpumode == PERF_RECORD_MISC_KERNEL) {
- ret = perf_event__process_kernel_mmap(event, session);
+ ret = perf_event__process_kernel_mmap(tool, event, machine);
if (ret < 0)
goto out_problem;
return 0;
}
- machine = perf_session__find_host_machine(session);
- if (machine == NULL)
- goto out_problem;
- thread = perf_session__findnew(session, event->mmap.pid);
+ thread = machine__findnew_thread(machine, event->mmap.pid);
if (thread == NULL)
goto out_problem;
map = map__new(&machine->user_dsos, event->mmap.start,
@@ -610,18 +676,26 @@ out_problem:
return 0;
}
-int perf_event__process_task(union perf_event *event,
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, "(%d:%d):(%d:%d)\n",
+ event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+}
+
+int perf_event__process_task(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->fork.tid);
- struct thread *parent = perf_session__findnew(session, event->fork.ptid);
+ struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+ struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
- dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
- event->fork.ppid, event->fork.ptid);
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
if (event->header.type == PERF_RECORD_EXIT) {
- perf_session__remove_thread(session, thread);
+ machine__remove_thread(machine, thread);
return 0;
}
@@ -634,22 +708,45 @@ int perf_event__process_task(union perf_event *event,
return 0;
}
-int perf_event__process(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session)
+size_t perf_event__fprintf(union perf_event *event, FILE *fp)
+{
+ size_t ret = fprintf(fp, "PERF_RECORD_%s",
+ perf_event__name(event->header.type));
+
+ switch (event->header.type) {
+ case PERF_RECORD_COMM:
+ ret += perf_event__fprintf_comm(event, fp);
+ break;
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
+ ret += perf_event__fprintf_task(event, fp);
+ break;
+ case PERF_RECORD_MMAP:
+ ret += perf_event__fprintf_mmap(event, fp);
+ break;
+ default:
+ ret += fprintf(fp, "\n");
+ }
+
+ return ret;
+}
+
+int perf_event__process(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine)
{
switch (event->header.type) {
case PERF_RECORD_COMM:
- perf_event__process_comm(event, sample, session);
+ perf_event__process_comm(tool, event, sample, machine);
break;
case PERF_RECORD_MMAP:
- perf_event__process_mmap(event, sample, session);
+ perf_event__process_mmap(tool, event, sample, machine);
break;
case PERF_RECORD_FORK:
case PERF_RECORD_EXIT:
- perf_event__process_task(event, sample, session);
+ perf_event__process_task(tool, event, sample, machine);
break;
case PERF_RECORD_LOST:
- perf_event__process_lost(event, sample, session);
+ perf_event__process_lost(tool, event, sample, machine);
default:
break;
}
@@ -658,36 +755,29 @@ int perf_event__process(union perf_event *event, struct perf_sample *sample,
}
void thread__find_addr_map(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+ struct machine *machine, u8 cpumode,
+ enum map_type type, u64 addr,
struct addr_location *al)
{
struct map_groups *mg = &self->mg;
- struct machine *machine = NULL;
al->thread = self;
al->addr = addr;
al->cpumode = cpumode;
al->filtered = false;
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
- machine = perf_session__find_host_machine(session);
- if (machine == NULL) {
- al->map = NULL;
- return;
- }
mg = &machine->kmaps;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
- machine = perf_session__find_host_machine(session);
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
- machine = perf_session__find_machine(session, pid);
- if (machine == NULL) {
- al->map = NULL;
- return;
- }
mg = &machine->kmaps;
} else {
/*
@@ -733,13 +823,12 @@ try_again:
al->addr = al->map->map_ip(al->map, al->addr);
}
-void thread__find_addr_location(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter)
{
- thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
+ thread__find_addr_map(thread, machine, cpumode, type, addr, al);
if (al->map != NULL)
al->sym = map__find_symbol(al->map, al->addr, filter);
else
@@ -747,13 +836,13 @@ void thread__find_addr_location(struct thread *self,
}
int perf_event__preprocess_sample(const union perf_event *event,
- struct perf_session *session,
+ struct machine *machine,
struct addr_location *al,
struct perf_sample *sample,
symbol_filter_t filter)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL)
return -1;
@@ -764,18 +853,18 @@ int perf_event__preprocess_sample(const union perf_event *event,
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
/*
- * Have we already created the kernel maps for the host machine?
+ * Have we already created the kernel maps for this machine?
*
* This should have happened earlier, when we processed the kernel MMAP
* events, but for older perf.data files there was no such thing, so do
* it now.
*/
if (cpumode == PERF_RECORD_MISC_KERNEL &&
- session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
- machine__create_kernel_maps(&session->host_machine);
+ machine->vmlinux_maps[MAP__FUNCTION] == NULL)
+ machine__create_kernel_maps(machine);
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, event->ip.ip, al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -783,13 +872,14 @@ int perf_event__preprocess_sample(const union perf_event *event,
al->cpu = sample->cpu;
if (al->map) {
+ struct dso *dso = al->map->dso;
+
if (symbol_conf.dso_list &&
- (!al->map || !al->map->dso ||
- !(strlist__has_entry(symbol_conf.dso_list,
- al->map->dso->short_name) ||
- (al->map->dso->short_name != al->map->dso->long_name &&
- strlist__has_entry(symbol_conf.dso_list,
- al->map->dso->long_name)))))
+ (!dso || !(strlist__has_entry(symbol_conf.dso_list,
+ dso->short_name) ||
+ (dso->short_name != dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list,
+ dso->long_name)))))
goto out_filtered;
al->sym = map__find_symbol(al->map, al->addr, filter);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 357a85b85248..cbdeaad9c5e5 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -2,6 +2,7 @@
#define __PERF_RECORD_H
#include <limits.h>
+#include <stdio.h>
#include "../perf.h"
#include "map.h"
@@ -141,43 +142,54 @@ union perf_event {
void perf_event__print_totals(void);
-struct perf_session;
+struct perf_tool;
struct thread_map;
-typedef int (*perf_event__handler_synth_t)(union perf_event *event,
- struct perf_session *session);
-typedef int (*perf_event__handler_t)(union perf_event *event,
+typedef int (*perf_event__handler_t)(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session);
+ struct machine *machine);
-int perf_event__synthesize_thread_map(struct thread_map *threads,
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__synthesize_threads(perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
- struct perf_session *session,
+ struct machine *machine);
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine,
const char *symbol_name);
-int perf_event__synthesize_modules(perf_event__handler_t process,
- struct perf_session *session,
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine);
-int perf_event__process_comm(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process_lost(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process_task(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
+int perf_event__process_comm(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_lost(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_task(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
struct addr_location;
int perf_event__preprocess_sample(const union perf_event *self,
- struct perf_session *session,
+ struct machine *machine,
struct addr_location *al,
struct perf_sample *sample,
symbol_filter_t filter);
@@ -187,5 +199,13 @@ const char *perf_event__name(unsigned int id);
int perf_event__parse_sample(const union perf_event *event, u64 type,
int sample_size, bool sample_id_all,
struct perf_sample *sample, bool swapped);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ const struct perf_sample *sample,
+ bool swapped);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
#endif /* __PERF_RECORD_H */
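
For reference, a hypothetical tool-side callback matching the new four-argument perf_event__handler_t shape declared above; the body simply delegates to the stock dispatcher, which is what a tool with no special per-event needs would do:

static int my_process_event(struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct machine *machine)
{
        return perf_event__process(tool, event, sample, machine);
}
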
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index fbb4b4ab9cc6..fa1837088ca8 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -6,12 +6,16 @@
*
* Released under the GPL v2. (and only v2, not any later version)
*/
+#include "util.h"
+#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
-#include "util.h"
+#include <unistd.h>
+
+#include "parse-events.h"
#include <sys/mman.h>
@@ -30,6 +34,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
perf_evlist__set_maps(evlist, cpus, threads);
+ evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
@@ -43,6 +48,22 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
return evlist;
}
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+ struct perf_record_opts *opts)
+{
+ struct perf_evsel *evsel;
+
+ if (evlist->cpus->map[0] < 0)
+ opts->no_inherit = true;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ perf_evsel__config(evsel, opts);
+
+ if (evlist->nr_entries > 1)
+ evsel->attr.sample_type |= PERF_SAMPLE_ID;
+ }
+}
+
static void perf_evlist__purge(struct perf_evlist *evlist)
{
struct perf_evsel *pos, *n;
@@ -76,6 +97,14 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
++evlist->nr_entries;
}
+static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries)
+{
+ list_splice_tail(list, &evlist->entries);
+ evlist->nr_entries += nr_entries;
+}
+
int perf_evlist__add_default(struct perf_evlist *evlist)
{
struct perf_event_attr attr = {
@@ -100,6 +129,126 @@ error:
return -ENOMEM;
}
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(head);
+ size_t i;
+
+ for (i = 0; i < nr_attrs; i++) {
+ evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ list_add_tail(&evsel->node, &head);
+ }
+
+ perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+
+ return 0;
+
+out_delete_partial_list:
+ list_for_each_entry_safe(evsel, n, &head, node)
+ perf_evsel__delete(evsel);
+ return -1;
+}
+
+static int trace_event__id(const char *evname)
+{
+ char *filename, *colon;
+ int err = -1, fd;
+
+ if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
+ return -1;
+
+ colon = strrchr(filename, ':');
+ if (colon != NULL)
+ *colon = '/';
+
+ fd = open(filename, O_RDONLY);
+ if (fd >= 0) {
+ char id[16];
+ if (read(fd, id, sizeof(id)) > 0)
+ err = atoi(id);
+ close(fd);
+ }
+
+ free(filename);
+ return err;
+}
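
trace_event__id() maps an event name such as "sched:sched_switch" to the numeric id the kernel exports under the tracing events directory, rewriting the ':' to '/' to form the path. A self-contained equivalent, assuming the usual debugfs mount point:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int tracepoint_id(const char *sys, const char *name)
{
        char path[256], buf[16];
        int fd, id = -1;
        ssize_t n;

        snprintf(path, sizeof(path),
                 "/sys/kernel/debug/tracing/events/%s/%s/id", sys, name);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                id = atoi(buf);
        }
        close(fd);
        return id;
}

int main(void)
{
        printf("sched_switch id: %d\n", tracepoint_id("sched", "sched_switch"));
        return 0;
}
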
+
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+ const char *tracepoints[],
+ size_t nr_tracepoints)
+{
+ int err;
+ size_t i;
+ struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));
+
+ if (attrs == NULL)
+ return -1;
+
+ for (i = 0; i < nr_tracepoints; i++) {
+ err = trace_event__id(tracepoints[i]);
+
+ if (err < 0)
+ goto out_free_attrs;
+
+ attrs[i].type = PERF_TYPE_TRACEPOINT;
+ attrs[i].config = err;
+ attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU);
+ attrs[i].sample_period = 1;
+ }
+
+ err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
+out_free_attrs:
+ free(attrs);
+ return err;
+}
+
+static struct perf_evsel *
+ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+ (int)evsel->attr.config == id)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs)
+{
+ struct perf_evsel *evsel;
+ int err;
+ size_t i;
+
+ for (i = 0; i < nr_assocs; i++) {
+ err = trace_event__id(assocs[i].name);
+ if (err < 0)
+ goto out;
+
+ evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
+ if (evsel == NULL)
+ continue;
+
+ err = -EEXIST;
+ if (evsel->handler.func != NULL)
+ goto out;
+ evsel->handler.func = assocs[i].handler;
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
void perf_evlist__disable(struct perf_evlist *evlist)
{
int cpu, thread;
@@ -126,7 +275,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
}
}
-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
@@ -282,7 +431,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
evlist->mmap = NULL;
}
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
evlist->nr_mmaps = evlist->cpus->nr;
if (evlist->cpus->map[0] == -1)
@@ -298,8 +447,10 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
evlist->mmap[idx].mask = mask;
evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
MAP_SHARED, fd, 0);
- if (evlist->mmap[idx].base == MAP_FAILED)
+ if (evlist->mmap[idx].base == MAP_FAILED) {
+ evlist->mmap[idx].base = NULL;
return -1;
+ }
perf_evlist__add_pollfd(evlist, fd);
return 0;
@@ -400,14 +551,22 @@ out_unmap:
*
* Using perf_evlist__read_on_cpu does this automatically.
*/
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite)
{
unsigned int page_size = sysconf(_SC_PAGE_SIZE);
- int mask = pages * page_size - 1;
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
- int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+ int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+
+ /* 512 kiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return -EINVAL;
+
+ mask = pages * page_size - 1;
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;
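
The power-of-two requirement exists because mask = pages * page_size - 1 is later used to wrap offsets into the ring buffer with a single AND. A standalone illustration, with is_power_of_2() reimplemented locally and the values assumed:

#include <stdio.h>

static int is_pow2(unsigned int x)      /* local stand-in for is_power_of_2() */
{
        return x && !(x & (x - 1));
}

int main(void)
{
        unsigned int page_size = 4096, pages = 128;     /* assumed values */
        unsigned long mask, offset = 0x90000;

        if (!is_pow2(pages))
                return 1;
        mask = (unsigned long)pages * page_size - 1;
        /* one AND wraps any offset into the ring */
        printf("offset %#lx wraps to %#lx\n", offset, offset & mask);
        return 0;
}
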
@@ -512,6 +671,38 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
return first->attr.sample_type;
}
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *first;
+ struct perf_sample *data;
+ u64 sample_type;
+ u16 size = 0;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ if (!first->attr.sample_id_all)
+ goto out;
+
+ sample_type = first->attr.sample_type;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ size += sizeof(data->tid) * 2;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ size += sizeof(data->time);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ size += sizeof(data->id);
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ size += sizeof(data->stream_id);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ size += sizeof(data->cpu) * 2;
+out:
+ return size;
+}
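
As a worked example of the accounting above: with sample_id_all set and sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the trailer is 2*4 + 8 + 8 = 24 bytes. A standalone recomputation, using stand-in flag values rather than the real ABI bits:

#include <stdint.h>
#include <stdio.h>

enum { TID = 1, TIME = 2, ID = 4 };     /* stand-in flag values */

int main(void)
{
        unsigned int type = TID | TIME | ID;
        uint16_t size = 0;

        if (type & TID)  size += 2 * sizeof(uint32_t);  /* pid + tid */
        if (type & TIME) size += sizeof(uint64_t);      /* timestamp */
        if (type & ID)   size += sizeof(uint64_t);      /* event id  */
        printf("id header trailer: %u bytes\n", size);  /* prints 24 */
        return 0;
}
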
+
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
struct perf_evsel *pos, *first;
@@ -569,3 +760,97 @@ out_err:
return err;
}
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct perf_record_opts *opts,
+ const char *argv[])
+{
+ int child_ready_pipe[2], go_pipe[2];
+ char bf;
+
+ if (pipe(child_ready_pipe) < 0) {
+ perror("failed to create 'ready' pipe");
+ return -1;
+ }
+
+ if (pipe(go_pipe) < 0) {
+ perror("failed to create 'go' pipe");
+ goto out_close_ready_pipe;
+ }
+
+ evlist->workload.pid = fork();
+ if (evlist->workload.pid < 0) {
+ perror("failed to fork");
+ goto out_close_pipes;
+ }
+
+ if (!evlist->workload.pid) {
+ if (opts->pipe_output)
+ dup2(2, 1);
+
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &bf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ kill(getppid(), SIGUSR1);
+ exit(-1);
+ }
+
+ if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1)
+ evlist->threads->map[0] = evlist->workload.pid;
+
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ /*
+ * wait for child to settle
+ */
+ if (read(child_ready_pipe[0], &bf, 1) == -1) {
+ perror("unable to read pipe");
+ goto out_close_pipes;
+ }
+
+ evlist->workload.cork_fd = go_pipe[1];
+ close(child_ready_pipe[0]);
+ return 0;
+
+out_close_pipes:
+ close(go_pipe[0]);
+ close(go_pipe[1]);
+out_close_ready_pipe:
+ close(child_ready_pipe[0]);
+ close(child_ready_pipe[1]);
+ return -1;
+}
+
+int perf_evlist__start_workload(struct perf_evlist *evlist)
+{
+ if (evlist->workload.cork_fd > 0) {
+ /*
+ * Remove the cork, let it rip!
+ */
+ return close(evlist->workload.cork_fd);
+ }
+
+ return 0;
+}
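
The prepare/start split hinges on the go_pipe "cork": the forked child parks in read() until the parent closes the write end (EOF releases it), so counters can be opened and enabled before the workload executes its first instruction. A stripped-down standalone version of the same handshake:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int go[2];
        char bf;
        pid_t pid;

        if (pipe(go) < 0)
                return 1;

        pid = fork();
        if (pid == 0) {
                close(go[1]);
                /* park until the parent removes the cork (read sees EOF) */
                if (read(go[0], &bf, 1) == -1)
                        perror("read");
                execlp("true", "true", (char *)NULL);
                _exit(127);
        }
        close(go[0]);
        /* ... the parent would open and enable counters here ... */
        close(go[1]);           /* remove the cork, let it rip */
        waitpid(pid, NULL, 0);
        return 0;
}
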
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 1779ffef7828..8922aeed0467 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -2,12 +2,16 @@
#define __PERF_EVLIST_H 1
#include <linux/list.h>
+#include <stdio.h>
#include "../perf.h"
#include "event.h"
+#include "util.h"
+#include <unistd.h>
struct pollfd;
struct thread_map;
struct cpu_map;
+struct perf_record_opts;
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
@@ -19,6 +23,10 @@ struct perf_evlist {
int nr_fds;
int nr_mmaps;
int mmap_len;
+ struct {
+ int cork_fd;
+ pid_t pid;
+ } workload;
bool overwrite;
union perf_event event_copy;
struct perf_mmap *mmap;
@@ -28,6 +36,11 @@ struct perf_evlist {
struct perf_evsel *selected;
};
+struct perf_evsel_str_handler {
+ const char *name;
+ void *handler;
+};
+
struct perf_evsel;
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
@@ -39,11 +52,26 @@ void perf_evlist__delete(struct perf_evlist *evlist);
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
int perf_evlist__add_default(struct perf_evlist *evlist);
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs);
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+ const char *tracepoints[], size_t nr_tracepoints);
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs);
+
+#define perf_evlist__add_attrs_array(evlist, array) \
+ perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__add_tracepoints_array(evlist, array) \
+ perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
+ perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
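
Hypothetical usage of the _array convenience wrappers; the tracepoint names and function are illustrative, and ARRAY_SIZE supplies the count from the array type:

static const char *sched_tracepoints[] = {
        "sched:sched_switch",
        "sched:sched_wakeup",
};

static int setup_events(struct perf_evlist *evlist)
{
        return perf_evlist__add_tracepoints_array(evlist, sched_tracepoints);
}
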
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
int cpu, int thread, u64 id);
-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
@@ -52,8 +80,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
int perf_evlist__open(struct perf_evlist *evlist, bool group);
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+ struct perf_record_opts *opts);
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct perf_record_opts *opts,
+ const char *argv[]);
+int perf_evlist__start_workload(struct perf_evlist *evlist);
+
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);
void perf_evlist__disable(struct perf_evlist *evlist);
@@ -77,6 +113,7 @@ int perf_evlist__set_filters(struct perf_evlist *evlist);
u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d7915d4e77cb..667f3b78bb2c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -63,6 +63,79 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
return evsel;
}
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ int track = !evsel->idx; /* only the first counter needs these */
+
+ attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
+ attr->inherit = !opts->no_inherit;
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING |
+ PERF_FORMAT_ID;
+
+ attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+ /*
+ * We default some events to a sample period of 1. But keep
+ * it a weak assumption, overridable by the user.
+ */
+ if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+ opts->user_interval != ULLONG_MAX)) {
+ if (opts->freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = opts->freq;
+ } else {
+ attr->sample_period = opts->default_interval;
+ }
+ }
+
+ if (opts->no_samples)
+ attr->sample_freq = 0;
+
+ if (opts->inherit_stat)
+ attr->inherit_stat = 1;
+
+ if (opts->sample_address) {
+ attr->sample_type |= PERF_SAMPLE_ADDR;
+ attr->mmap_data = track;
+ }
+
+ if (opts->call_graph)
+ attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+ if (opts->system_wide)
+ attr->sample_type |= PERF_SAMPLE_CPU;
+
+ if (opts->period)
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+
+ if (opts->sample_id_all_avail &&
+ (opts->sample_time || opts->system_wide ||
+ !opts->no_inherit || opts->cpu_list))
+ attr->sample_type |= PERF_SAMPLE_TIME;
+
+ if (opts->raw_samples) {
+ attr->sample_type |= PERF_SAMPLE_TIME;
+ attr->sample_type |= PERF_SAMPLE_RAW;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+ }
+
+ if (opts->no_delay) {
+ attr->watermark = 0;
+ attr->wakeup_events = 1;
+ }
+
+ attr->mmap = track;
+ attr->comm = track;
+
+ if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) {
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+ }
+}
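
A hypothetical caller, touching only perf_record_opts fields that perf_evsel__config() reads above; perf_evlist__config_attrs() (added earlier in this patch) applies it to every evsel on the list:

static void config_for_sampling(struct perf_evlist *evlist,
                                struct perf_record_opts *opts)
{
        opts->freq = 4000;              /* frequency mode -> PERF_SAMPLE_PERIOD */
        opts->call_graph = true;        /* adds PERF_SAMPLE_CALLCHAIN */
        perf_evlist__config_attrs(evlist, opts);
}
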
+
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
int cpu, thread;
@@ -387,7 +460,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
u32 val32[2];
} u;
-
+ memset(data, 0, sizeof(*data));
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
@@ -504,3 +577,82 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
return 0;
}
+
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ const struct perf_sample *sample,
+ bool swapped)
+{
+ u64 *array;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union {
+ u64 val64;
+ u32 val32[2];
+ } u;
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IP) {
+ event->ip.ip = sample->ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_event__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ *array = sample->addr;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_event__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ *array = sample->period;
+ array++;
+ }
+
+ return 0;
+}
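
The val32/val64 union above packs two 32-bit fields into one 64-bit slot for cross-endian writers: each half is swapped, then the whole word, which is the exact inverse of the parse side. A standalone demonstration of the trick:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        union { uint64_t val64; uint32_t val32[2]; } u;

        u.val32[0] = 1234;                      /* pid */
        u.val32[1] = 5678;                      /* tid */
        u.val32[0] = bswap_32(u.val32[0]);      /* swap each half ... */
        u.val32[1] = bswap_32(u.val32[1]);
        u.val64 = bswap_64(u.val64);            /* ... then the whole word */
        printf("on-disk word: %#llx\n", (unsigned long long)u.val64);
        return 0;
}
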
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b1d15e6f7ae3..326b8e4d5035 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -61,12 +61,17 @@ struct perf_evsel {
off_t id_offset;
};
struct cgroup_sel *cgrp;
+ struct {
+ void *func;
+ void *data;
+ } handler;
bool supported;
};
struct cpu_map;
struct thread_map;
struct perf_evlist;
+struct perf_record_opts;
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
@@ -74,6 +79,9 @@ void perf_evsel__init(struct perf_evsel *evsel,
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
+void perf_evsel__config(struct perf_evsel *evsel,
+ struct perf_record_opts *opts);
+
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 33c17a2b2a81..3e7e0b09c12c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -8,6 +8,7 @@
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
+#include <linux/bitops.h>
#include <sys/utsname.h>
#include "evlist.h"
@@ -28,9 +29,6 @@ static struct perf_trace_event_type *events;
static u32 header_argc;
static const char **header_argv;
-static int dsos__write_buildid_table(struct perf_header *header, int fd);
-static int perf_session__cache_build_ids(struct perf_session *session);
-
int perf_header__push_event(u64 id, const char *name)
{
if (strlen(name) > MAX_EVENT_NAME)
@@ -187,6 +185,252 @@ perf_header__set_cmdline(int argc, const char **argv)
return 0;
}
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
+
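
The trailing if/else in the macro is what lets it take a normal loop body: the body binds to the else, and non-matching entries hit the continue. Conceptually (use(pos) is a placeholder body):

/*
 *      dsos__for_each_with_build_id(pos, head) { use(pos); }
 *
 * expands to:
 *
 *      list_for_each_entry(pos, head, node)
 *              if (!pos->has_build_id)
 *                      continue;
 *              else { use(pos); }
 */
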
+static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
+ u16 misc, int fd)
+{
+ struct dso *pos;
+
+ dsos__for_each_with_build_id(pos, head) {
+ int err;
+ struct build_id_event b;
+ size_t len;
+
+ if (!pos->hit)
+ continue;
+ len = pos->long_name_len + 1;
+ len = ALIGN(len, NAME_ALIGN);
+ memset(&b, 0, sizeof(b));
+ memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+ b.pid = pid;
+ b.header.misc = misc;
+ b.header.size = sizeof(b) + len;
+ err = do_write(fd, &b, sizeof(b));
+ if (err < 0)
+ return err;
+ err = write_padded(fd, pos->long_name,
+ pos->long_name_len + 1, len);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int machine__write_buildid_table(struct machine *machine, int fd)
+{
+ int err;
+ u16 kmisc = PERF_RECORD_MISC_KERNEL,
+ umisc = PERF_RECORD_MISC_USER;
+
+ if (!machine__is_host(machine)) {
+ kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+ umisc = PERF_RECORD_MISC_GUEST_USER;
+ }
+
+ err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
+ kmisc, fd);
+ if (err == 0)
+ err = __dsos__write_buildid_table(&machine->user_dsos,
+ machine->pid, umisc, fd);
+ return err;
+}
+
+static int dsos__write_buildid_table(struct perf_header *header, int fd)
+{
+ struct perf_session *session = container_of(header,
+ struct perf_session, header);
+ struct rb_node *nd;
+ int err = machine__write_buildid_table(&session->host_machine, fd);
+
+ if (err)
+ return err;
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ err = machine__write_buildid_table(pos, fd);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms)
+{
+ const size_t size = PATH_MAX;
+ char *realname, *filename = zalloc(size),
+ *linkname = zalloc(size), *targetname;
+ int len, err = -1;
+
+ if (is_kallsyms) {
+ if (symbol_conf.kptr_restrict) {
+ pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+ return 0;
+ }
+ realname = (char *)name;
+ } else
+ realname = realpath(name, NULL);
+
+ if (realname == NULL || filename == NULL || linkname == NULL)
+ goto out_free;
+
+ len = snprintf(filename, size, "%s%s%s",
+ debugdir, is_kallsyms ? "/" : "", realname);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+
+ snprintf(filename + len, size - len, "/%s", sbuild_id);
+
+ if (access(filename, F_OK)) {
+ if (is_kallsyms) {
+ if (copyfile("/proc/kallsyms", filename))
+ goto out_free;
+ } else if (link(realname, filename) && copyfile(name, filename))
+ goto out_free;
+ }
+
+ len = snprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+ goto out_free;
+
+ snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+ targetname = filename + strlen(debugdir) - 5;
+ memcpy(targetname, "../..", 5);
+
+ if (symlink(targetname, linkname) == 0)
+ err = 0;
+out_free:
+ if (!is_kallsyms)
+ free(realname);
+ free(filename);
+ free(linkname);
+ return err;
+}
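
On success the build-id cache ends up laid out roughly as follows (paths illustrative, assuming debugdir = ~/.debug and sbuild_id = ab0123...):

  ~/.debug/usr/lib/libc.so.6/ab0123...              (hardlink to, or copy of, the DSO)
  ~/.debug/.build-id/ab/0123... -> ../../usr/lib/libc.so.6/ab0123...
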
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+ const char *name, const char *debugdir,
+ bool is_kallsyms)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+ return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+ const size_t size = PATH_MAX;
+ char *filename = zalloc(size),
+ *linkname = zalloc(size);
+ int err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, sbuild_id + 2);
+
+ if (access(linkname, F_OK))
+ goto out_free;
+
+ if (readlink(linkname, filename, size - 1) < 0)
+ goto out_free;
+
+ if (unlink(linkname))
+ goto out_free;
+
+ /*
+ * Since the link is relative, we must make it absolute:
+ */
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, filename);
+
+ if (unlink(linkname))
+ goto out_free;
+
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+{
+ bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+
+ return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
+ dso->long_name, debugdir, is_kallsyms);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+ struct dso *pos;
+ int err = 0;
+
+ dsos__for_each_with_build_id(pos, head)
+ if (dso__cache_build_id(pos, debugdir))
+ err = -1;
+
+ return err;
+}
+
+static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
+{
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+ return ret;
+}
+
+static int perf_session__cache_build_ids(struct perf_session *session)
+{
+ struct rb_node *nd;
+ int ret;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
+
+ if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ return -1;
+
+ ret = machine__cache_build_ids(&session->host_machine, debugdir);
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__cache_build_ids(pos, debugdir);
+ }
+ return ret ? -1 : 0;
+}
+
+static bool machine__read_build_ids(struct machine *machine, bool with_hits)
+{
+ bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
+ ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
+ return ret;
+}
+
+static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
+{
+ struct rb_node *nd;
+ bool ret = machine__read_build_ids(&session->host_machine, with_hits);
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__read_build_ids(pos, with_hits);
+ }
+
+ return ret;
+}
+
static int write_trace_info(int fd, struct perf_header *h __used,
struct perf_evlist *evlist)
{
@@ -202,6 +446,9 @@ static int write_build_id(int fd, struct perf_header *h,
session = container_of(h, struct perf_session, header);
+ if (!perf_session__read_build_ids(session, true))
+ return -1;
+
err = dsos__write_buildid_table(h, fd);
if (err < 0) {
pr_debug("failed to write buildid table\n");
@@ -1065,26 +1312,30 @@ struct feature_ops {
bool full_only;
};
-#define FEAT_OPA(n, w, p) \
- [n] = { .name = #n, .write = w, .print = p }
-#define FEAT_OPF(n, w, p) \
- [n] = { .name = #n, .write = w, .print = p, .full_only = true }
+#define FEAT_OPA(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func }
+#define FEAT_OPF(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true }
+
+/* feature_ops whose print handler is not implemented: */
+#define print_trace_info NULL
+#define print_build_id NULL
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
- FEAT_OPA(HEADER_TRACE_INFO, write_trace_info, NULL),
- FEAT_OPA(HEADER_BUILD_ID, write_build_id, NULL),
- FEAT_OPA(HEADER_HOSTNAME, write_hostname, print_hostname),
- FEAT_OPA(HEADER_OSRELEASE, write_osrelease, print_osrelease),
- FEAT_OPA(HEADER_VERSION, write_version, print_version),
- FEAT_OPA(HEADER_ARCH, write_arch, print_arch),
- FEAT_OPA(HEADER_NRCPUS, write_nrcpus, print_nrcpus),
- FEAT_OPA(HEADER_CPUDESC, write_cpudesc, print_cpudesc),
- FEAT_OPA(HEADER_CPUID, write_cpuid, print_cpuid),
- FEAT_OPA(HEADER_TOTAL_MEM, write_total_mem, print_total_mem),
- FEAT_OPA(HEADER_EVENT_DESC, write_event_desc, print_event_desc),
- FEAT_OPA(HEADER_CMDLINE, write_cmdline, print_cmdline),
- FEAT_OPF(HEADER_CPU_TOPOLOGY, write_cpu_topology, print_cpu_topology),
- FEAT_OPF(HEADER_NUMA_TOPOLOGY, write_numa_topology, print_numa_topology),
+ FEAT_OPA(HEADER_TRACE_INFO, trace_info),
+ FEAT_OPA(HEADER_BUILD_ID, build_id),
+ FEAT_OPA(HEADER_HOSTNAME, hostname),
+ FEAT_OPA(HEADER_OSRELEASE, osrelease),
+ FEAT_OPA(HEADER_VERSION, version),
+ FEAT_OPA(HEADER_ARCH, arch),
+ FEAT_OPA(HEADER_NRCPUS, nrcpus),
+ FEAT_OPA(HEADER_CPUDESC, cpudesc),
+ FEAT_OPA(HEADER_CPUID, cpuid),
+ FEAT_OPA(HEADER_TOTAL_MEM, total_mem),
+ FEAT_OPA(HEADER_EVENT_DESC, event_desc),
+ FEAT_OPA(HEADER_CMDLINE, cmdline),
+ FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
+ FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
};
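
With the stem-based macros, a single argument now names both halves of the ops pair via token pasting and stringification; e.g. FEAT_OPA(HEADER_HOSTNAME, hostname) expands to:

        [HEADER_HOSTNAME] = { .name  = "HEADER_HOSTNAME",
                              .write = write_hostname,
                              .print = print_hostname }
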
struct header_print_data {
@@ -1103,9 +1354,9 @@ static int perf_file_section__fprintf_info(struct perf_file_section *section,
"%d, continuing...\n", section->offset, feat);
return 0;
}
- if (feat < HEADER_TRACE_INFO || feat >= HEADER_LAST_FEATURE) {
+ if (feat >= HEADER_LAST_FEATURE) {
pr_warning("unknown feature %d\n", feat);
- return -1;
+ return 0;
}
if (!feat_ops[feat].print)
return 0;
@@ -1132,252 +1383,6 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
return 0;
}
-#define dsos__for_each_with_build_id(pos, head) \
- list_for_each_entry(pos, head, node) \
- if (!pos->has_build_id) \
- continue; \
- else
-
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
- u16 misc, int fd)
-{
- struct dso *pos;
-
- dsos__for_each_with_build_id(pos, head) {
- int err;
- struct build_id_event b;
- size_t len;
-
- if (!pos->hit)
- continue;
- len = pos->long_name_len + 1;
- len = ALIGN(len, NAME_ALIGN);
- memset(&b, 0, sizeof(b));
- memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
- b.pid = pid;
- b.header.misc = misc;
- b.header.size = sizeof(b) + len;
- err = do_write(fd, &b, sizeof(b));
- if (err < 0)
- return err;
- err = write_padded(fd, pos->long_name,
- pos->long_name_len + 1, len);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
-static int machine__write_buildid_table(struct machine *machine, int fd)
-{
- int err;
- u16 kmisc = PERF_RECORD_MISC_KERNEL,
- umisc = PERF_RECORD_MISC_USER;
-
- if (!machine__is_host(machine)) {
- kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
- umisc = PERF_RECORD_MISC_GUEST_USER;
- }
-
- err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
- kmisc, fd);
- if (err == 0)
- err = __dsos__write_buildid_table(&machine->user_dsos,
- machine->pid, umisc, fd);
- return err;
-}
-
-static int dsos__write_buildid_table(struct perf_header *header, int fd)
-{
- struct perf_session *session = container_of(header,
- struct perf_session, header);
- struct rb_node *nd;
- int err = machine__write_buildid_table(&session->host_machine, fd);
-
- if (err)
- return err;
-
- for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- err = machine__write_buildid_table(pos, fd);
- if (err)
- break;
- }
- return err;
-}
-
-int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
- const char *name, bool is_kallsyms)
-{
- const size_t size = PATH_MAX;
- char *realname, *filename = zalloc(size),
- *linkname = zalloc(size), *targetname;
- int len, err = -1;
-
- if (is_kallsyms) {
- if (symbol_conf.kptr_restrict) {
- pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
- return 0;
- }
- realname = (char *)name;
- } else
- realname = realpath(name, NULL);
-
- if (realname == NULL || filename == NULL || linkname == NULL)
- goto out_free;
-
- len = snprintf(filename, size, "%s%s%s",
- debugdir, is_kallsyms ? "/" : "", realname);
- if (mkdir_p(filename, 0755))
- goto out_free;
-
- snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
-
- if (access(filename, F_OK)) {
- if (is_kallsyms) {
- if (copyfile("/proc/kallsyms", filename))
- goto out_free;
- } else if (link(realname, filename) && copyfile(name, filename))
- goto out_free;
- }
-
- len = snprintf(linkname, size, "%s/.build-id/%.2s",
- debugdir, sbuild_id);
-
- if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
- goto out_free;
-
- snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
- targetname = filename + strlen(debugdir) - 5;
- memcpy(targetname, "../..", 5);
-
- if (symlink(targetname, linkname) == 0)
- err = 0;
-out_free:
- if (!is_kallsyms)
- free(realname);
- free(filename);
- free(linkname);
- return err;
-}
-
-static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
- const char *name, const char *debugdir,
- bool is_kallsyms)
-{
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
- build_id__sprintf(build_id, build_id_size, sbuild_id);
-
- return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
-}
-
-int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
-{
- const size_t size = PATH_MAX;
- char *filename = zalloc(size),
- *linkname = zalloc(size);
- int err = -1;
-
- if (filename == NULL || linkname == NULL)
- goto out_free;
-
- snprintf(linkname, size, "%s/.build-id/%.2s/%s",
- debugdir, sbuild_id, sbuild_id + 2);
-
- if (access(linkname, F_OK))
- goto out_free;
-
- if (readlink(linkname, filename, size - 1) < 0)
- goto out_free;
-
- if (unlink(linkname))
- goto out_free;
-
- /*
- * Since the link is relative, we must make it absolute:
- */
- snprintf(linkname, size, "%s/.build-id/%.2s/%s",
- debugdir, sbuild_id, filename);
-
- if (unlink(linkname))
- goto out_free;
-
- err = 0;
-out_free:
- free(filename);
- free(linkname);
- return err;
-}
-
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
-{
- bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
-
- return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
- dso->long_name, debugdir, is_kallsyms);
-}
-
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
-{
- struct dso *pos;
- int err = 0;
-
- dsos__for_each_with_build_id(pos, head)
- if (dso__cache_build_id(pos, debugdir))
- err = -1;
-
- return err;
-}
-
-static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
-{
- int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
- ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
- return ret;
-}
-
-static int perf_session__cache_build_ids(struct perf_session *session)
-{
- struct rb_node *nd;
- int ret;
- char debugdir[PATH_MAX];
-
- snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
-
- if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
- return -1;
-
- ret = machine__cache_build_ids(&session->host_machine, debugdir);
-
- for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret |= machine__cache_build_ids(pos, debugdir);
- }
- return ret ? -1 : 0;
-}
-
-static bool machine__read_build_ids(struct machine *machine, bool with_hits)
-{
- bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
- ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
- return ret;
-}
-
-static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
-{
- struct rb_node *nd;
- bool ret = machine__read_build_ids(&session->host_machine, with_hits);
-
- for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret |= machine__read_build_ids(pos, with_hits);
- }
-
- return ret;
-}
-
static int do_write_feat(int fd, struct perf_header *h, int type,
struct perf_file_section **p,
struct perf_evlist *evlist)
@@ -1386,6 +1391,8 @@ static int do_write_feat(int fd, struct perf_header *h, int type,
int ret = 0;
if (perf_header__has_feat(h, type)) {
+ if (!feat_ops[type].write)
+ return -1;
(*p)->offset = lseek(fd, 0, SEEK_CUR);
@@ -1408,18 +1415,12 @@ static int perf_header__adds_write(struct perf_header *header,
struct perf_evlist *evlist, int fd)
{
int nr_sections;
- struct perf_session *session;
struct perf_file_section *feat_sec, *p;
int sec_size;
u64 sec_start;
+ int feat;
int err;
- session = container_of(header, struct perf_session, header);
-
- if (perf_header__has_feat(header, HEADER_BUILD_ID &&
- !perf_session__read_build_ids(session, true)))
- perf_header__clear_feat(header, HEADER_BUILD_ID);
-
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
@@ -1433,64 +1434,11 @@ static int perf_header__adds_write(struct perf_header *header,
sec_start = header->data_offset + header->data_size;
lseek(fd, sec_start + sec_size, SEEK_SET);
- err = do_write_feat(fd, header, HEADER_TRACE_INFO, &p, evlist);
- if (err)
- goto out_free;
-
- err = do_write_feat(fd, header, HEADER_BUILD_ID, &p, evlist);
- if (err) {
- perf_header__clear_feat(header, HEADER_BUILD_ID);
- goto out_free;
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (do_write_feat(fd, header, feat, &p, evlist))
+ perf_header__clear_feat(header, feat);
}
- err = do_write_feat(fd, header, HEADER_HOSTNAME, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_HOSTNAME);
-
- err = do_write_feat(fd, header, HEADER_OSRELEASE, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_OSRELEASE);
-
- err = do_write_feat(fd, header, HEADER_VERSION, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_VERSION);
-
- err = do_write_feat(fd, header, HEADER_ARCH, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_ARCH);
-
- err = do_write_feat(fd, header, HEADER_NRCPUS, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_NRCPUS);
-
- err = do_write_feat(fd, header, HEADER_CPUDESC, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CPUDESC);
-
- err = do_write_feat(fd, header, HEADER_CPUID, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CPUID);
-
- err = do_write_feat(fd, header, HEADER_TOTAL_MEM, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_TOTAL_MEM);
-
- err = do_write_feat(fd, header, HEADER_CMDLINE, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CMDLINE);
-
- err = do_write_feat(fd, header, HEADER_EVENT_DESC, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_EVENT_DESC);
-
- err = do_write_feat(fd, header, HEADER_CPU_TOPOLOGY, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CPU_TOPOLOGY);
-
- err = do_write_feat(fd, header, HEADER_NUMA_TOPOLOGY, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_NUMA_TOPOLOGY);
-
lseek(fd, sec_start, SEEK_SET);
/*
* may write more than needed due to dropped feature, but
@@ -1499,7 +1447,6 @@ static int perf_header__adds_write(struct perf_header *header,
err = do_write(fd, feat_sec, sec_size);
if (err < 0)
pr_debug("failed to write feature section\n");
-out_free:
free(feat_sec);
return err;
}
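
The unrolled per-feature calls collapse into a single walk of the feature bitmap. A standalone sketch of the same idea over one word (the real for_each_set_bit handles multi-word bitmaps):

#include <stdio.h>

int main(void)
{
        unsigned long feats = (1UL << 2) | (1UL << 3) | (1UL << 9);
        int feat;

        for (feat = 0; feat < 64; feat++)       /* single-word walk */
                if (feats & (1UL << feat))
                        printf("write feature %d, clear on failure\n", feat);
        return 0;
}
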
@@ -1637,20 +1584,20 @@ static int perf_header__getbuffer64(struct perf_header *header,
int perf_header__process_sections(struct perf_header *header, int fd,
void *data,
int (*process)(struct perf_file_section *section,
- struct perf_header *ph,
- int feat, int fd, void *data))
+ struct perf_header *ph,
+ int feat, int fd, void *data))
{
- struct perf_file_section *feat_sec;
+ struct perf_file_section *feat_sec, *sec;
int nr_sections;
int sec_size;
- int idx = 0;
- int err = -1, feat = 1;
+ int feat;
+ int err;
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
- feat_sec = calloc(sizeof(*feat_sec), nr_sections);
+ feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
if (!feat_sec)
return -1;
@@ -1658,20 +1605,16 @@ int perf_header__process_sections(struct perf_header *header, int fd,
lseek(fd, header->data_offset + header->data_size, SEEK_SET);
- if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
+ err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
+ if (err < 0)
goto out_free;
- err = 0;
- while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
- if (perf_header__has_feat(header, feat)) {
- struct perf_file_section *sec = &feat_sec[idx++];
-
- err = process(sec, header, feat, fd, data);
- if (err < 0)
- break;
- }
- ++feat;
+ for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+ err = process(sec++, header, feat, fd, data);
+ if (err < 0)
+ goto out_free;
}
+ err = 0;
out_free:
free(feat_sec);
return err;
@@ -1906,32 +1849,21 @@ static int perf_file_section__process(struct perf_file_section *section,
return 0;
}
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_debug("unknown feature %d, continuing...\n", feat);
+ return 0;
+ }
+
switch (feat) {
case HEADER_TRACE_INFO:
trace_report(fd, false);
break;
-
case HEADER_BUILD_ID:
if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
pr_debug("Failed to read buildids, continuing...\n");
break;
-
- case HEADER_HOSTNAME:
- case HEADER_OSRELEASE:
- case HEADER_VERSION:
- case HEADER_ARCH:
- case HEADER_NRCPUS:
- case HEADER_CPUDESC:
- case HEADER_CPUID:
- case HEADER_TOTAL_MEM:
- case HEADER_CMDLINE:
- case HEADER_EVENT_DESC:
- case HEADER_CPU_TOPOLOGY:
- case HEADER_NUMA_TOPOLOGY:
- break;
-
default:
- pr_debug("unknown feature %d, continuing...\n", feat);
+ break;
}
return 0;
@@ -2041,6 +1973,8 @@ int perf_session__read_header(struct perf_session *session, int fd)
lseek(fd, tmp, SEEK_SET);
}
+ symbol_conf.nr_events = nr_attrs;
+
if (f_header.event_types.size) {
lseek(fd, f_header.event_types.offset, SEEK_SET);
events = malloc(f_header.event_types.size);
@@ -2068,9 +2002,9 @@ out_delete_evlist:
return -ENOMEM;
}
-int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- perf_event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process)
{
union perf_event *ev;
size_t size;
@@ -2092,22 +2026,23 @@ int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
ev->attr.header.size = size;
- err = process(ev, NULL, session);
+ err = process(tool, ev, NULL, NULL);
free(ev);
return err;
}
-int perf_session__synthesize_attrs(struct perf_session *session,
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
perf_event__handler_t process)
{
struct perf_evsel *attr;
int err = 0;
list_for_each_entry(attr, &session->evlist->entries, node) {
- err = perf_event__synthesize_attr(&attr->attr, attr->ids,
- attr->id, process, session);
+ err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
+ attr->id, process);
if (err) {
pr_debug("failed to create perf header attribute\n");
return err;
@@ -2118,23 +2053,23 @@ int perf_session__synthesize_attrs(struct perf_session *session,
}
int perf_event__process_attr(union perf_event *event,
- struct perf_session *session)
+ struct perf_evlist **pevlist)
{
unsigned int i, ids, n_ids;
struct perf_evsel *evsel;
+ struct perf_evlist *evlist = *pevlist;
- if (session->evlist == NULL) {
- session->evlist = perf_evlist__new(NULL, NULL);
- if (session->evlist == NULL)
+ if (evlist == NULL) {
+ *pevlist = evlist = perf_evlist__new(NULL, NULL);
+ if (evlist == NULL)
return -ENOMEM;
}
- evsel = perf_evsel__new(&event->attr.attr,
- session->evlist->nr_entries);
+ evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
if (evsel == NULL)
return -ENOMEM;
- perf_evlist__add(session->evlist, evsel);
+ perf_evlist__add(evlist, evsel);
ids = event->header.size;
ids -= (void *)&event->attr.id - (void *)event;
@@ -2148,18 +2083,16 @@ int perf_event__process_attr(union perf_event *event,
return -ENOMEM;
for (i = 0; i < n_ids; i++) {
- perf_evlist__id_add(session->evlist, evsel, 0, i,
- event->attr.id[i]);
+ perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
}
- perf_session__update_sample_type(session);
-
return 0;
}
-int perf_event__synthesize_event_type(u64 event_id, char *name,
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+ u64 event_id, char *name,
perf_event__handler_t process,
- struct perf_session *session)
+ struct machine *machine)
{
union perf_event ev;
size_t size = 0;
@@ -2177,13 +2110,14 @@ int perf_event__synthesize_event_type(u64 event_id, char *name,
ev.event_type.header.size = sizeof(ev.event_type) -
(sizeof(ev.event_type.event_type.name) - size);
- err = process(&ev, NULL, session);
+ err = process(tool, &ev, NULL, machine);
return err;
}
-int perf_event__synthesize_event_types(perf_event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
{
struct perf_trace_event_type *type;
int i, err = 0;
@@ -2191,9 +2125,9 @@ int perf_event__synthesize_event_types(perf_event__handler_t process,
for (i = 0; i < event_count; i++) {
type = &events[i];
- err = perf_event__synthesize_event_type(type->event_id,
+ err = perf_event__synthesize_event_type(tool, type->event_id,
type->name, process,
- session);
+ machine);
if (err) {
pr_debug("failed to create perf header event type\n");
return err;
@@ -2203,8 +2137,8 @@ int perf_event__synthesize_event_types(perf_event__handler_t process,
return err;
}
-int perf_event__process_event_type(union perf_event *event,
- struct perf_session *session __unused)
+int perf_event__process_event_type(struct perf_tool *tool __unused,
+ union perf_event *event)
{
if (perf_header__push_event(event->event_type.event_type.event_id,
event->event_type.event_type.name) < 0)
@@ -2213,9 +2147,9 @@ int perf_event__process_event_type(union perf_event *event,
return 0;
}
-int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
- perf_event__handler_t process,
- struct perf_session *session __unused)
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
+ struct perf_evlist *evlist,
+ perf_event__handler_t process)
{
union perf_event ev;
struct tracing_data *tdata;
@@ -2246,7 +2180,7 @@ int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
ev.tracing_data.header.size = sizeof(ev.tracing_data);
ev.tracing_data.size = aligned_size;
- process(&ev, NULL, session);
+ process(tool, &ev, NULL, NULL);
/*
* The put function will copy all the tracing data
@@ -2288,10 +2222,10 @@ int perf_event__process_tracing_data(union perf_event *event,
return size_read + padding;
}
-int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
perf_event__handler_t process,
- struct machine *machine,
- struct perf_session *session)
+ struct machine *machine)
{
union perf_event ev;
size_t len;
@@ -2311,12 +2245,13 @@ int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
ev.build_id.header.size = sizeof(ev.build_id) + len;
memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
- err = process(&ev, NULL, session);
+ err = process(tool, &ev, NULL, machine);
return err;
}
-int perf_event__process_build_id(union perf_event *event,
+int perf_event__process_build_id(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_session *session)
{
__event_process_build_id(&event->build_id,
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 3d5a742f4a2a..ac4ec956024e 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -10,7 +10,8 @@
#include <linux/bitmap.h>
enum {
- HEADER_TRACE_INFO = 1,
+ HEADER_RESERVED = 0, /* always cleared */
+ HEADER_TRACE_INFO = 1,
HEADER_BUILD_ID,
HEADER_HOSTNAME,
@@ -27,10 +28,9 @@ enum {
HEADER_NUMA_TOPOLOGY,
HEADER_LAST_FEATURE,
+ HEADER_FEAT_BITS = 256,
};
-#define HEADER_FEAT_BITS 256
-
struct perf_file_section {
u64 offset;
u64 size;
@@ -68,6 +68,7 @@ struct perf_header {
};
struct perf_evlist;
+struct perf_session;
int perf_session__read_header(struct perf_session *session, int fd);
int perf_session__write_header(struct perf_session *session,
@@ -96,32 +97,36 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
-int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- perf_event__handler_t process,
- struct perf_session *session);
-int perf_session__synthesize_attrs(struct perf_session *session,
- perf_event__handler_t process);
-int perf_event__process_attr(union perf_event *event, struct perf_session *session);
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process);
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process);
+int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist);
-int perf_event__synthesize_event_type(u64 event_id, char *name,
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+ u64 event_id, char *name,
perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__synthesize_event_types(perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__process_event_type(union perf_event *event,
- struct perf_session *session);
-
-int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
- perf_event__handler_t process,
- struct perf_session *session);
+ struct machine *machine);
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__process_event_type(struct perf_tool *tool,
+ union perf_event *event);
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool,
+ int fd, struct perf_evlist *evlist,
+ perf_event__handler_t process);
int perf_event__process_tracing_data(union perf_event *event,
struct perf_session *session);
-int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
perf_event__handler_t process,
- struct machine *machine,
- struct perf_session *session);
-int perf_event__process_build_id(union perf_event *event,
+ struct machine *machine);
+int perf_event__process_build_id(struct perf_tool *tool,
+ union perf_event *event,
struct perf_session *session);
/*
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 89289c8e935e..ff6f9d56ea41 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -117,7 +117,6 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
int evidx __used,
- int nr_events __used,
void(*timer)(void *arg) __used,
void *arg __used,
int delay_secs __used)
@@ -128,7 +127,7 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
#define K_RIGHT -2
#else
#include "ui/keysyms.h"
-int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events,
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
void(*timer)(void *arg), void *arg, int delay_secs);
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 305c8484f200..62cdee78db7b 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -9,6 +9,17 @@
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but uses "bit" as the value to start from */
+#define for_each_set_bit_cont(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
static inline void set_bit(int nr, unsigned long *addr)
{
addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
@@ -30,4 +41,111 @@ static inline unsigned long hweight_long(unsigned long w)
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * __ffs - find first set bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+/*
+ * Find the first set bit in a memory region.
+ */
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+
+/*
+ * Find the next set bit in a memory region.
+ */
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
#endif
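
The macros and search routines added above mirror the kernel's generic
bitmap helpers so tools code can walk bitmaps the same way the kernel
does. A minimal sketch of how they compose (illustrative, not part of
this patch; assumes the definitions above are in scope and <stdio.h>
is included):

    unsigned long bits = 0xa41UL;   /* bits 0, 6, 9 and 11 set */
    unsigned int bit;

    /* visits 0, 6, 9, 11 via find_first_bit()/find_next_bit() */
    for_each_set_bit(bit, &bits, BITS_PER_LONG)
        printf("set: %u\n", bit);

    /* resumes scanning from a known position: visits 6, 9, 11 */
    bit = 6;
    for_each_set_bit_cont(bit, &bits, BITS_PER_LONG)
        printf("cont: %u\n", bit);
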
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 78284b13e808..316aa0ab7122 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -562,6 +562,10 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid)
INIT_LIST_HEAD(&self->user_dsos);
INIT_LIST_HEAD(&self->kernel_dsos);
+ self->threads = RB_ROOT;
+ INIT_LIST_HEAD(&self->dead_threads);
+ self->last_match = NULL;
+
self->kmaps.machine = self;
self->pid = pid;
self->root_dir = strdup(root_dir);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 890d85545d0f..2b8017f8a930 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -18,9 +18,11 @@ enum map_type {
extern const char *map_type__name[MAP__NR_TYPES];
struct dso;
+struct ip_callchain;
struct ref_reloc_sym;
struct map_groups;
struct machine;
+struct perf_evsel;
struct map {
union {
@@ -61,7 +63,11 @@ struct map_groups {
struct machine {
struct rb_node rb_node;
pid_t pid;
+ u16 id_hdr_size;
char *root_dir;
+ struct rb_root threads;
+ struct list_head dead_threads;
+ struct thread *last_match;
struct list_head user_dsos;
struct list_head kernel_dsos;
struct map_groups kmaps;
@@ -148,6 +154,13 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid);
void machine__exit(struct machine *self);
void machine__delete(struct machine *self);
+int machine__resolve_callchain(struct machine *machine,
+ struct perf_evsel *evsel, struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
+ u64 addr);
+
/*
* Default guest kernel is defined by parameter --guestkallsyms
* and --guestmodules
@@ -190,6 +203,12 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
struct map **mapp,
symbol_filter_t filter);
+
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
+void machine__remove_thread(struct machine *machine, struct thread *th);
+
+size_t machine__fprintf(struct machine *machine, FILE *fp);
+
static inline
struct symbol *machine__find_kernel_symbol(struct machine *self,
enum map_type type, u64 addr,
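
With the thread rb-tree, the dead_threads list and the lookup cache now
living in struct machine rather than struct perf_session, thread
bookkeeping becomes per-machine (host and guests alike). A hypothetical
event consumer now resolves threads against the machine the event was
routed to:

    /* illustrative: threads are looked up on the owning machine */
    struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

    if (thread == NULL)
        return -1;
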
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 928918b796b2..531c283fc0c5 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -25,8 +25,6 @@ enum event_result {
EVT_HANDLED_ALL
};
-char debugfs_path[MAXPATHLEN];
-
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
@@ -40,6 +38,7 @@ static struct event_symbol event_symbols[] = {
{ CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
{ CHW(BRANCH_MISSES), "branch-misses", "" },
{ CHW(BUS_CYCLES), "bus-cycles", "" },
+ { CHW(REF_CPU_CYCLES), "ref-cycles", "" },
{ CSW(CPU_CLOCK), "cpu-clock", "" },
{ CSW(TASK_CLOCK), "task-clock", "" },
@@ -70,6 +69,7 @@ static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
"bus-cycles",
"stalled-cycles-frontend",
"stalled-cycles-backend",
+ "ref-cycles",
};
static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
@@ -140,7 +140,7 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
char evt_path[MAXPATHLEN];
int fd;
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
sys_dir->d_name, evt_dir->d_name);
fd = open(evt_path, O_RDONLY);
if (fd < 0)
@@ -171,16 +171,16 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return NULL;
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return NULL;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
@@ -447,7 +447,7 @@ parse_single_tracepoint_event(char *sys_name,
u64 id;
int fd;
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
sys_name, evt_name);
fd = open(evt_path, O_RDONLY);
@@ -485,7 +485,7 @@ parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name,
struct dirent *evt_ent;
DIR *evt_dir;
- snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
+ snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
evt_dir = opendir(evt_path);
if (!evt_dir) {
@@ -528,7 +528,7 @@ parse_tracepoint_event(struct perf_evlist *evlist, const char **strp,
char sys_name[MAX_EVENT_LENGTH];
unsigned int sys_length, evt_length;
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return 0;
evt_name = strchr(*strp, ':');
@@ -920,10 +920,10 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return;
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return;
@@ -932,7 +932,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
!strglobmatch(sys_dirent.d_name, subsys_glob))
continue;
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
@@ -964,16 +964,16 @@ int is_valid_tracepoint(const char *event_string)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return 0;
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return 0;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 2f8e375e038d..7e0cbe75d5f1 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -39,7 +39,6 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
int print_hwcache_events(const char *event_glob);
extern int is_valid_tracepoint(const char *event_string);
-extern char debugfs_path[];
extern int valid_debugfs_mount(const char *debugfs);
#endif /* __PERF_PARSE_EVENTS_H */
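
With the debugfs_path global gone, event paths are built from
tracing_events_path (backed by util/debugfs.c, which the setup.py hunk
below also adds to the python extension). A sketch of the resulting
lookup, using a hypothetical tracepoint:

    char evt_path[MAXPATHLEN];
    int fd;

    /* illustrative: id files now live under tracing_events_path */
    snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id",
             tracing_events_path, "sched", "sched_switch");
    fd = open(evt_path, O_RDONLY);
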
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 1132c8f0ce89..17e94d0c36f9 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -5,7 +5,6 @@
#include "util.h"
#include "probe-event.h"
-#define MAX_PATH_LEN 256
#define MAX_PROBE_BUFFER 1024
#define MAX_PROBES 128
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 74350ffb57fe..e30749e38a9b 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -27,7 +27,10 @@
#include "../../perf.h"
#include "../util.h"
+#include "../thread.h"
+#include "../event.h"
#include "../trace-event.h"
+#include "../evsel.h"
#include <EXTERN.h>
#include <perl.h>
@@ -245,11 +248,11 @@ static inline struct event *find_cache_event(int type)
return event;
}
-static void perl_process_event(union perf_event *pevent __unused,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct perf_session *session __unused,
- struct thread *thread)
+static void perl_process_tracepoint(union perf_event *pevent __unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine __unused,
+ struct thread *thread)
{
struct format_field *field;
static char handler[256];
@@ -265,6 +268,9 @@ static void perl_process_event(union perf_event *pevent __unused,
dSP;
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ return;
+
type = trace_parse_common_type(data);
event = find_cache_event(type);
@@ -332,6 +338,42 @@ static void perl_process_event(union perf_event *pevent __unused,
LEAVE;
}
+static void perl_process_event_generic(union perf_event *pevent __unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel __unused,
+ struct machine *machine __unused,
+ struct thread *thread __unused)
+{
+ dSP;
+
+ if (!get_cv("process_event", 0))
+ return;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+ XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size)));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size)));
+ PUTBACK;
+ call_pv("process_event", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void perl_process_event(union perf_event *pevent,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine,
+ struct thread *thread)
+{
+ perl_process_tracepoint(pevent, sample, evsel, machine, thread);
+ perl_process_event_generic(pevent, sample, evsel, machine, thread);
+}
+
static void run_start_sub(void)
{
dSP; /* access to Perl stack */
@@ -553,7 +595,28 @@ static int perl_generate_script(const char *outfile)
fprintf(ofp, "sub print_header\n{\n"
"\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
"\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t "
- "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}");
+ "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n");
+
+ fprintf(ofp,
+ "\n# Packed byte string args of process_event():\n"
+ "#\n"
+ "# $event:\tunion perf_event\tutil/event.h\n"
+ "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n"
+ "# $sample:\tstruct perf_sample\tutil/event.h\n"
+ "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n"
+ "\n"
+ "sub process_event\n"
+ "{\n"
+ "\tmy ($event, $attr, $sample, $raw_data) = @_;\n"
+ "\n"
+ "\tmy @event\t= unpack(\"LSS\", $event);\n"
+ "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n"
+ "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n"
+ "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n"
+ "\n"
+ "\tuse Data::Dumper;\n"
+ "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n"
+ "}\n");
fclose(ofp);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 6ccf70e8d8f2..0b2a48783172 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -29,6 +29,8 @@
#include "../../perf.h"
#include "../util.h"
+#include "../event.h"
+#include "../thread.h"
#include "../trace-event.h"
PyMODINIT_FUNC initperf_trace_context(void);
@@ -207,7 +209,7 @@ static inline struct event *find_cache_event(int type)
static void python_process_event(union perf_event *pevent __unused,
struct perf_sample *sample,
struct perf_evsel *evsel __unused,
- struct perf_session *session __unused,
+ struct machine *machine __unused,
struct thread *thread)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0f4555ce9063..b5ca2558c7bb 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -10,6 +10,7 @@
#include "evlist.h"
#include "evsel.h"
#include "session.h"
+#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
@@ -78,39 +79,13 @@ out_close:
return -1;
}
-static void perf_session__id_header_size(struct perf_session *session)
-{
- struct perf_sample *data;
- u64 sample_type = session->sample_type;
- u16 size = 0;
-
- if (!session->sample_id_all)
- goto out;
-
- if (sample_type & PERF_SAMPLE_TID)
- size += sizeof(data->tid) * 2;
-
- if (sample_type & PERF_SAMPLE_TIME)
- size += sizeof(data->time);
-
- if (sample_type & PERF_SAMPLE_ID)
- size += sizeof(data->id);
-
- if (sample_type & PERF_SAMPLE_STREAM_ID)
- size += sizeof(data->stream_id);
-
- if (sample_type & PERF_SAMPLE_CPU)
- size += sizeof(data->cpu) * 2;
-out:
- session->id_hdr_size = size;
-}
-
void perf_session__update_sample_type(struct perf_session *self)
{
self->sample_type = perf_evlist__sample_type(self->evlist);
self->sample_size = __perf_evsel__sample_size(self->sample_type);
self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
- perf_session__id_header_size(self);
+ self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
+ self->host_machine.id_hdr_size = self->id_hdr_size;
}
int perf_session__create_kernel_maps(struct perf_session *self)
@@ -130,18 +105,26 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self)
struct perf_session *perf_session__new(const char *filename, int mode,
bool force, bool repipe,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
- size_t len = filename ? strlen(filename) + 1 : 0;
- struct perf_session *self = zalloc(sizeof(*self) + len);
+ struct perf_session *self;
+ struct stat st;
+ size_t len;
+
+ if (!filename || !strlen(filename)) {
+ if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+ filename = "-";
+ else
+ filename = "perf.data";
+ }
+
+ len = strlen(filename);
+ self = zalloc(sizeof(*self) + len);
if (self == NULL)
goto out;
memcpy(self->filename, filename, len);
- self->threads = RB_ROOT;
- INIT_LIST_HEAD(&self->dead_threads);
- self->last_match = NULL;
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
@@ -171,10 +154,10 @@ struct perf_session *perf_session__new(const char *filename, int mode,
goto out_delete;
}
- if (ops && ops->ordering_requires_timestamps &&
- ops->ordered_samples && !self->sample_id_all) {
+ if (tool && tool->ordering_requires_timestamps &&
+ tool->ordered_samples && !self->sample_id_all) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
- ops->ordered_samples = false;
+ tool->ordered_samples = false;
}
out:
@@ -184,17 +167,22 @@ out_delete:
return NULL;
}
-static void perf_session__delete_dead_threads(struct perf_session *self)
+static void machine__delete_dead_threads(struct machine *machine)
{
struct thread *n, *t;
- list_for_each_entry_safe(t, n, &self->dead_threads, node) {
+ list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
list_del(&t->node);
thread__delete(t);
}
}
-static void perf_session__delete_threads(struct perf_session *self)
+static void perf_session__delete_dead_threads(struct perf_session *session)
+{
+ machine__delete_dead_threads(&session->host_machine);
+}
+
+static void machine__delete_threads(struct machine *self)
{
struct rb_node *nd = rb_first(&self->threads);
@@ -207,6 +195,11 @@ static void perf_session__delete_threads(struct perf_session *self)
}
}
+static void perf_session__delete_threads(struct perf_session *session)
+{
+ machine__delete_threads(&session->host_machine);
+}
+
void perf_session__delete(struct perf_session *self)
{
perf_session__destroy_kernel_maps(self);
@@ -217,7 +210,7 @@ void perf_session__delete(struct perf_session *self)
free(self);
}
-void perf_session__remove_thread(struct perf_session *self, struct thread *th)
+void machine__remove_thread(struct machine *self, struct thread *th)
{
self->last_match = NULL;
rb_erase(&th->rb_node, &self->threads);
@@ -236,16 +229,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
return 0;
}
-int perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent)
+int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
int err;
- callchain_cursor_reset(&self->callchain_cursor);
+ callchain_cursor_reset(&evsel->hists.callchain_cursor);
for (i = 0; i < chain->nr; i++) {
u64 ip;
@@ -272,7 +265,7 @@ int perf_session__resolve_callchain(struct perf_session *self,
al.filtered = false;
thread__find_addr_location(thread, self, cpumode,
- MAP__FUNCTION, thread->pid, ip, &al, NULL);
+ MAP__FUNCTION, ip, &al, NULL);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
symbol__match_parent_regex(al.sym))
@@ -281,7 +274,7 @@ int perf_session__resolve_callchain(struct perf_session *self,
break;
}
- err = callchain_cursor_append(&self->callchain_cursor,
+ err = callchain_cursor_append(&evsel->hists.callchain_cursor,
ip, al.map, al.sym);
if (err)
return err;
@@ -290,75 +283,91 @@ int perf_session__resolve_callchain(struct perf_session *self,
return 0;
}
-static int process_event_synth_stub(union perf_event *event __used,
- struct perf_session *session __used)
+static int process_event_synth_tracing_data_stub(union perf_event *event __used,
+ struct perf_session *session __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_synth_attr_stub(union perf_event *event __used,
+ struct perf_evlist **pevlist __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_sample_stub(union perf_event *event __used,
+static int process_event_sample_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
struct perf_sample *sample __used,
struct perf_evsel *evsel __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_stub(union perf_event *event __used,
+static int process_event_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round_stub(union perf_event *event __used,
- struct perf_session *session __used,
- struct perf_event_ops *ops __used)
+static int process_finished_round_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
+ struct perf_session *perf_session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round(union perf_event *event,
- struct perf_session *session,
- struct perf_event_ops *ops);
+static int process_event_type_stub(struct perf_tool *tool __used,
+ union perf_event *event __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
-static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+
+static void perf_tool__fill_defaults(struct perf_tool *tool)
{
- if (handler->sample == NULL)
- handler->sample = process_event_sample_stub;
- if (handler->mmap == NULL)
- handler->mmap = process_event_stub;
- if (handler->comm == NULL)
- handler->comm = process_event_stub;
- if (handler->fork == NULL)
- handler->fork = process_event_stub;
- if (handler->exit == NULL)
- handler->exit = process_event_stub;
- if (handler->lost == NULL)
- handler->lost = perf_event__process_lost;
- if (handler->read == NULL)
- handler->read = process_event_stub;
- if (handler->throttle == NULL)
- handler->throttle = process_event_stub;
- if (handler->unthrottle == NULL)
- handler->unthrottle = process_event_stub;
- if (handler->attr == NULL)
- handler->attr = process_event_synth_stub;
- if (handler->event_type == NULL)
- handler->event_type = process_event_synth_stub;
- if (handler->tracing_data == NULL)
- handler->tracing_data = process_event_synth_stub;
- if (handler->build_id == NULL)
- handler->build_id = process_event_synth_stub;
- if (handler->finished_round == NULL) {
- if (handler->ordered_samples)
- handler->finished_round = process_finished_round;
+ if (tool->sample == NULL)
+ tool->sample = process_event_sample_stub;
+ if (tool->mmap == NULL)
+ tool->mmap = process_event_stub;
+ if (tool->comm == NULL)
+ tool->comm = process_event_stub;
+ if (tool->fork == NULL)
+ tool->fork = process_event_stub;
+ if (tool->exit == NULL)
+ tool->exit = process_event_stub;
+ if (tool->lost == NULL)
+ tool->lost = perf_event__process_lost;
+ if (tool->read == NULL)
+ tool->read = process_event_sample_stub;
+ if (tool->throttle == NULL)
+ tool->throttle = process_event_stub;
+ if (tool->unthrottle == NULL)
+ tool->unthrottle = process_event_stub;
+ if (tool->attr == NULL)
+ tool->attr = process_event_synth_attr_stub;
+ if (tool->event_type == NULL)
+ tool->event_type = process_event_type_stub;
+ if (tool->tracing_data == NULL)
+ tool->tracing_data = process_event_synth_tracing_data_stub;
+ if (tool->build_id == NULL)
+ tool->build_id = process_finished_round_stub;
+ if (tool->finished_round == NULL) {
+ if (tool->ordered_samples)
+ tool->finished_round = process_finished_round;
else
- handler->finished_round = process_finished_round_stub;
+ tool->finished_round = process_finished_round_stub;
}
}
@@ -490,11 +499,11 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_event_ops *ops,
+ struct perf_tool *tool,
u64 file_offset);
static void flush_sample_queue(struct perf_session *s,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
@@ -505,7 +514,7 @@ static void flush_sample_queue(struct perf_session *s,
unsigned idx = 0, progress_next = os->nr_samples / 16;
int ret;
- if (!ops->ordered_samples || !limit)
+ if (!tool->ordered_samples || !limit)
return;
list_for_each_entry_safe(iter, tmp, head, list) {
@@ -516,7 +525,7 @@ static void flush_sample_queue(struct perf_session *s,
if (ret)
pr_err("Can't parse sample, err = %d\n", ret);
else
- perf_session_deliver_event(s, iter->event, &sample, ops,
+ perf_session_deliver_event(s, iter->event, &sample, tool,
iter->file_offset);
os->last_flush = iter->timestamp;
@@ -578,11 +587,11 @@ static void flush_sample_queue(struct perf_session *s,
 * Flush all events below timestamp 7
* etc...
*/
-static int process_finished_round(union perf_event *event __used,
- struct perf_session *session,
- struct perf_event_ops *ops)
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event __used,
+ struct perf_session *session)
{
- flush_sample_queue(session, ops);
+ flush_sample_queue(session, tool);
session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
return 0;
@@ -737,13 +746,26 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
callchain__printf(sample);
}
+static struct machine *
+ perf_session__find_machine_for_cpumode(struct perf_session *session,
+ union perf_event *event)
+{
+ const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
+ return perf_session__find_machine(session, event->ip.pid);
+
+ return perf_session__find_host_machine(session);
+}
+
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_event_ops *ops,
+ struct perf_tool *tool,
u64 file_offset)
{
struct perf_evsel *evsel;
+ struct machine *machine;
dump_event(session, event, file_offset, sample);
@@ -765,6 +787,8 @@ static int perf_session_deliver_event(struct perf_session *session,
hists__inc_nr_events(&evsel->hists, event->header.type);
}
+ machine = perf_session__find_machine_for_cpumode(session, event);
+
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
dump_sample(session, event, sample);
@@ -772,23 +796,25 @@ static int perf_session_deliver_event(struct perf_session *session,
++session->hists.stats.nr_unknown_id;
return -1;
}
- return ops->sample(event, sample, evsel, session);
+ return tool->sample(tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
- return ops->mmap(event, sample, session);
+ return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_COMM:
- return ops->comm(event, sample, session);
+ return tool->comm(tool, event, sample, machine);
case PERF_RECORD_FORK:
- return ops->fork(event, sample, session);
+ return tool->fork(tool, event, sample, machine);
case PERF_RECORD_EXIT:
- return ops->exit(event, sample, session);
+ return tool->exit(tool, event, sample, machine);
case PERF_RECORD_LOST:
- return ops->lost(event, sample, session);
+ if (tool->lost == perf_event__process_lost)
+ session->hists.stats.total_lost += event->lost.lost;
+ return tool->lost(tool, event, sample, machine);
case PERF_RECORD_READ:
- return ops->read(event, sample, session);
+ return tool->read(tool, event, sample, evsel, machine);
case PERF_RECORD_THROTTLE:
- return ops->throttle(event, sample, session);
+ return tool->throttle(tool, event, sample, machine);
case PERF_RECORD_UNTHROTTLE:
- return ops->unthrottle(event, sample, session);
+ return tool->unthrottle(tool, event, sample, machine);
default:
++session->hists.stats.nr_unknown_events;
return -1;
@@ -812,24 +838,29 @@ static int perf_session__preprocess_sample(struct perf_session *session,
}
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
- struct perf_event_ops *ops, u64 file_offset)
+ struct perf_tool *tool, u64 file_offset)
{
+ int err;
+
dump_event(session, event, file_offset, NULL);
/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
- return ops->attr(event, session);
+ err = tool->attr(event, &session->evlist);
+ if (err == 0)
+ perf_session__update_sample_type(session);
+ return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
- return ops->event_type(event, session);
+ return tool->event_type(tool, event);
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(session->fd, file_offset, SEEK_SET);
- return ops->tracing_data(event, session);
+ return tool->tracing_data(event, session);
case PERF_RECORD_HEADER_BUILD_ID:
- return ops->build_id(event, session);
+ return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
- return ops->finished_round(event, session, ops);
+ return tool->finished_round(tool, event, session);
default:
return -EINVAL;
}
@@ -837,7 +868,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
static int perf_session__process_event(struct perf_session *session,
union perf_event *event,
- struct perf_event_ops *ops,
+ struct perf_tool *tool,
u64 file_offset)
{
struct perf_sample sample;
@@ -853,7 +884,7 @@ static int perf_session__process_event(struct perf_session *session,
hists__inc_nr_events(&session->hists, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
- return perf_session__process_user_event(session, event, ops, file_offset);
+ return perf_session__process_user_event(session, event, tool, file_offset);
/*
* For all kernel events we get the sample data
@@ -866,14 +897,14 @@ static int perf_session__process_event(struct perf_session *session,
if (perf_session__preprocess_sample(session, event, &sample))
return 0;
- if (ops->ordered_samples) {
+ if (tool->ordered_samples) {
ret = perf_session_queue_event(session, event, &sample,
file_offset);
if (ret != -ETIME)
return ret;
}
- return perf_session_deliver_event(session, event, &sample, ops,
+ return perf_session_deliver_event(session, event, &sample, tool,
file_offset);
}
@@ -884,6 +915,11 @@ void perf_event_header__bswap(struct perf_event_header *self)
self->size = bswap_16(self->size);
}
+struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
+{
+ return machine__findnew_thread(&session->host_machine, pid);
+}
+
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
struct thread *thread = perf_session__findnew(self, 0);
@@ -897,9 +933,9 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
}
static void perf_session__warn_about_errors(const struct perf_session *session,
- const struct perf_event_ops *ops)
+ const struct perf_tool *tool)
{
- if (ops->lost == perf_event__process_lost &&
+ if (tool->lost == perf_event__process_lost &&
session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
ui__warning("Processed %d events and lost %d chunks!\n\n"
"Check IO/CPU overload!\n\n",
@@ -934,7 +970,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
union perf_event event;
uint32_t size;
@@ -943,7 +979,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self,
int err;
void *p;
- perf_event_ops__fill_defaults(ops);
+ perf_tool__fill_defaults(tool);
head = 0;
more:
@@ -979,8 +1015,7 @@ more:
}
}
- if (size == 0 ||
- (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
+ if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
head, event.header.size, event.header.type);
/*
@@ -1003,7 +1038,7 @@ more:
done:
err = 0;
out_err:
- perf_session__warn_about_errors(self, ops);
+ perf_session__warn_about_errors(self, tool);
perf_session_free_sample_buffers(self);
return err;
}
@@ -1034,7 +1069,7 @@ fetch_mmaped_event(struct perf_session *session,
int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
- u64 file_size, struct perf_event_ops *ops)
+ u64 file_size, struct perf_tool *tool)
{
u64 head, page_offset, file_offset, file_pos, progress_next;
int err, mmap_prot, mmap_flags, map_idx = 0;
@@ -1043,7 +1078,7 @@ int __perf_session__process_events(struct perf_session *session,
union perf_event *event;
uint32_t size;
- perf_event_ops__fill_defaults(ops);
+ perf_tool__fill_defaults(tool);
page_size = sysconf(_SC_PAGESIZE);
@@ -1098,7 +1133,7 @@ more:
size = event->header.size;
if (size == 0 ||
- perf_session__process_event(session, event, ops, file_pos) < 0) {
+ perf_session__process_event(session, event, tool, file_pos) < 0) {
dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
file_offset + head, event->header.size,
event->header.type);
@@ -1127,15 +1162,15 @@ more:
err = 0;
/* do the final flush for ordered samples */
session->ordered_samples.next_flush = ULLONG_MAX;
- flush_sample_queue(session, ops);
+ flush_sample_queue(session, tool);
out_err:
- perf_session__warn_about_errors(session, ops);
+ perf_session__warn_about_errors(session, tool);
perf_session_free_sample_buffers(session);
return err;
}
int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
int err;
@@ -1146,9 +1181,9 @@ int perf_session__process_events(struct perf_session *self,
err = __perf_session__process_events(self,
self->header.data_offset,
self->header.data_size,
- self->size, ops);
+ self->size, tool);
else
- err = __perf_session__process_pipe_events(self, ops);
+ err = __perf_session__process_pipe_events(self, tool);
return err;
}
@@ -1163,9 +1198,8 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg)
return true;
}
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
- const char *symbol_name,
- u64 addr)
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
+ const char *symbol_name, u64 addr)
{
char *bracket;
enum map_type i;
@@ -1224,6 +1258,27 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
return ret;
}
+size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
+{
+ /*
+ * FIXME: Here we have to actually print all the machines in this
+ * session, not just the host...
+ */
+ return machine__fprintf(&session->host_machine, fp);
+}
+
+void perf_session__remove_thread(struct perf_session *session,
+ struct thread *th)
+{
+ /*
+ * FIXME: This one makes no sense: the thread should be removed from
+ * the machine it belongs to, but a perf_session can have many machines,
+ * so always doing it on ->host_machine is wrong. Fix when auditing all
+ * the 'perf kvm' code.
+ */
+ machine__remove_thread(&session->host_machine, th);
+}
+
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type)
{
@@ -1236,17 +1291,16 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
return NULL;
}
-void perf_session__print_ip(union perf_event *event,
- struct perf_sample *sample,
- struct perf_session *session,
- int print_sym, int print_dso)
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+ struct machine *machine, struct perf_evsel *evsel,
+ int print_sym, int print_dso)
{
struct addr_location al;
const char *symname, *dsoname;
- struct callchain_cursor *cursor = &session->callchain_cursor;
+ struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
struct callchain_cursor_node *node;
- if (perf_event__preprocess_sample(event, session, &al, sample,
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
NULL) < 0) {
error("problem processing %d event, skipping it.\n",
event->header.type);
@@ -1255,7 +1309,7 @@ void perf_session__print_ip(union perf_event *event,
if (symbol_conf.use_callchain && sample->callchain) {
- if (perf_session__resolve_callchain(session, al.thread,
+ if (machine__resolve_callchain(machine, evsel, al.thread,
sample->callchain, NULL) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 6e393c98eb34..37bc38381fb6 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -30,9 +30,6 @@ struct perf_session {
struct perf_header header;
unsigned long size;
unsigned long mmap_window;
- struct rb_root threads;
- struct list_head dead_threads;
- struct thread *last_match;
struct machine host_machine;
struct rb_root machines;
struct perf_evlist *evlist;
@@ -53,65 +50,31 @@ struct perf_session {
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;
- struct callchain_cursor callchain_cursor;
- char filename[0];
+ char filename[1];
};
-struct perf_evsel;
-struct perf_event_ops;
-
-typedef int (*event_sample)(union perf_event *event, struct perf_sample *sample,
- struct perf_evsel *evsel, struct perf_session *session);
-typedef int (*event_op)(union perf_event *self, struct perf_sample *sample,
- struct perf_session *session);
-typedef int (*event_synth_op)(union perf_event *self,
- struct perf_session *session);
-typedef int (*event_op2)(union perf_event *self, struct perf_session *session,
- struct perf_event_ops *ops);
-
-struct perf_event_ops {
- event_sample sample;
- event_op mmap,
- comm,
- fork,
- exit,
- lost,
- read,
- throttle,
- unthrottle;
- event_synth_op attr,
- event_type,
- tracing_data,
- build_id;
- event_op2 finished_round;
- bool ordered_samples;
- bool ordering_requires_timestamps;
-};
+struct perf_tool;
struct perf_session *perf_session__new(const char *filename, int mode,
bool force, bool repipe,
- struct perf_event_ops *ops);
+ struct perf_tool *tool);
void perf_session__delete(struct perf_session *self);
void perf_event_header__bswap(struct perf_event_header *self);
int __perf_session__process_events(struct perf_session *self,
u64 data_offset, u64 data_size, u64 size,
- struct perf_event_ops *ops);
+ struct perf_tool *tool);
int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *event_ops);
+ struct perf_tool *tool);
-int perf_session__resolve_callchain(struct perf_session *self,
+int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent);
bool perf_session__has_traces(struct perf_session *self, const char *msg);
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
- const char *symbol_name,
- u64 addr);
-
void mem_bswap_64(void *src, int byte_size);
void perf_event__attr_swap(struct perf_event_attr *attr);
@@ -144,12 +107,16 @@ struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t p
static inline
void perf_session__process_machines(struct perf_session *self,
+ struct perf_tool *tool,
machine__process_t process)
{
- process(&self->host_machine, self);
- return machines__process(&self->machines, process, self);
+ process(&self->host_machine, tool);
+ return machines__process(&self->machines, process, tool);
}
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
+
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
@@ -167,13 +134,20 @@ static inline int perf_session__parse_sample(struct perf_session *session,
session->header.needs_swap);
}
+static inline int perf_session__synthesize_sample(struct perf_session *session,
+ union perf_event *event,
+ const struct perf_sample *sample)
+{
+ return perf_event__synthesize_sample(event, session->sample_type,
+ sample, session->header.needs_swap);
+}
+
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
-void perf_session__print_ip(union perf_event *event,
- struct perf_sample *sample,
- struct perf_session *session,
- int print_sym, int print_dso);
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+ struct machine *machine, struct perf_evsel *evsel,
+ int print_sym, int print_dso);
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap);
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 95d370074928..36d4c5619575 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -27,7 +27,8 @@ build_tmp = getenv('PYTHON_EXTBUILD_TMP')
perf = Extension('perf',
sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
- 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
+ 'util/util.c', 'util/xyarray.c', 'util/cgroup.c',
+ 'util/debugfs.c'],
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 632b50c7bc26..215d50f2042e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1757,7 +1757,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
struct stat st;
 /* sshfs might return bad dent->d_type, so we have to stat */
- sprintf(path, "%s/%s", dir_name, dent->d_name);
+ snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
if (stat(path, &st))
continue;
@@ -1766,8 +1766,6 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
!strcmp(dent->d_name, ".."))
continue;
- snprintf(path, sizeof(path), "%s/%s",
- dir_name, dent->d_name);
ret = map_groups__set_modules_path_dir(mg, path);
if (ret < 0)
goto out;
@@ -1788,9 +1786,6 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
if (map == NULL)
continue;
- snprintf(path, sizeof(path), "%s/%s",
- dir_name, dent->d_name);
-
long_name = strdup(path);
if (long_name == NULL) {
ret = -1;
@@ -2609,10 +2604,10 @@ int symbol__init(void)
symbol_conf.initialized = true;
return 0;
-out_free_dso_list:
- strlist__delete(symbol_conf.dso_list);
out_free_comm_list:
strlist__delete(symbol_conf.comm_list);
+out_free_dso_list:
+ strlist__delete(symbol_conf.dso_list);
return -1;
}
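
The symbol__init() hunk above swaps two error labels that unwound in
the wrong order: cleanup labels must undo allocations in reverse, with
each label falling through to the one for the allocation before it.
The general shape of the pattern, as a sketch:

    a = alloc_a();
    if (a == NULL)
        goto out;
    b = alloc_b();
    if (b == NULL)
        goto out_free_a;
    c = alloc_c();
    if (c == NULL)
        goto out_free_b;
    return 0;

    out_free_b:
        free_b(b);          /* the newest allocation is undone first ... */
    out_free_a:
        free_a(a);          /* ... falling through to the older one */
    out:
        return -1;
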
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 29f8d742e92f..123c2e14353e 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -68,6 +68,7 @@ struct strlist;
struct symbol_conf {
unsigned short priv_size;
+ unsigned short nr_events;
bool try_vmlinux_path,
use_modules,
sort_by_name,
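
The new symbol_conf.nr_events field is what lets the annotation paths
further down drop their nr_events parameters: symbol__alloc_hist() can
size its per-event histograms from the global configuration instead of
having every caller thread the count through. A hypothetical setup
sketch:

    /* illustrative: record the event count once, before symbol__init() */
    symbol_conf.nr_events = evlist->nr_entries;
    if (symbol__init() < 0)
        return -1;
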
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index d5d3b22250f3..fb4b7ea6752f 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -61,7 +61,7 @@ static size_t thread__fprintf(struct thread *self, FILE *fp)
map_groups__fprintf(&self->mg, verbose, fp);
}
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
+struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
{
struct rb_node **p = &self->threads.rb_node;
struct rb_node *parent = NULL;
@@ -125,12 +125,12 @@ int thread__fork(struct thread *self, struct thread *parent)
return 0;
}
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
+size_t machine__fprintf(struct machine *machine, FILE *fp)
{
size_t ret = 0;
struct rb_node *nd;
- for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index e5f2401c1b5e..70c2c13ff679 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -18,16 +18,14 @@ struct thread {
int comm_len;
};
-struct perf_session;
+struct machine;
void thread__delete(struct thread *self);
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
static inline struct map *thread__find_map(struct thread *self,
enum map_type type, u64 addr)
@@ -35,14 +33,12 @@ static inline struct map *thread__find_map(struct thread *self,
return self ? map_groups__find(&self->mg, type, addr) : NULL;
}
-void thread__find_addr_map(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_map(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al);
-void thread__find_addr_location(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter);
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
new file mode 100644
index 000000000000..b0e1aadba8d5
--- /dev/null
+++ b/tools/perf/util/tool.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_TOOL_H
+#define __PERF_TOOL_H
+
+#include <stdbool.h>
+
+struct perf_session;
+union perf_event;
+struct perf_evlist;
+struct perf_evsel;
+struct perf_sample;
+struct perf_tool;
+struct machine;
+
+typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel, struct machine *machine);
+
+typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+
+typedef int (*event_attr_op)(union perf_event *event,
+ struct perf_evlist **pevlist);
+typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event);
+
+typedef int (*event_synth_op)(union perf_event *event,
+ struct perf_session *session);
+
+typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
+ struct perf_session *session);
+
+struct perf_tool {
+ event_sample sample,
+ read;
+ event_op mmap,
+ comm,
+ fork,
+ exit,
+ lost,
+ throttle,
+ unthrottle;
+ event_attr_op attr;
+ event_synth_op tracing_data;
+ event_simple_op event_type;
+ event_op2 finished_round,
+ build_id;
+ bool ordered_samples;
+ bool ordering_requires_timestamps;
+};
+
+#endif /* __PERF_TOOL_H */
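
util/tool.h is the centrepiece of the rework: the old perf_event_ops,
declared in session.h and passed around as a trailing argument, becomes
a first-class struct perf_tool handed to every callback as its first
argument, while per-event machine pointers replace the catch-all
session pointer. A minimal consumer might look like this sketch
(print_sample and min_tool are made-up names):

    static int print_sample(struct perf_tool *tool __used,
                            union perf_event *event __used,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel __used,
                            struct machine *machine __used)
    {
        printf("sample: ip %#" PRIx64 ", pid %u\n", sample->ip, sample->pid);
        return 0;
    }

    static struct perf_tool min_tool = {
        .sample          = print_sample,
        .ordered_samples = true,    /* deliver samples in timestamp order */
    };

    /* perf_tool__fill_defaults() supplies stubs for everything unset */
    err = perf_session__process_events(session, &min_tool);
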
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 399650967958..a248f3c2c60d 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -1,15 +1,17 @@
#ifndef __PERF_TOP_H
#define __PERF_TOP_H 1
+#include "tool.h"
#include "types.h"
-#include "../perf.h"
#include <stddef.h>
+#include <stdbool.h>
struct perf_evlist;
struct perf_evsel;
struct perf_session;
struct perf_top {
+ struct perf_tool tool;
struct perf_evlist *evlist;
/*
* Symbols will be added here in perf_event__process_sample and will
@@ -23,10 +25,26 @@ struct perf_top {
int freq;
pid_t target_pid, target_tid;
bool hide_kernel_symbols, hide_user_symbols, zero;
+ bool system_wide;
+ bool use_tui, use_stdio;
+ bool sort_has_symbols;
+ bool dont_use_callchains;
+ bool kptr_restrict_warned;
+ bool vmlinux_warned;
+ bool inherit;
+ bool group;
+ bool sample_id_all_avail;
+ bool dump_symtab;
const char *cpu_list;
struct hist_entry *sym_filter_entry;
struct perf_evsel *sym_evsel;
struct perf_session *session;
+ struct winsize winsize;
+ unsigned int mmap_pages;
+ int default_interval;
+ int realtime_prio;
+ int sym_pcnt_filter;
+ const char *sym_filter;
};
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
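
Embedding struct perf_tool as the first member of struct perf_top is
what lets the new callbacks recover their enclosing state: a callback
receives only the tool pointer and reaches the surrounding perf_top
with container_of(). Sketch (process_sample is a made-up name):

    static int process_sample(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_sample *sample,
                              struct perf_evsel *evsel,
                              struct machine *machine)
    {
        struct perf_top *top = container_of(tool, struct perf_top, tool);

        /* ... update top->evlist, counters, etc. ... */
        return 0;
    }
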
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index d2655f08bcc0..ac6830d8292b 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -18,7 +18,8 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define _GNU_SOURCE
+#include <ctype.h>
+#include "util.h"
#include <dirent.h>
#include <mntent.h>
#include <stdio.h>
@@ -31,7 +32,6 @@
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
-#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <linux/list.h>
@@ -44,10 +44,6 @@
#define VERSION "0.5"
-#define _STR(x) #x
-#define STR(x) _STR(x)
-#define MAX_PATH 256
-
#define TRACE_CTRL "tracing_on"
#define TRACE "trace"
#define AVAILABLE "available_tracers"
@@ -73,26 +69,6 @@ struct events {
};
-
-static void die(const char *fmt, ...)
-{
- va_list ap;
- int ret = errno;
-
- if (errno)
- perror("perf");
- else
- ret = -1;
-
- va_start(ap, fmt);
- fprintf(stderr, " ");
- vfprintf(stderr, fmt, ap);
- va_end(ap);
-
- fprintf(stderr, "\n");
- exit(ret);
-}
-
void *malloc_or_die(unsigned int size)
{
void *data;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index c9dcbec7d800..a3fdf55f317b 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -39,7 +39,7 @@ static int stop_script_unsupported(void)
static void process_event_unsupported(union perf_event *event __unused,
struct perf_sample *sample __unused,
struct perf_evsel *evsel __unused,
- struct perf_session *session __unused,
+ struct machine *machine __unused,
struct thread *thread __unused)
{
}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index a84100817649..58ae14c5baac 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -3,7 +3,11 @@
#include <stdbool.h>
#include "parse-events.h"
-#include "session.h"
+
+struct machine;
+struct perf_sample;
+union perf_event;
+struct thread;
#define __unused __attribute__((unused))
@@ -292,7 +296,7 @@ struct scripting_ops {
void (*process_event) (union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session,
+ struct machine *machine,
struct thread *thread);
int (*generate_script) (const char *outfile);
};
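
With session.h no longer included, trace-event.h gets by on forward
declarations, and scripting engines receive the machine directly. A
dispatch into an engine now has this shape (illustrative):

    /* the session argument is gone from the scripting hook */
    scripting_ops->process_event(event, sample, evsel, machine, thread);
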
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 0575905d1205..295a9c93f945 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -224,7 +224,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
}
static int annotate_browser__run(struct annotate_browser *self, int evidx,
- int nr_events, void(*timer)(void *arg),
+ void(*timer)(void *arg),
void *arg, int delay_secs)
{
struct rb_node *nd = NULL;
@@ -328,8 +328,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
notes = symbol__annotation(target);
pthread_mutex_lock(&notes->lock);
- if (notes->src == NULL &&
- symbol__alloc_hist(target, nr_events) < 0) {
+ if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
pthread_mutex_unlock(&notes->lock);
ui__warning("Not enough memory for annotating '%s' symbol!\n",
target->name);
@@ -337,7 +336,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
}
pthread_mutex_unlock(&notes->lock);
- symbol__tui_annotate(target, ms->map, evidx, nr_events,
+ symbol__tui_annotate(target, ms->map, evidx,
timer, arg, delay_secs);
}
continue;
@@ -358,15 +357,15 @@ out:
return key;
}
-int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events,
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
void(*timer)(void *arg), void *arg, int delay_secs)
{
- return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, nr_events,
+ return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx,
timer, arg, delay_secs);
}
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
- int nr_events, void(*timer)(void *arg), void *arg,
+ void(*timer)(void *arg), void *arg,
int delay_secs)
{
struct objdump_line *pos, *n;
@@ -419,8 +418,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
browser.b.nr_entries = browser.nr_entries;
browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
- ret = annotate_browser__run(&browser, evidx, nr_events,
- timer, arg, delay_secs);
+ ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
list_del(&pos->node);
objdump_line__free(pos);
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index d0c94b459685..1212a386a033 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -1020,7 +1020,7 @@ do_annotate:
* Don't let this be freed, say, by hists__decay_entry.
*/
he->used = true;
- err = hist_entry__tui_annotate(he, evsel->idx, nr_events,
+ err = hist_entry__tui_annotate(he, evsel->idx,
timer, arg, delay_secs);
he->used = false;
ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
index 295e366b6311..13aa64e50e11 100644
--- a/tools/perf/util/ui/progress.c
+++ b/tools/perf/util/ui/progress.c
@@ -14,6 +14,9 @@ void ui_progress__update(u64 curr, u64 total, const char *title)
if (use_browser <= 0)
return;
+ if (total == 0)
+ return;
+
ui__refresh_dimensions(true);
pthread_mutex_lock(&ui__lock);
y = SLtt_Screen_Rows / 2 - 2;
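
ui_progress__update() later sizes the bar as a fraction of curr over total, so a zero total would divide by zero; the added early return turns an empty workload into a no-op instead. A standalone illustration of the failure mode being guarded (not perf code):

    #include <stdio.h>

    static void progress(unsigned long curr, unsigned long total)
    {
            if (total == 0)         /* same guard as the hunk above */
                    return;
            printf("%3lu%%\r", curr * 100 / total);  /* safe: total != 0 */
    }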
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index e16bf9a707e8..d76d1c0ff98f 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -1,5 +1,8 @@
/*
- * GIT - The information manager from hell
+ * usage.c
+ *
+ * Various reporting routines.
+ * Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
*/
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0128906bac88..37be34dff798 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -245,4 +245,15 @@ int readn(int fd, void *buf, size_t size);
#define _STR(x) #x
#define STR(x) _STR(x)
+/*
+ * Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
#endif
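
The helper relies on n & (n - 1) clearing the lowest set bit: the result is zero exactly when n has a single bit set, and the n != 0 term rules out zero explicitly. A standalone check of the same function:

    #include <assert.h>
    #include <stdbool.h>

    static inline bool is_power_of_2(unsigned long n)
    {
            /* n - 1 flips the lowest set bit and every bit below it */
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            assert(is_power_of_2(1) && is_power_of_2(4096));
            assert(!is_power_of_2(0) && !is_power_of_2(6));
            return 0;
    }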
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index bdd33470b235..697c8b4e59cc 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -32,6 +32,7 @@ void perf_read_values_destroy(struct perf_read_values *values)
for (i = 0; i < values->threads; i++)
free(values->value[i]);
+ free(values->value);
free(values->pid);
free(values->tid);
free(values->counterrawid);
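
The added free() plugs a leak in perf_read_values_destroy(): the loop releases each per-thread row of the value matrix, but the array holding the row pointers itself was never freed. The general teardown for a two-level allocation:

    #include <stdlib.h>

    static void free_matrix(long **m, int rows)
    {
            int i;

            for (i = 0; i < rows; i++)
                    free(m[i]);     /* each row first */
            free(m);                /* then the pointer array itself */
    }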
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 3ad0925d23a9..758e3b36d4cf 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -17,6 +17,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
#include "irq.h"
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -480,12 +482,76 @@ out:
return r;
}
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device. To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs. PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file. We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+ int i;
+ bool bar_found = false;
+
+ for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+ char *kpath, *syspath;
+ struct path path;
+ struct inode *inode;
+ int r;
+
+ if (!pci_resource_len(dev, i))
+ continue;
+
+ kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+ if (!kpath)
+ return -ENOMEM;
+
+ /* Per sysfs-rules, sysfs is always at /sys */
+ syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+ kfree(kpath);
+ if (!syspath)
+ return -ENOMEM;
+
+ r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+ kfree(syspath);
+ if (r)
+ return r;
+
+ inode = path.dentry->d_inode;
+
+ r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+ path_put(&path);
+ if (r)
+ return r;
+
+ bar_found = true;
+ }
+
+ /* If no resources, probably something special */
+ if (!bar_found)
+ return -EPERM;
+
+ return 0;
+#else
+ return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
+ u8 header_type;
+
+ if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+ return -EINVAL;
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
@@ -513,6 +579,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
r = -EINVAL;
goto out_free;
}
+
+ /* Don't allow bridges to be assigned */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+ if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+ r = -EPERM;
+ goto out_put;
+ }
+
+ r = probe_sysfs_permissions(dev);
+ if (r)
+ goto out_put;
+
if (pci_enable_device(dev)) {
printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
r = -EBUSY;
@@ -544,16 +622,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
list_add(&match->list, &kvm->arch.assigned_dev_head);
- if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
- if (!kvm->arch.iommu_domain) {
- r = kvm_iommu_map_guest(kvm);
- if (r)
- goto out_list_del;
- }
- r = kvm_assign_device(kvm, match);
+ if (!kvm->arch.iommu_domain) {
+ r = kvm_iommu_map_guest(kvm);
if (r)
goto out_list_del;
}
+ r = kvm_assign_device(kvm, match);
+ if (r)
+ goto out_list_del;
out:
srcu_read_unlock(&kvm->srcu, idx);
@@ -593,8 +669,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
goto out;
}
- if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
- kvm_deassign_device(kvm, match);
+ kvm_deassign_device(kvm, match);
kvm_free_assigned_device(kvm, match);
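
Taken together, the assigned-dev.c hunks tighten device assignment three ways: the ioctl now fails without KVM_DEV_ASSIGN_ENABLE_IOMMU, bridges can no longer be assigned, and the caller must hold read/write permission on the device's BAR resource files in sysfs. A userspace sketch of that last check, mirroring probe_sysfs_permissions() with access(2); the sysfs layout is standard, but treat the sketch as illustrative:

    #include <stdio.h>
    #include <unistd.h>

    /* Return 1 if every present BAR resource file is read-write
     * accessible, 0 otherwise -- roughly what the kernel verifies
     * via inode_permission(MAY_READ | MAY_WRITE | MAY_ACCESS).
     */
    static int can_assign(const char *sysfs_dev)
    {
            char path[256];
            int i;

            for (i = 0; i < 6; i++) {            /* standard BARs 0-5 */
                    snprintf(path, sizeof(path), "%s/resource%d",
                             sysfs_dev, i);
                    if (access(path, F_OK))
                            continue;   /* BAR absent, like !pci_resource_len() */
                    if (access(path, R_OK | W_OK))
                            return 0;   /* present but not rw-accessible */
            }
            return 1;
    }

Something like can_assign("/sys/bus/pci/devices/0000:01:00.0") approximates the kernel-side check; note the kernel additionally rejects devices exposing no BARs at all (-EPERM when none are found).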