author    Trilok Soni <tsoni@codeaurora.org>    2016-08-25 19:05:37 -0700
committer Trilok Soni <tsoni@codeaurora.org>    2016-08-26 14:34:05 -0700
commit    5ab1e18aa3913d454e1bd1498b20ee581aae2c6b (patch)
tree      42bd10ef0bf5cdb8deb05656bf802c77dc580ff7
parent    e97b6a0e0217f7c072fdad6c50673cd7a64348e1 (diff)
Revert "Merge remote-tracking branch 'msm-4.4/tmp-510d0a3f' into msm-4.4"
This reverts commit 9d6fd2c3e9fcfb ("Merge remote-tracking branch 'msm-4.4/tmp-510d0a3f' into msm-4.4") because it breaks the dump parsing tools: with KASLR the kernel can now be loaded anywhere in memory instead of at a fixed linear-mapping offset.

Change-Id: Id416f0a249d803442847d09ac47781147b0d0ee6
Signed-off-by: Trilok Soni <tsoni@codeaurora.org>
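For context, the parsing tools in question rely on the pre-KASLR assumption that kernel virtual addresses translate to physical addresses by a constant linear-mapping offset. A minimal sketch of that assumption follows; it is illustrative only, and the page_offset/phys_offset values are assumed, not taken from any particular tool:

#include <stdint.h>

/*
 * Illustrative only: a dump parser built on the pre-KASLR arm64 layout,
 * where the kernel sits at a fixed offset in the linear mapping and
 * virt-to-phys translation is a constant subtraction.
 */
static const uint64_t page_offset = 0xffffffc000000000ULL; /* assumed VA base */
static const uint64_t phys_offset = 0x80000000ULL;         /* assumed RAM base */

static uint64_t virt_to_phys(uint64_t va)
{
	return va - page_offset + phys_offset;
}

/*
 * With the reverted KASLR support, the kernel may run from a randomized
 * address, so this constant-offset translation no longer holds -- which
 * is why the merge is being reverted.
 */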
-rw-r--r--  Documentation/arm64/booting.txt | 20
-rw-r--r--  Documentation/arm64/silicon-errata.txt | 58
-rw-r--r--  Documentation/devicetree/bindings/ata/ahci-platform.txt | 4
-rw-r--r--  Documentation/features/time/irq-time-acct/arch-support.txt | 2
-rw-r--r--  Documentation/features/vm/huge-vmap/arch-support.txt | 2
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/include/asm/io.h | 27
-rw-r--r--  arch/arm/boot/dts/am43x-epos-evm.dts | 5
-rw-r--r--  arch/arm/boot/dts/armada-375.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/armada-385-linksys.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/pxa3xx.dtsi | 2
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 2
-rw-r--r--  arch/arm/kvm/arm.c | 5
-rw-r--r--  arch/arm/mach-cns3xxx/pcie.c | 6
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 1
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c | 2
-rw-r--r--  arch/arm/mach-omap2/cpuidle34xx.c | 69
-rw-r--r--  arch/arm/mach-omap2/io.c | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 8
-rw-r--r--  arch/arm/mach-prima2/Kconfig | 1
-rw-r--r--  arch/arm/mach-socfpga/headsmp.S | 1
-rw-r--r--  arch/arm64/Kconfig | 91
-rw-r--r--  arch/arm64/Makefile | 11
-rw-r--r--  arch/arm64/include/asm/acpi.h | 19
-rw-r--r--  arch/arm64/include/asm/alternative.h | 64
-rw-r--r--  arch/arm64/include/asm/assembler.h | 37
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 38
-rw-r--r--  arch/arm64/include/asm/boot.h | 6
-rw-r--r--  arch/arm64/include/asm/brk-imm.h | 25
-rw-r--r--  arch/arm64/include/asm/bug.h | 2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 1
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 1
-rw-r--r--  arch/arm64/include/asm/cpu.h | 1
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 7
-rw-r--r--  arch/arm64/include/asm/cputype.h | 39
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 14
-rw-r--r--  arch/arm64/include/asm/elf.h | 24
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 10
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 2
-rw-r--r--  arch/arm64/include/asm/futex.h | 14
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 44
-rw-r--r--  arch/arm64/include/asm/irq.h | 45
-rw-r--r--  arch/arm64/include/asm/kasan.h | 5
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 12
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 8
-rw-r--r--  arch/arm64/include/asm/lse.h | 1
-rw-r--r--  arch/arm64/include/asm/memory.h | 65
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 62
-rw-r--r--  arch/arm64/include/asm/module.h | 17
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 26
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 18
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 156
-rw-r--r--  arch/arm64/include/asm/processor.h | 9
-rw-r--r--  arch/arm64/include/asm/shmparam.h | 2
-rw-r--r--  arch/arm64/include/asm/smp.h | 8
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 23
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 9
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 23
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 10
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 82
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h | 7
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 3
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c | 153
-rw-r--r--  arch/arm64/kernel/alternative.c | 6
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 13
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 20
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 27
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 133
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 55
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 2
-rw-r--r--  arch/arm64/kernel/entry.S | 69
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/kernel/ftrace.c | 27
-rw-r--r--  arch/arm64/kernel/head.S | 152
-rw-r--r--  arch/arm64/kernel/image.h | 85
-rw-r--r--  arch/arm64/kernel/irq.c | 3
-rw-r--r--  arch/arm64/kernel/kaslr.c | 177
-rw-r--r--  arch/arm64/kernel/module-plts.c | 201
-rw-r--r--  arch/arm64/kernel/module.c | 96
-rw-r--r--  arch/arm64/kernel/module.lds | 3
-rw-r--r--  arch/arm64/kernel/perf_callchain.c | 5
-rw-r--r--  arch/arm64/kernel/process.c | 21
-rw-r--r--  arch/arm64/kernel/return_address.c | 5
-rw-r--r--  arch/arm64/kernel/setup.c | 36
-rw-r--r--  arch/arm64/kernel/sleep.S | 3
-rw-r--r--  arch/arm64/kernel/smp.c | 30
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 74
-rw-r--r--  arch/arm64/kernel/suspend.c | 20
-rw-r--r--  arch/arm64/kernel/time.c | 5
-rw-r--r--  arch/arm64/kernel/traps.c | 61
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 32
-rw-r--r--  arch/arm64/kvm/hyp.S | 6
-rw-r--r--  arch/arm64/lib/Makefile | 13
-rw-r--r--  arch/arm64/lib/clear_user.S | 12
-rw-r--r--  arch/arm64/lib/copy_from_user.S | 12
-rw-r--r--  arch/arm64/lib/copy_in_user.S | 20
-rw-r--r--  arch/arm64/lib/copy_page.S | 63
-rw-r--r--  arch/arm64/lib/copy_to_user.S | 12
-rw-r--r--  arch/arm64/mm/cache.S | 28
-rw-r--r--  arch/arm64/mm/context.c | 2
-rw-r--r--  arch/arm64/mm/copypage.c | 3
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 4
-rw-r--r--  arch/arm64/mm/dump.c | 17
-rw-r--r--  arch/arm64/mm/extable.c | 2
-rw-r--r--  arch/arm64/mm/fault.c | 34
-rw-r--r--  arch/arm64/mm/flush.c | 33
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 260
-rw-r--r--  arch/arm64/mm/init.c | 132
-rw-r--r--  arch/arm64/mm/kasan_init.c | 79
-rw-r--r--  arch/arm64/mm/mmu.c | 701
-rw-r--r--  arch/arm64/mm/pageattr.c | 66
-rw-r--r--  arch/arm64/mm/pgd.c | 12
-rw-r--r--  arch/arm64/mm/proc-macros.S | 22
-rw-r--r--  arch/arm64/mm/proc.S | 28
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/include/asm/assembly.h | 2
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 7
-rw-r--r--  arch/parisc/mm/fault.c | 9
-rw-r--r--  arch/powerpc/include/asm/word-at-a-time.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/cputable.h | 1
-rw-r--r--  arch/powerpc/kernel/process.c | 18
-rw-r--r--  arch/powerpc/kernel/prom.c | 26
-rw-r--r--  arch/s390/include/asm/mmu.h | 2
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 28
-rw-r--r--  arch/s390/include/asm/pci.h | 3
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 4
-rw-r--r--  arch/s390/include/asm/processor.h | 2
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 9
-rw-r--r--  arch/s390/mm/init.c | 3
-rw-r--r--  arch/s390/mm/mmap.c | 6
-rw-r--r--  arch/s390/mm/pgtable.c | 85
-rw-r--r--  arch/x86/crypto/sha-mb/sha1_mb.c | 4
-rw-r--r--  arch/x86/include/asm/efi.h | 2
-rw-r--r--  arch/x86/include/asm/hugetlb.h | 1
-rw-r--r--  arch/x86/kernel/apic/vector.c | 3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-genpool.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 3
-rw-r--r--  arch/x86/kernel/sysfb_efi.c | 14
-rw-r--r--  arch/x86/kernel/tsc_msr.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 10
-rw-r--r--  arch/x86/mm/kmmio.c | 88
-rw-r--r--  block/partition-generic.c | 13
-rw-r--r--  crypto/ahash.c | 3
-rw-r--r--  crypto/testmgr.c | 27
-rw-r--r--  drivers/acpi/acpi_processor.c | 52
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 3
-rw-r--r--  drivers/acpi/bus.c | 3
-rw-r--r--  drivers/acpi/internal.h | 6
-rw-r--r--  drivers/ata/ahci_platform.c | 3
-rw-r--r--  drivers/ata/ahci_xgene.c | 4
-rw-r--r--  drivers/ata/libahci.c | 1
-rw-r--r--  drivers/base/power/domain.c | 2
-rw-r--r--  drivers/block/loop.c | 6
-rw-r--r--  drivers/block/nbd.c | 4
-rw-r--r--  drivers/block/paride/pd.c | 4
-rw-r--r--  drivers/block/paride/pt.c | 4
-rw-r--r--  drivers/bus/imx-weim.c | 2
-rw-r--r--  drivers/clk/clk-divider.c | 11
-rw-r--r--  drivers/clk/meson/clkc.c | 2
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-ccu.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8960.c | 4
-rw-r--r--  drivers/clk/rockchip/clk.c | 13
-rw-r--r--  drivers/clk/versatile/clk-sp810.c | 4
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 5
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 3
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 3
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 26
-rw-r--r--  drivers/crypto/talitos.c | 87
-rw-r--r--  drivers/dma/dw/core.c | 34
-rw-r--r--  drivers/dma/hsu/hsu.c | 2
-rw-r--r--  drivers/dma/hsu/hsu.h | 3
-rw-r--r--  drivers/dma/pxa_dma.c | 39
-rw-r--r--  drivers/edac/i7core_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 8
-rw-r--r--  drivers/extcon/extcon-max77843.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 7
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 40
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 78
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 7
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 7
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 14
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 135
-rw-r--r--  drivers/firmware/efi/vars.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/ramht.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 13
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 154
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_auxch.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 1
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 7
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 5
-rw-r--r--  drivers/hwtracing/stm/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c | 24
-rw-r--r--  drivers/iio/magnetometer/ak8975.c | 6
-rw-r--r--  drivers/infiniband/core/ucm.c | 4
-rw-r--r--  drivers/infiniband/core/ucma.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 5
-rw-r--r--  drivers/input/misc/max8997_haptic.c | 6
-rw-r--r--  drivers/input/misc/pmic8xxx-pwrkey.c | 7
-rw-r--r--  drivers/input/tablet/gtco.c | 10
-rw-r--r--  drivers/input/touchscreen/zforce_ts.c | 4
-rw-r--r--  drivers/iommu/amd_iommu.c | 87
-rw-r--r--  drivers/iommu/dma-iommu.c | 4
-rw-r--r--  drivers/irqchip/irq-mxs.c | 2
-rw-r--r--  drivers/irqchip/irq-sunxi-nmi.c | 4
-rw-r--r--  drivers/md/dm-cache-metadata.c | 64
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 10
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c | 2
-rw-r--r--  drivers/mfd/intel-lpss.c | 1
-rw-r--r--  drivers/misc/Kconfig | 2
-rw-r--r--  drivers/misc/ad525x_dpot.c | 2
-rw-r--r--  drivers/misc/cxl/irq.c | 1
-rw-r--r--  drivers/misc/mic/scif/scif_rma.c | 7
-rw-r--r--  drivers/mtd/nand/brcmnand/brcmnand.c | 34
-rw-r--r--  drivers/mtd/nand/nand_base.c | 10
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 46
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 10
-rw-r--r--  drivers/net/ethernet/jme.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 10
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 1
-rw-r--r--  drivers/nvmem/mxs-ocotp.c | 4
-rw-r--r--  drivers/of/fdt.c | 19
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 9
-rw-r--r--  drivers/pinctrl/pinctrl-at91-pio4.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 6
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 2
-rw-r--r--  drivers/pwm/pwm-brcmstb.c | 4
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 4
-rw-r--r--  drivers/regulator/core.c | 2
-rw-r--r--  drivers/regulator/s2mps11.c | 28
-rw-r--r--  drivers/regulator/s5m8767.c | 13
-rw-r--r--  drivers/rtc/rtc-ds1685.c | 8
-rw-r--r--  drivers/rtc/rtc-hym8563.c | 2
-rw-r--r--  drivers/rtc/rtc-max77686.c | 2
-rw-r--r--  drivers/rtc/rtc-rx8025.c | 1
-rw-r--r--  drivers/rtc/rtc-vr41xx.c | 13
-rw-r--r--  drivers/scsi/device_handler/Kconfig | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 3
-rw-r--r--  drivers/scsi/qla1280.c | 2
-rw-r--r--  drivers/soc/rockchip/pm_domains.c | 1
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 2
-rw-r--r--  drivers/spi/spi-rockchip.c | 7
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 45
-rw-r--r--  drivers/staging/rdma/hfi1/TODO | 2
-rw-r--r--  drivers/staging/rdma/hfi1/file_ops.c | 6
-rw-r--r--  drivers/thermal/rockchip_thermal.c | 11
-rw-r--r--  drivers/tty/serial/sh-sci.c | 39
-rw-r--r--  drivers/usb/core/hcd-pci.c | 9
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 5
-rw-r--r--  drivers/usb/host/xhci-mem.c | 6
-rw-r--r--  drivers/usb/host/xhci-pci.c | 4
-rw-r--r--  drivers/usb/host/xhci.c | 6
-rw-r--r--  drivers/usb/serial/cp210x.c | 4
-rw-r--r--  drivers/usb/usbip/usbip_common.c | 11
-rw-r--r--  drivers/video/fbdev/Kconfig | 1
-rw-r--r--  drivers/video/fbdev/da8xx-fb.c | 7
-rw-r--r--  drivers/xen/balloon.c | 16
-rw-r--r--  drivers/xen/evtchn.c | 20
-rw-r--r--  fs/debugfs/inode.c | 2
-rw-r--r--  fs/ext4/ext4.h | 13
-rw-r--r--  fs/ext4/extents.c | 133
-rw-r--r--  fs/ext4/file.c | 66
-rw-r--r--  fs/ext4/inode.c | 77
-rw-r--r--  fs/ext4/super.c | 1
-rw-r--r--  fs/ext4/truncate.h | 2
-rw-r--r--  fs/isofs/rock.c | 13
-rw-r--r--  fs/namei.c | 26
-rw-r--r--  fs/ocfs2/acl.c | 87
-rw-r--r--  fs/ocfs2/acl.h | 5
-rw-r--r--  fs/ocfs2/file.c | 4
-rw-r--r--  fs/ocfs2/namei.c | 23
-rw-r--r--  fs/ocfs2/refcounttree.c | 17
-rw-r--r--  fs/ocfs2/xattr.c | 14
-rw-r--r--  fs/ocfs2/xattr.h | 4
-rw-r--r--  fs/open.c | 12
-rw-r--r--  fs/pnode.c | 32
-rw-r--r--  fs/proc/base.c | 3
-rw-r--r--  fs/proc/task_mmu.c | 33
-rw-r--r--  include/asm-generic/fixmap.h | 12
-rw-r--r--  include/asm-generic/futex.h | 8
-rw-r--r--  include/drm/drm_cache.h | 2
-rw-r--r--  include/linux/bpf.h | 3
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/clk-provider.h | 1
-rw-r--r--  include/linux/cpuset.h | 6
-rw-r--r--  include/linux/dcache.h | 12
-rw-r--r--  include/linux/efi.h | 6
-rw-r--r--  include/linux/hash.h | 20
-rw-r--r--  include/linux/hugetlb.h | 2
-rw-r--r--  include/linux/mfd/samsung/s2mps11.h | 2
-rw-r--r--  include/linux/mlx5/device.h | 11
-rw-r--r--  include/linux/mlx5/driver.h | 6
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/net.h | 10
-rw-r--r--  include/media/videobuf2-core.h | 1
-rw-r--r--  include/net/codel.h | 4
-rw-r--r--  include/net/ip_vs.h | 17
-rw-r--r--  include/net/sch_generic.h | 20
-rw-r--r--  include/rdma/ib.h | 16
-rw-r--r--  include/uapi/linux/if.h | 28
-rw-r--r--  include/uapi/linux/libc-compat.h | 44
-rw-r--r--  include/uapi/linux/v4l2-dv-timings.h | 30
-rw-r--r--  include/xen/page.h | 4
-rw-r--r--  kernel/bpf/inode.c | 7
-rw-r--r--  kernel/bpf/syscall.c | 24
-rw-r--r--  kernel/bpf/verifier.c | 66
-rw-r--r--  kernel/cgroup.c | 14
-rw-r--r--  kernel/cpuset.c | 4
-rw-r--r--  kernel/events/ring_buffer.c | 10
-rw-r--r--  kernel/futex.c | 27
-rw-r--r--  kernel/locking/mcs_spinlock.h | 8
-rw-r--r--  kernel/sched/core.c | 35
-rw-r--r--  kernel/trace/trace_events.c | 9
-rw-r--r--  kernel/workqueue.c | 40
-rw-r--r--  lib/assoc_array.c | 4
-rw-r--r--  lib/extable.c | 50
-rw-r--r--  lib/lz4/lz4defs.h | 21
-rw-r--r--  lib/mpi/mpicoder.c | 39
-rw-r--r--  lib/test-string_helpers.c | 67
-rw-r--r--  mm/compaction.c | 10
-rw-r--r--  mm/huge_memory.c | 6
-rw-r--r--  mm/memcontrol.c | 36
-rw-r--r--  mm/memory.c | 40
-rw-r--r--  mm/migrate.c | 8
-rw-r--r--  mm/page-writeback.c | 6
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/slub.c | 22
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  mm/zsmalloc.c | 7
-rw-r--r--  mm/zswap.c | 8
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 17
-rw-r--r--  net/batman-adv/routing.c | 9
-rw-r--r--  net/batman-adv/send.c | 6
-rw-r--r--  net/batman-adv/soft-interface.c | 8
-rw-r--r--  net/bridge/br_ioctl.c | 5
-rw-r--r--  net/bridge/br_multicast.c | 12
-rw-r--r--  net/core/rtnetlink.c | 18
-rw-r--r--  net/core/skbuff.c | 11
-rw-r--r--  net/decnet/dn_route.c | 9
-rw-r--r--  net/ipv4/fib_frontend.c | 6
-rw-r--r--  net/ipv4/fib_semantics.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 11
-rw-r--r--  net/ipv4/route.c | 12
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv6/reassembly.c | 6
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/llc/af_llc.c | 1
-rw-r--r--  net/mac80211/iface.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 37
-rw-r--r--  net/netfilter/ipvs/ip_vs_pe_sip.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/openvswitch/actions.c | 12
-rw-r--r--  net/openvswitch/vport-netdev.c | 2
-rw-r--r--  net/openvswitch/vport.h | 7
-rw-r--r--  net/packet/af_packet.c | 1
-rw-r--r--  net/sched/sch_api.c | 8
-rw-r--r--  net/sched/sch_cbq.c | 12
-rw-r--r--  net/sched/sch_choke.c | 6
-rw-r--r--  net/sched/sch_codel.c | 10
-rw-r--r--  net/sched/sch_drr.c | 9
-rw-r--r--  net/sched/sch_dsmark.c | 11
-rw-r--r--  net/sched/sch_fq.c | 4
-rw-r--r--  net/sched/sch_fq_codel.c | 17
-rw-r--r--  net/sched/sch_generic.c | 5
-rw-r--r--  net/sched/sch_hfsc.c | 9
-rw-r--r--  net/sched/sch_hhf.c | 10
-rw-r--r--  net/sched/sch_htb.c | 24
-rw-r--r--  net/sched/sch_multiq.c | 16
-rw-r--r--  net/sched/sch_netem.c | 74
-rw-r--r--  net/sched/sch_pie.c | 5
-rw-r--r--  net/sched/sch_prio.c | 15
-rw-r--r--  net/sched/sch_qfq.c | 9
-rw-r--r--  net/sched/sch_red.c | 10
-rw-r--r--  net/sched/sch_sfb.c | 10
-rw-r--r--  net/sched/sch_sfq.c | 16
-rw-r--r--  net/sched/sch_tbf.c | 15
-rw-r--r--  net/sunrpc/cache.c | 6
-rw-r--r--  net/vmw_vsock/af_vsock.c | 21
-rw-r--r--  net/wireless/nl80211.c | 2
-rw-r--r--  net/x25/x25_facilities.c | 1
-rw-r--r--  samples/bpf/trace_output_kern.c | 1
-rw-r--r--  scripts/kconfig/confdata.c | 12
-rw-r--r--  scripts/sortextable.c | 11
-rw-r--r--  sound/pci/hda/hda_generic.c | 6
-rw-r--r--  sound/pci/hda/hda_intel.c | 3
-rw-r--r--  sound/pci/hda/hda_sysfs.c | 8
-rw-r--r--  sound/pci/hda/patch_cirrus.c | 14
-rw-r--r--  sound/pci/hda/patch_realtek.c | 15
-rw-r--r--  sound/pci/pcxhr/pcxhr_core.c | 1
-rw-r--r--  sound/soc/codecs/rt5640.c | 2
-rw-r--r--  sound/soc/codecs/rt5640.h | 36
-rw-r--r--  sound/soc/codecs/ssm4567.c | 5
-rw-r--r--  sound/soc/samsung/s3c-i2s-v2.c | 2
-rw-r--r--  sound/soc/samsung/s3c-i2s-v2.h | 2
-rw-r--r--  sound/soc/soc-dapm.c | 7
-rw-r--r--  sound/usb/quirks.c | 3
-rw-r--r--  tools/lib/traceevent/parse-filter.c | 4
-rw-r--r--  tools/perf/Documentation/perf-stat.txt | 8
-rw-r--r--  tools/perf/ui/browsers/hists.c | 10
-rw-r--r--  tools/perf/util/event.c | 2
-rw-r--r--  tools/perf/util/evlist.c | 4
-rw-r--r--  tools/perf/util/intel-pt.c | 2
-rw-r--r--  virt/kvm/arm/arch_timer.c | 49
465 files changed, 2414 insertions, 6429 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 56d6d8b796db..701d39d3171a 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -109,13 +109,7 @@ Header notes:
1 - 4K
2 - 16K
3 - 64K
- Bit 3: Kernel physical placement
- 0 - 2MB aligned base should be as close as possible
- to the base of DRAM, since memory below it is not
- accessible via the linear mapping
- 1 - 2MB aligned base may be anywhere in physical
- memory
- Bits 4-63: Reserved.
+ Bits 3-63: Reserved.
- When image_size is zero, a bootloader should attempt to keep as much
memory as possible free for use by the kernel immediately after the
@@ -123,14 +117,14 @@ Header notes:
depending on selected features, and is effectively unbound.
The Image must be placed text_offset bytes from a 2MB aligned base
-address anywhere in usable system RAM and called there. The region
-between the 2 MB aligned base address and the start of the image has no
-special significance to the kernel, and may be used for other purposes.
+address near the start of usable system RAM and called there. Memory
+below that base address is currently unusable by Linux, and therefore it
+is strongly recommended that this location is the start of system RAM.
+The region between the 2 MB aligned base address and the start of the
+image has no special significance to the kernel, and may be used for
+other purposes.
At least image_size bytes from the start of the image must be free for
use by the kernel.
-NOTE: versions prior to v4.6 cannot make use of memory below the
-physical offset of the Image so it is recommended that the Image be
-placed as close as possible to the start of system RAM.
Any memory described to the kernel (even that below the start of the
image) which is not marked as reserved from the kernel (e.g., with a
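The restored booting.txt text above amounts to a simple placement rule for bootloaders. A hedged sketch of how a loader might apply it, assuming dram_base comes from the platform and text_offset from the Image header:

#include <stdint.h>

#define SZ_2M 0x200000ULL

/*
 * Sketch of the restored placement rule: the Image is loaded text_offset
 * bytes above a 2 MB aligned base, and that base should be the start of
 * usable RAM, since memory below it is unusable by these kernels.
 */
static uint64_t image_load_addr(uint64_t dram_base, uint64_t text_offset)
{
	/* Round the base up to the next 2 MB boundary. */
	uint64_t base = (dram_base + SZ_2M - 1) & ~(SZ_2M - 1);

	return base + text_offset;
}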
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
deleted file mode 100644
index 58b71ddf9b60..000000000000
--- a/Documentation/arm64/silicon-errata.txt
+++ /dev/null
@@ -1,58 +0,0 @@
- Silicon Errata and Software Workarounds
- =======================================
-
-Author: Will Deacon <will.deacon@arm.com>
-Date : 27 November 2015
-
-It is an unfortunate fact of life that hardware is often produced with
-so-called "errata", which can cause it to deviate from the architecture
-under specific circumstances. For hardware produced by ARM, these
-errata are broadly classified into the following categories:
-
- Category A: A critical error without a viable workaround.
- Category B: A significant or critical error with an acceptable
- workaround.
- Category C: A minor error that is not expected to occur under normal
- operation.
-
-For more information, consult one of the "Software Developers Errata
-Notice" documents available on infocenter.arm.com (registration
-required).
-
-As far as Linux is concerned, Category B errata may require some special
-treatment in the operating system. For example, avoiding a particular
-sequence of code, or configuring the processor in a particular way. A
-less common situation may require similar actions in order to declassify
-a Category A erratum into a Category C erratum. These are collectively
-known as "software workarounds" and are only required in the minority of
-cases (e.g. those cases that both require a non-secure workaround *and*
-can be triggered by Linux).
-
-For software workarounds that may adversely impact systems unaffected by
-the erratum in question, a Kconfig entry is added under "Kernel
-Features" -> "ARM errata workarounds via the alternatives framework".
-These are enabled by default and patched in at runtime when an affected
-CPU is detected. For less-intrusive workarounds, a Kconfig option is not
-available and the code is structured (preferably with a comment) in such
-a way that the erratum will not be hit.
-
-This approach can make it slightly onerous to determine exactly which
-errata are worked around in an arbitrary kernel source tree, so this
-file acts as a registry of software workarounds in the Linux Kernel and
-will be updated when new workarounds are committed and backported to
-stable kernels.
-
-| Implementor | Component | Erratum ID | Kconfig |
-+----------------+-----------------+-----------------+-------------------------+
-| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
-| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
-| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
-| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
-| ARM | Cortex-A53 | #845719 | ARM64_ERRATUM_845719 |
-| ARM | Cortex-A53 | #843419 | ARM64_ERRATUM_843419 |
-| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
-| ARM | Cortex-A57 | #852523 | N/A |
-| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
-| | | | |
-| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
-| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
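The deleted document describes workarounds that are enabled by default and patched in at runtime when an affected CPU is detected. That detection keys off the MIDR register, as in the MIDR_CPU_MODEL/MIDR_IS_CPU_MODEL_RANGE helpers removed from cputype.h later in this diff. A simplified standalone sketch of the matching logic, with field masks taken from those macros:

#include <stdbool.h>
#include <stdint.h>

/* Field layout per arch/arm64/include/asm/cputype.h in this diff. */
#define MIDR_REVISION_MASK	0xfu
#define MIDR_VARIANT_MASK	(0xfu << 20)
#define MIDR_MODEL_MASK		((0xffu << 24) | (0xfu << 16) | (0xfffu << 4))

/*
 * Simplified MIDR_IS_CPU_MODEL_RANGE: an erratum workaround applies when
 * the implementer/architecture/part number match the affected model and
 * the variant:revision value falls within the affected range.
 */
static bool midr_is_cpu_model_range(uint32_t midr, uint32_t model,
				    uint32_t rv_min, uint32_t rv_max)
{
	uint32_t rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);

	return (midr & MIDR_MODEL_MASK) == model && rv >= rv_min && rv <= rv_max;
}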
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index c000832a7fb9..c2340eeeb97f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -30,10 +30,6 @@ Optional properties:
- target-supply : regulator for SATA target power
- phys : reference to the SATA PHY node
- phy-names : must be "sata-phy"
-- ports-implemented : Mask that indicates which ports that the HBA supports
- are available for software to use. Useful if PORTS_IMPL
- is not programmed by the BIOS, which is true with
- some embedded SOC's.
Required properties when using sub-nodes:
- #address-cells : number of cells to encode an address
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt
index 4199ffecc0ff..e63316239938 100644
--- a/Documentation/features/time/irq-time-acct/arch-support.txt
+++ b/Documentation/features/time/irq-time-acct/arch-support.txt
@@ -9,7 +9,7 @@
| alpha: | .. |
| arc: | TODO |
| arm: | ok |
- | arm64: | ok |
+ | arm64: | .. |
| avr32: | TODO |
| blackfin: | TODO |
| c6x: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index df1d1f3c9af2..af6816bccb43 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -9,7 +9,7 @@
| alpha: | TODO |
| arc: | TODO |
| arm: | TODO |
- | arm64: | ok |
+ | arm64: | TODO |
| avr32: | TODO |
| blackfin: | TODO |
| c6x: | TODO |
diff --git a/MAINTAINERS b/MAINTAINERS
index ab65bbecb159..4c3e1d2ac31b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4097,8 +4097,8 @@ F: Documentation/efi-stub.txt
F: arch/ia64/kernel/efi.c
F: arch/x86/boot/compressed/eboot.[ch]
F: arch/x86/include/asm/efi.h
-F: arch/x86/platform/efi/
-F: drivers/firmware/efi/
+F: arch/x86/platform/efi/*
+F: drivers/firmware/efi/*
F: include/linux/efi*.h
EFI VARIABLE FILESYSTEM
diff --git a/Makefile b/Makefile
index 7d76ff290be4..e59197c28c78 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 11
+SUBLEVEL = 8
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index cb69299a492e..27b17adea50d 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,15 +13,6 @@
#include <asm/byteorder.h>
#include <asm/page.h>
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb() rmb()
-#define __iowmb() wmb()
-#else
-#define __iormb() do { } while (0)
-#define __iowmb() do { } while (0)
-#endif
-
extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long flags);
@@ -31,15 +22,6 @@ extern void iounmap(const void __iomem *addr);
#define ioremap_wc(phy, sz) ioremap(phy, sz)
#define ioremap_wt(phy, sz) ioremap(phy, sz)
-/*
- * io{read,write}{16,32}be() macros
- */
-#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-
-#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
-
/* Change struct page to physical address */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
@@ -117,6 +99,15 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 00707aac72fc..47954ed990f8 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -792,8 +792,3 @@
tx-num-evt = <32>;
rx-num-evt = <32>;
};
-
-&synctimer_32kclk {
- assigned-clocks = <&mux_synctimer32k_ck>;
- assigned-clock-parents = <&clkdiv32k_ick>;
-};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index cc952cf8ec30..7ccce7529b0c 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -529,7 +529,7 @@
};
sata@a0000 {
- compatible = "marvell,armada-370-sata";
+ compatible = "marvell,orion-sata";
reg = <0xa0000 0x5000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 14>, <&gateclk 20>;
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 85d2c377c332..3710755c6d76 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -117,7 +117,7 @@
};
/* USB part of the eSATA/USB 2.0 port */
- usb@58000 {
+ usb@50000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index cd0cd5fd09a3..0827d594b1f0 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -106,7 +106,7 @@
pmc: pmc@fffffc00 {
compatible = "atmel,at91sam9x5-pmc", "syscon";
- reg = <0xfffffc00 0x200>;
+ reg = <0xfffffc00 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
interrupt-controller;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index 564341af7e97..cf6998a0804d 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -30,7 +30,7 @@
reg = <0x43100000 90>;
interrupts = <45>;
clocks = <&clks CLK_NAND>;
- dmas = <&pdma 97 3>;
+ dmas = <&pdma 97>;
dma-names = "data";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index c35c349da069..194c91b610ff 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -79,8 +79,6 @@
#define rr_lo_hi(a1, a2) a1, a2
#endif
-#define kvm_ksym_ref(kva) (kva)
-
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 70e6d557c75f..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -969,7 +969,7 @@ static void cpu_init_hyp_mode(void *dummy)
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+ vector_ptr = (unsigned long)__kvm_hyp_vector;
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
@@ -1061,8 +1061,7 @@ static int init_hyp_mode(void)
/*
* Map the Hyp-code called directly from the host
*/
- err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
- kvm_ksym_ref(__kvm_hyp_code_end));
+ err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
if (err) {
kvm_err("Cannot map world-switch code\n");
goto out_free_mappings;
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..47905a50e075 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci,
u32 mask = (0x1ull << (size * 8)) - 1;
int shift = (where % 4) * 8;
- v = readl_relaxed(base);
+ v = readl_relaxed(base + (where & 0xffc));
v &= ~(mask << shift);
v |= (val & mask) << shift;
- writel_relaxed(v, base);
- readl_relaxed(base);
+ writel_relaxed(v, base + (where & 0xffc));
+ readl_relaxed(base + (where & 0xffc));
}
static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index bfd8bb371477..3a10f1a8317a 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -26,7 +26,6 @@ menuconfig ARCH_EXYNOS
select S5P_DEV_MFC
select SRAM
select THERMAL
- select THERMAL_OF
select MFD_SYSCON
help
Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 875a2bab64f6..7c21760f590f 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
if (IS_ERR(pd->clk[i]))
break;
- if (IS_ERR(pd->pclk[i]))
+ if (IS_ERR(pd->clk[i]))
continue; /* Skip on first power up */
if (clk_set_parent(pd->clk[i], pd->pclk[i]))
pr_err("%s: error setting parent to clock%d\n",
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 2a3db0bd9e15..aa7b379e2661 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -34,7 +34,6 @@
#include "pm.h"
#include "control.h"
#include "common.h"
-#include "soc.h"
/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
@@ -316,69 +315,6 @@ static struct cpuidle_driver omap3_idle_driver = {
.safe_state_index = 0,
};
-/*
- * Numbers based on measurements made in October 2009 for PM optimized kernel
- * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
- * and worst case latencies).
- */
-static struct cpuidle_driver omap3430_idle_driver = {
- .name = "omap3430_idle",
- .owner = THIS_MODULE,
- .states = {
- {
- .enter = omap3_enter_idle_bm,
- .exit_latency = 110 + 162,
- .target_residency = 5,
- .name = "C1",
- .desc = "MPU ON + CORE ON",
- },
- {
- .enter = omap3_enter_idle_bm,
- .exit_latency = 106 + 180,
- .target_residency = 309,
- .name = "C2",
- .desc = "MPU ON + CORE ON",
- },
- {
- .enter = omap3_enter_idle_bm,
- .exit_latency = 107 + 410,
- .target_residency = 46057,
- .name = "C3",
- .desc = "MPU RET + CORE ON",
- },
- {
- .enter = omap3_enter_idle_bm,
- .exit_latency = 121 + 3374,
- .target_residency = 46057,
- .name = "C4",
- .desc = "MPU OFF + CORE ON",
- },
- {
- .enter = omap3_enter_idle_bm,
- .exit_latency = 855 + 1146,
- .target_residency = 46057,
- .name = "C5",
- .desc = "MPU RET + CORE RET",
- },
- {
- .enter = omap3_enter_idle_bm,
- .exit_latency = 7580 + 4134,
- .target_residency = 484329,
- .name = "C6",
- .desc = "MPU OFF + CORE RET",
- },
- {
- .enter = omap3_enter_idle_bm,
- .exit_latency = 7505 + 15274,
- .target_residency = 484329,
- .name = "C7",
- .desc = "MPU OFF + CORE OFF",
- },
- },
- .state_count = ARRAY_SIZE(omap3_idle_data),
- .safe_state_index = 0,
-};
-
/* Public functions */
/**
@@ -397,8 +333,5 @@ int __init omap3_idle_init(void)
if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
return -ENODEV;
- if (cpu_is_omap3430())
- return cpuidle_register(&omap3430_idle_driver, NULL);
- else
- return cpuidle_register(&omap3_idle_driver, NULL);
+ return cpuidle_register(&omap3_idle_driver, NULL);
}
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3a911d8dea8b..3eaeaca5da05 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -368,7 +368,6 @@ void __init omap5_map_io(void)
void __init dra7xx_map_io(void)
{
iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
- omap_barriers_init();
}
#endif
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 147c90e70b2e..8e0bd5939e5a 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1416,7 +1416,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
(sf & SYSC_HAS_CLOCKACTIVITY))
_set_clockactivity(oh, oh->class->sysc->clockact, &v);
- _write_sysconfig(v, oh);
+ /* If the cached value is the same as the new value, skip the write */
+ if (oh->_sysc_cache != v)
+ _write_sysconfig(v, oh);
/*
* Set the autoidle bit only after setting the smartidle bit
@@ -1479,9 +1481,7 @@ static void _idle_sysc(struct omap_hwmod *oh)
_set_master_standbymode(oh, idlemode, &v);
}
- /* If the cached value is the same as the new value, skip the write */
- if (oh->_sysc_cache != v)
- _write_sysconfig(v, oh);
+ _write_sysconfig(v, oh);
}
/**
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 56e55fd37d13..9ab8932403e5 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -1,7 +1,6 @@
menuconfig ARCH_SIRF
bool "CSR SiRF" if ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
- select RESET_CONTROLLER
select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
select NO_IOPORT_MAP
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index c160fa3007e9..5d94b7a2fb10 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -13,7 +13,6 @@
#include <asm/assembler.h>
.arch armv7-a
- .arm
ENTRY(secondary_trampoline)
/* CPU1 will always fetch from 0x0 when it is brought out of reset.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 65fbf6633e28..0314b80695ca 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -51,7 +51,6 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
- select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KGDB
@@ -425,7 +424,6 @@ config ARM64_ERRATUM_843419
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
depends on MODULES
default y
- select ARM64_MODULE_CMODEL_LARGE
help
This option builds kernel modules using the large memory model in
order to avoid the use of the ADRP instruction, which can cause
@@ -603,9 +601,6 @@ config ARCH_NR_GPIO
source kernel/Kconfig.preempt
source kernel/Kconfig.hz
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y
-
config ARCH_HAS_HOLES_MEMORYMODEL
def_bool y if SPARSEMEM
@@ -656,6 +651,9 @@ config PERF_EVENTS_RESET_PMU_DEBUGFS
config SYS_SUPPORTS_HUGETLBFS
def_bool y
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
config ARCH_WANT_HUGE_PMD_SHARE
def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
@@ -828,93 +826,10 @@ config ARM64_LSE_ATOMICS
endmenu
-config ARM64_UAO
- bool "Enable support for User Access Override (UAO)"
- default y
- help
- User Access Override (UAO; part of the ARMv8.2 Extensions)
- causes the 'unprivileged' variant of the load/store instructions to
- be overridden to be privileged.
-
- This option changes get_user() and friends to use the 'unprivileged'
- variant of the load/store instructions. This ensures that user-space
- really did have access to the supplied memory. When addr_limit is
- set to kernel memory the UAO bit will be set, allowing privileged
- access to kernel memory.
-
- Choosing this option will cause copy_to_user() et al to use user-space
- memory permissions.
-
- The feature is detected at runtime, the kernel will use the
- regular load/store instructions if the cpu does not implement the
- feature.
-
-config ARM64_MODULE_CMODEL_LARGE
- bool
-
-config ARM64_MODULE_PLTS
- bool
- select ARM64_MODULE_CMODEL_LARGE
- select HAVE_MOD_ARCH_SPECIFIC
-
-config RELOCATABLE
- bool
- help
- This builds the kernel as a Position Independent Executable (PIE),
- which retains all relocation metadata required to relocate the
- kernel binary at runtime to a different virtual address than the
- address it was linked at.
- Since AArch64 uses the RELA relocation format, this requires a
- relocation pass at runtime even if the kernel is loaded at the
- same address it was linked at.
-
-config RANDOMIZE_BASE
- bool "Randomize the address of the kernel image"
- select ARM64_MODULE_PLTS
- select RELOCATABLE
- help
- Randomizes the virtual address at which the kernel image is
- loaded, as a security feature that deters exploit attempts
- relying on knowledge of the location of kernel internals.
-
- It is the bootloader's job to provide entropy, by passing a
- random u64 value in /chosen/kaslr-seed at kernel entry.
-
- When booting via the UEFI stub, it will invoke the firmware's
- EFI_RNG_PROTOCOL implementation (if available) to supply entropy
- to the kernel proper. In addition, it will randomise the physical
- location of the kernel Image as well.
-
- If unsure, say N.
-
-config RANDOMIZE_MODULE_REGION_FULL
- bool "Randomize the module region independently from the core kernel"
- depends on RANDOMIZE_BASE
- default y
- help
- Randomizes the location of the module region without considering the
- location of the core kernel. This way, it is impossible for modules
- to leak information about the location of core kernel data structures
- but it does imply that function calls between modules and the core
- kernel will need to be resolved via veneers in the module PLT.
-
- When this option is not set, the module region will be randomized over
- a limited range that contains the [_stext, _etext] interval of the
- core kernel, so branch relocations are always in range.
-
endmenu
menu "Boot options"
-config ARM64_ACPI_PARKING_PROTOCOL
- bool "Enable support for the ARM64 ACPI parking protocol"
- depends on ACPI
- help
- Enable support for the ARM64 ACPI parking protocol. If disabled
- the kernel will not allow booting through the ARM64 ACPI parking
- protocol even if the corresponding data is present in the ACPI
- MADT table.
-
config CMDLINE
string "Default kernel command string"
default ""
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 14c74a66c58e..285b32fa41c1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,10 +15,6 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
-ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie
-endif
-
KBUILD_DEFCONFIG := defconfig
# Check for binutils support for specific extensions
@@ -33,7 +29,6 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
KBUILD_CFLAGS += -fno-pic
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_AFLAGS += $(lseinstr)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
@@ -48,14 +43,10 @@ endif
CHECKFLAGS += -D__aarch64__
-ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
KBUILD_CFLAGS_MODULE += -mcmodel=large
endif
-ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
-KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
-endif
-
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index aee323b13802..caafd63b8092 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -87,26 +87,9 @@ void __init acpi_init_cpus(void);
static inline void acpi_init_cpus(void) { }
#endif /* CONFIG_ACPI */
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-bool acpi_parking_protocol_valid(int cpu);
-void __init
-acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
-#else
-static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
-static inline void
-acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
-{}
-#endif
-
static inline const char *acpi_get_enable_method(int cpu)
{
- if (acpi_psci_present())
- return "psci";
-
- if (acpi_parking_protocol_valid(cpu))
- return "parking-protocol";
-
- return NULL;
+ return acpi_psci_present() ? "psci" : NULL;
}
#ifdef CONFIG_ACPI_APEI
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index beccbdefa106..d56ec0715157 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,8 +1,6 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpufeature.h>
-
#ifndef __ASSEMBLY__
#include <linux/init.h>
@@ -21,6 +19,7 @@ struct alt_instr {
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
+void free_alternatives_memory(void);
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
@@ -65,8 +64,6 @@ void apply_alternatives(void *start, size_t length);
#else
-#include <asm/assembler.h>
-
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
.word \orig_offset - .
.word \alt_offset - .
@@ -140,65 +137,6 @@ void apply_alternatives(void *start, size_t length);
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
-/*
- * Generate the assembly for UAO alternatives with exception table entries.
- * This is complicated as there is no post-increment or pair versions of the
- * unprivileged instructions, and USER() only works for single instructions.
- */
-#ifdef CONFIG_ARM64_UAO
- .macro uao_ldp l, reg1, reg2, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: ldp \reg1, \reg2, [\addr], \post_inc;
-8889: nop;
- nop;
- alternative_else
- ldtr \reg1, [\addr];
- ldtr \reg2, [\addr, #8];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
- .endm
-
- .macro uao_stp l, reg1, reg2, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: stp \reg1, \reg2, [\addr], \post_inc;
-8889: nop;
- nop;
- alternative_else
- sttr \reg1, [\addr];
- sttr \reg2, [\addr, #8];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
- .endm
-
- .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: \inst \reg, [\addr], \post_inc;
- nop;
- alternative_else
- \alt_inst \reg, [\addr];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- .endm
-#else
- .macro uao_ldp l, reg1, reg2, addr, post_inc
- USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
- .endm
- .macro uao_stp l, reg1, reg2, addr, post_inc
- USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
- .endm
- .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
- USER(\l, \inst \reg, [\addr], \post_inc)
- .endm
-#endif
-
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fcaf3cce639a..09f13a96941b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -106,19 +106,12 @@
dmb \opt
.endm
-/*
- * Emit an entry into the exception table
- */
- .macro _asm_extable, from, to
- .pushsection __ex_table, "a"
- .align 3
- .long (\from - .), (\to - .)
- .popsection
- .endm
-
#define USER(l, x...) \
9999: x; \
- _asm_extable 9999b, l
+ .section __ex_table,"a"; \
+ .align 3; \
+ .quad 9999b,l; \
+ .previous
/*
* Register aliases.
@@ -212,17 +205,6 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm
- /*
- * @sym: The name of the per-cpu variable
- * @reg: Result of per_cpu(sym, smp_processor_id())
- * @tmp: scratch register
- */
- .macro this_cpu_ptr, sym, reg, tmp
- adr_l \reg, \sym
- mrs \tmp, tpidr_el1
- add \reg, \reg, \tmp
- .endm
-
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
@@ -234,15 +216,4 @@ lr .req x30 // link register
.size __pi_##x, . - x; \
ENDPROC(x)
- /*
- * Emit a 64-bit absolute little endian symbol reference in a way that
- * ensures that it will be resolved at build time, even when building a
- * PIE binary. This requires cooperation from the linker script, which
- * must emit the lo32/hi32 halves individually.
- */
- .macro le64sym, sym
- .long \sym\()_lo32
- .long \sym\()_hi32
- .endm
-
#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 39c1d340fec5..197e06afbf71 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -36,7 +36,7 @@ static inline void atomic_andnot(int i, atomic_t *v)
" stclr %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_or(int i, atomic_t *v)
@@ -48,7 +48,7 @@ static inline void atomic_or(int i, atomic_t *v)
" stset %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_xor(int i, atomic_t *v)
@@ -60,7 +60,7 @@ static inline void atomic_xor(int i, atomic_t *v)
" steor %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_add(int i, atomic_t *v)
@@ -72,7 +72,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stadd %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
@@ -90,7 +90,7 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return w0; \
}
@@ -116,7 +116,7 @@ static inline void atomic_and(int i, atomic_t *v)
" stclr %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_sub(int i, atomic_t *v)
@@ -133,7 +133,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stadd %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
@@ -153,7 +153,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS , ##cl); \
+ : "x30" , ##cl); \
\
return w0; \
}
@@ -177,7 +177,7 @@ static inline void atomic64_andnot(long i, atomic64_t *v)
" stclr %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_or(long i, atomic64_t *v)
@@ -189,7 +189,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
" stset %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_xor(long i, atomic64_t *v)
@@ -201,7 +201,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
" steor %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_add(long i, atomic64_t *v)
@@ -213,7 +213,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
" stadd %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
@@ -231,7 +231,7 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
@@ -257,7 +257,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
" stclr %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_sub(long i, atomic64_t *v)
@@ -274,7 +274,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
" stadd %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
@@ -294,7 +294,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
@@ -330,7 +330,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
"2:")
: [ret] "+&r" (x0), [v] "+Q" (v->counter)
:
- : __LL_SC_CLOBBERS, "cc", "memory");
+ : "x30", "cc", "memory");
return x0;
}
@@ -359,7 +359,7 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
" mov %" #w "[ret], " #w "30") \
: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
: [old] "r" (x1), [new] "r" (x2) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
@@ -416,7 +416,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
[v] "+Q" (*(unsigned long *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index ebf2481889c3..81151b67b26b 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -11,10 +11,4 @@
#define MIN_FDT_ALIGN 8
#define MAX_FDT_SIZE SZ_2M
-/*
- * arm64 requires the kernel image to placed
- * TEXT_OFFSET bytes beyond a 2 MB aligned base
- */
-#define MIN_KIMG_ALIGN SZ_2M
-
#endif
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
deleted file mode 100644
index ed693c5bcec0..000000000000
--- a/arch/arm64/include/asm/brk-imm.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_BRK_IMM_H
-#define __ASM_BRK_IMM_H
-
-/*
- * #imm16 values used for BRK instruction generation
- * Allowed values for kgdb are 0x400 - 0x7ff
- * 0x100: for triggering a fault on purpose (reserved)
- * 0x400: for dynamic BRK instruction
- * 0x401: for compile time BRK instruction
- * 0x800: kernel-mode BUG() and WARN() traps
- */
-#define FAULT_BRK_IMM 0x100
-#define KGDB_DYN_DBG_BRK_IMM 0x400
-#define KGDB_COMPILED_DBG_BRK_IMM 0x401
-#define BUG_BRK_IMM 0x800
-
-#endif
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 561190d15881..4a748ce9ba1a 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -18,7 +18,7 @@
#ifndef _ARCH_ARM64_ASM_BUG_H
#define _ARCH_ARM64_ASM_BUG_H
-#include <asm/brk-imm.h>
+#include <asm/debug-monitors.h>
#ifdef CONFIG_GENERIC_BUG
#define HAVE_ARCH_BUG
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index df06d37374cc..a7d7d360a514 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -73,7 +73,6 @@ extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
-extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
static inline void flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 510c7b404454..9ea611ea69df 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,6 +19,7 @@
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
+#include <linux/mmdebug.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 13a6103130cd..b5e9cee4b5f8 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -36,7 +36,6 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64isar1;
u64 reg_id_aa64mmfr0;
u64 reg_id_aa64mmfr1;
- u64 reg_id_aa64mmfr2;
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 44e8b3d76fb7..2e73ef7b9239 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -30,11 +30,8 @@
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_NCAPS 11
+#define ARM64_NCAPS 8
#ifndef __ASSEMBLY__
@@ -181,7 +178,7 @@ u64 read_system_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
- return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
+ return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
static inline bool system_supports_mixed_endian_el0(void)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 1ddc9c930097..a2014784cab2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -36,6 +36,12 @@
#define MMFR0_16KGRAN_SHFT 20
#define MMFR0_EL1_16KGRAN_MASK (MMFR0_16KGRAN_SIZE << MMFR0_16KGRAN_SHFT)
+#define read_cpuid(reg) ({ \
+ u64 __val; \
+ asm("mrs %0, " #reg : "=r" (__val)); \
+ __val; \
+})
+
#define MIDR_REVISION_MASK 0xf
#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
#define MIDR_PARTNUM_SHIFT 4
@@ -55,22 +61,11 @@
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
-#define MIDR_CPU_MODEL(imp, partnum) \
+#define MIDR_CPU_PART(imp, partnum) \
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
-#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
- MIDR_ARCHITECTURE_MASK)
-
-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
-({ \
- u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
- u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
- \
- _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
- })
-
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
@@ -88,22 +83,8 @@
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
-#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
-#define MIDR_KRYO2XX_SILVER \
- MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
-
#ifndef __ASSEMBLY__
-#include <asm/sysreg.h>
-
-#define read_cpuid(reg) ({ \
- u64 __val; \
- asm("mrs_s %0, " __stringify(reg) : "=r" (__val)); \
- __val; \
-})
-
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
@@ -111,12 +92,12 @@
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(SYS_MIDR_EL1);
+ return read_cpuid(MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(SYS_MPIDR_EL1);
+ return read_cpuid(MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -131,7 +112,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(SYS_CTR_EL0);
+ return read_cpuid(CTR_EL0);
}
#endif /* __ASSEMBLY__ */
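The restored read_cpuid() pairs naturally with the MIDR_* field accessors above. A minimal sketch of decoding MIDR_EL1 (the dump helper is hypothetical; MIDR_PARTNUM is defined elsewhere in this header):

/* Decode MIDR_EL1 via the accessors above; the helper is illustrative. */
static void dump_midr(void)
{
        u32 midr = read_cpuid_id();

        pr_info("MIDR: impl=0x%02x part=0x%03x rev=%u\n",
                MIDR_IMPLEMENTOR(midr), MIDR_PARTNUM(midr),
                MIDR_REVISION(midr));
}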
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 2fcb9b7c876c..279c85b5ec09 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <asm/brk-imm.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/ptrace.h>
@@ -48,6 +47,19 @@
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
+ */
+#define FAULT_BRK_IMM 0x100
+#define KGDB_DYN_DBG_BRK_IMM 0x400
+#define KGDB_COMPILED_DBG_BRK_IMM 0x401
+#define BUG_BRK_IMM 0x800
+
+/*
* BRK instruction encoding
 * The #imm16 value should be placed at bits[20:5] within the BRK instruction
*/
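Given that bit layout, the opcode for any BRK immediate can be computed directly; a small sketch (the 0xd4200000 base opcode comes from the ARMv8 encoding, and the helper name is hypothetical):

/* Encode BRK #imm16 with the immediate at bits [20:5]. */
static inline u32 brk_opcode(u16 imm16)
{
        return 0xd4200000 | ((u32)imm16 << 5);
}

/* e.g. brk_opcode(BUG_BRK_IMM) yields the BUG()/WARN() trap instruction. */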
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 24ed037f09fd..faad6df49e5b 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -24,6 +24,15 @@
#include <asm/ptrace.h>
#include <asm/user.h>
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef struct user_fpsimd_state elf_fpregset_t;
+
/*
* AArch64 static relocation types.
*/
@@ -77,8 +86,6 @@
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
-#define R_AARCH64_RELATIVE 1027
-
/*
* These are used to set parameters in the core dumps.
*/
@@ -120,17 +127,6 @@
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
-#ifndef __ASSEMBLY__
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
-#define ELF_CORE_COPY_REGS(dest, regs) \
- *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
-
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef struct user_fpsimd_state elf_fpregset_t;
-
/*
* When the program starts, a1 contains a pointer to a function to be
* registered with atexit, as per the SVR4 ABI. A value of 0 means we have no
@@ -190,6 +186,4 @@ extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
#endif /* CONFIG_COMPAT */
-#endif /* !__ASSEMBLY__ */
-
#endif
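On the ELF_NGREG arithmetic above: user_pt_regs is x0-x30 plus sp, pc and pstate, all 64-bit, so the division works out to 34 registers (layout per the uapi ptrace header, assumed here rather than shown):

/* Illustrative restatement of the ELF_NGREG derivation. */
#define ARM64_USER_REGS (31 /* x0-x30 */ + 3 /* sp, pc, pstate */) /* == 34 */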
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 1a617d46fce9..309704544d22 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -62,16 +62,6 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
-
- /*
- * Used for kernel page table creation, so unmapped memory may be used
- * for tables.
- */
- FIX_PTE,
- FIX_PMD,
- FIX_PUD,
- FIX_PGD,
-
__end_of_fixed_addresses
};
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 3c60f37e48ab..c5534facf941 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -28,8 +28,6 @@ struct dyn_arch_ftrace {
extern unsigned long ftrace_graph_call;
-extern void return_to_handler(void);
-
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cdd32c2..007a69fc4f40 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -42,8 +42,10 @@
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
+" .pushsection __ex_table,\"a\"\n" \
+" .align 3\n" \
+" .quad 1b, 4b, 2b, 4b\n" \
+" .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
@@ -119,7 +121,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
@@ -132,9 +133,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
"4: mov %w0, %w6\n"
" b 3b\n"
" .popsection\n"
- _ASM_EXTABLE(1b, 4b)
- _ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+" .pushsection __ex_table,\"a\"\n"
+" .align 3\n"
+" .quad 1b, 4b, 2b, 4b\n"
+" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
: "memory");
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index bbc1e35aa601..bb4052e85dba 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -26,7 +26,36 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
@@ -68,19 +97,4 @@ static inline void arch_clear_hugepage_flags(struct page *page)
clear_bit(PG_dcache_clean, &page->flags);
}
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
-#define arch_make_huge_pte arch_make_huge_pte
-extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty);
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
-extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
-extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep);
-
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 3e1c0e7ef082..a8464c18ef88 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,45 +1,10 @@
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
-#define IRQ_STACK_SIZE THREAD_SIZE
-#define IRQ_STACK_START_SP THREAD_START_SP
-
-#ifndef __ASSEMBLER__
-
-#include <linux/percpu.h>
-
#include <asm-generic/irq.h>
-#include <asm/thread_info.h>
struct pt_regs;
-DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
-
-/*
- * The highest address on the stack, and the first to be used. Used to
- * find the dummy-stack frame put down by el?_irq() in entry.S, which
- * is structured as follows:
- *
- * ------------
- * | | <- irq_stack_ptr
- * top ------------
- * | x19 | <- irq_stack_ptr - 0x08
- * ------------
- * | x29 | <- irq_stack_ptr - 0x10
- * ------------
- *
- * where x19 holds a copy of the task stack pointer where the struct pt_regs
- * from kernel_entry can be found.
- *
- */
-#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
-
-/*
- * The offset from irq_stack_ptr where entry.S will store the original
- * stack pointer. Used by unwind_frame() and dump_backtrace().
- */
-#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
-
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
static inline int nr_legacy_irqs(void)
@@ -50,14 +15,4 @@ static inline int nr_legacy_irqs(void)
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-static inline bool on_irq_stack(unsigned long sp, int cpu)
-{
- /* variable names the same as kernel/stacktrace.c */
- unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
- unsigned long high = low + IRQ_STACK_START_SP;
-
- return (low <= sp && sp <= high);
-}
-
-#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 71ad0f93eb71..2774fa384c47 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -7,14 +7,13 @@
#include <linux/linkage.h>
#include <asm/memory.h>
-#include <asm/pgtable-types.h>
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
*/
#define KASAN_SHADOW_START (VA_START)
-#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
/*
* This value is used to map an address to the corresponding shadow
@@ -29,12 +28,10 @@
#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))
void kasan_init(void);
-void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void);
#else
static inline void kasan_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif
#endif
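For context, KASAN_SHADOW_OFFSET above is chosen so that the generic 1:8 shadow translation lands inside [KASAN_SHADOW_START, KASAN_SHADOW_END); a sketch of that translation (the >> 3 scale factor is the generic KASAN convention, assumed here):

/* One shadow byte tracks 8 bytes of kernel VA. */
static inline void *mem_to_shadow(const void *addr)
{
        return (void *)(((unsigned long)addr >> 3) + KASAN_SHADOW_OFFSET);
}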
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 5c6375d8528b..a459714ee29e 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -79,17 +79,5 @@
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#endif
-/*
- * To make optimal use of block mappings when laying out the linear
- * mapping, round down the base of physical memory to a size that can
- * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
- * (64k granule), or a multiple that can be mapped using contiguous bits
- * in the page tables: 32 * PMD_SIZE (16k granule)
- */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define ARM64_MEMSTART_ALIGN SZ_512M
-#else
-#define ARM64_MEMSTART_ALIGN SZ_1G
-#endif
#endif /* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 419bc6661b5c..5e377101f919 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -102,8 +102,6 @@
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
-#define kvm_ksym_ref(sym) phys_to_virt((u64)&sym - kimage_voffset)
-
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 90c6368ad7c8..a35ce7266aac 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -222,7 +222,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
-u64 __kvm_call_hyp(void *hypfn, ...);
+u64 kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@ -243,8 +243,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
* Call initialization code, and switch to the full blown
* HYP code.
*/
- __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
- hyp_stack_ptr, vector_ptr);
+ kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+ hyp_stack_ptr, vector_ptr);
}
static inline void kvm_arch_hardware_disable(void) {}
@@ -258,6 +258,4 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
-#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
-
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..3de42d68611d 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -26,7 +26,6 @@ __asm__(".arch_extension lse");
/* Macro for constructing calls to out-of-line ll/sc atomics */
#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
-#define __LL_SC_CLOBBERS "x16", "x17", "x30"
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 12f8a00fb3f1..853953cd1f08 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -24,7 +24,6 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
-#include <asm/bug.h>
#include <asm/sizes.h>
/*
@@ -46,15 +45,15 @@
* VA_START - the first kernel virtual address.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define VA_START (UL(0xffffffffffffffff) << VA_BITS)
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define KIMAGE_VADDR (MODULES_END)
-#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
-#define MODULES_VSIZE (SZ_128M)
-#define PCI_IO_END (PAGE_OFFSET - SZ_2M)
+#define MODULES_END (PAGE_OFFSET)
+#define MODULES_VADDR (MODULES_END - SZ_64M)
+#define PCI_IO_END (MODULES_VADDR - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
@@ -72,27 +71,12 @@
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
/*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
- */
-#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
-#else
-#define KASAN_SHADOW_SIZE (0)
-#endif
-
-/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
-#define __virt_to_phys(x) ({ \
- phys_addr_t __x = (phys_addr_t)(x); \
- __x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET : \
- (__x - kimage_voffset); })
-
-#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
-#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
+#define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
+#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
/*
* Convert a page to/from a physical address
@@ -116,40 +100,19 @@
#define MT_S2_NORMAL 0xf
#define MT_S2_DEVICE_nGnRE 0x1
-#ifdef CONFIG_ARM64_4K_PAGES
-#define IOREMAP_MAX_ORDER (PUD_SHIFT)
-#else
-#define IOREMAP_MAX_ORDER (PMD_SHIFT)
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-#define __early_init_dt_declare_initrd(__start, __end) \
- do { \
- initrd_start = (__start); \
- initrd_end = (__end); \
- } while (0)
-#endif
-
#ifndef __ASSEMBLY__
-#include <linux/bitops.h>
-#include <linux/mmdebug.h>
-
-extern s64 memstart_addr;
+extern phys_addr_t memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
-#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
-
-/* the virtual base of the kernel image (minus TEXT_OFFSET) */
-extern u64 kimage_vaddr;
-
-/* the offset between the kernel virtual and physical mappings */
-extern u64 kimage_voffset;
+#define PHYS_OFFSET ({ memstart_addr; })
/*
- * Allow all memory at the discovery stage. We will clip it later.
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
*/
-#define MIN_MEMBLOCK_ADDR 0
-#define MAX_MEMBLOCK_ADDR U64_MAX
+#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
/*
* PFNs are used to describe any physical page; this means
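With the linear-map-only conversions restored, __virt_to_phys() and __phys_to_virt() are single-offset translations that invert one another over the linear region; a trivial sanity sketch (illustrative only):

/* Round-trip a linear-map address through the restored macros. */
static inline bool linear_roundtrip_ok(unsigned long vaddr)
{
        return __phys_to_virt(__virt_to_phys(vaddr)) == vaddr;
}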
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a00f7cf35bbd..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -27,7 +27,6 @@
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
@@ -49,7 +48,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = virt_to_phys(empty_zero_page);
+ unsigned long ttbr = page_to_phys(empty_zero_page);
asm(
" msr ttbr0_el1, %0 // set TTBR0\n"
@@ -74,7 +73,7 @@ static inline bool __cpu_uses_extended_idmap(void)
/*
* Set TCR.T0SZ to its default value (based on VA_BITS)
*/
-static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
+static inline void cpu_set_default_tcr_t0sz(void)
{
unsigned long tcr;
@@ -87,62 +86,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
" msr tcr_el1, %0 ;"
" isb"
: "=&r" (tcr)
- : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
-}
-
-#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
-#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
-
-/*
- * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
- *
- * The idmap lives in the same VA range as userspace, but uses global entries
- * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
- * speculative TLB fetches, we must temporarily install the reserved page
- * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
- *
- * If current is a not a user task, the mm covers the TTBR1_EL1 page tables,
- * which should not be installed in TTBR0_EL1. In this case we can leave the
- * reserved page tables in place.
- */
-static inline void cpu_uninstall_idmap(void)
-{
- struct mm_struct *mm = current->active_mm;
-
- cpu_set_reserved_ttbr0();
- local_flush_tlb_all();
- cpu_set_default_tcr_t0sz();
-
- if (mm != &init_mm)
- cpu_switch_mm(mm->pgd, mm);
-}
-
-static inline void cpu_install_idmap(void)
-{
- cpu_set_reserved_ttbr0();
- local_flush_tlb_all();
- cpu_set_idmap_tcr_t0sz();
-
- cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
-/*
- * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
- * avoiding the possibility of conflicting TLB entries being allocated.
- */
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
-{
- typedef void (ttbr_replace_func)(phys_addr_t);
- extern ttbr_replace_func idmap_cpu_replace_ttbr1;
- ttbr_replace_func *replace_phys;
-
- phys_addr_t pgd_phys = virt_to_phys(pgd);
-
- replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
-
- cpu_install_idmap();
- replace_phys(pgd_phys);
- cpu_uninstall_idmap();
+ : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}
/*
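On the value written back above: TCR_EL1.T0SZ encodes the unused high VA bits, i.e. 64 - VA_BITS. A worked sketch of the arithmetic (TCR_T0SZ_OFFSET lives in pgtable-hwdef.h and is assumed here):

/* For VA_BITS = 39 this gives T0SZ = 25, a 512GB TTBR0 range. */
static inline unsigned long tcr_t0sz(unsigned int va_bits)
{
        return (64UL - va_bits) << TCR_T0SZ_OFFSET;
}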
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..e80e232b730e 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -20,21 +20,4 @@
#define MODULE_ARCH_VERMAGIC "aarch64"
-#ifdef CONFIG_ARM64_MODULE_PLTS
-struct mod_arch_specific {
- struct elf64_shdr *plt;
- int plt_num_entries;
- int plt_max_entries;
-};
-#endif
-
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
- Elf64_Sym *sym);
-
-#ifdef CONFIG_RANDOMIZE_BASE
-extern u64 module_alloc_base;
-#else
-#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
-#endif
-
#endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index ff98585d085a..c15053902942 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -42,20 +42,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
-{
- set_pud(pud, __pud(pmd | prot));
-}
-
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
- __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
-}
-#else
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
-{
- BUILD_BUG();
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}
+
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
@@ -71,20 +62,11 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
free_page((unsigned long)pud);
}
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
-{
- set_pgd(pgdp, __pgd(pud | prot));
-}
-
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
- __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
-}
-#else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
-{
- BUILD_BUG();
+ set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
}
+
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5c25b831273d..d6739e836f7b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -90,23 +90,7 @@
/*
* Contiguous page definitions.
*/
-#ifdef CONFIG_ARM64_64K_PAGES
-#define CONT_PTE_SHIFT 5
-#define CONT_PMD_SHIFT 5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define CONT_PTE_SHIFT 7
-#define CONT_PMD_SHIFT 5
-#else
-#define CONT_PTE_SHIFT 4
-#define CONT_PMD_SHIFT 4
-#endif
-
-#define CONT_PTES (1 << CONT_PTE_SHIFT)
-#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
-#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
-#define CONT_PMDS (1 << CONT_PMD_SHIFT)
-#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
-#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
+#define CONT_PTES (_AC(1, UL) << CONT_SHIFT)
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
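For orientation, the per-granule shifts deleted above imply the following contiguous-run sizes; a small arithmetic sketch (values transcribed from the removed block, macro name hypothetical):

/* Run sizes implied by the deleted CONT_PTE_SHIFT values:
 *   4K  granule: 1 << 4 = 16  PTEs -> 64KB per run
 *   16K granule: 1 << 7 = 128 PTEs -> 2MB per run
 *   64K granule: 1 << 5 = 32  PTEs -> 2MB per run
 */
#define CONT_RUN_SIZE(shift, page_size) ((1UL << (shift)) * (page_size))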
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c3c2518eecfe..eaa9cabf4066 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -36,13 +36,19 @@
*
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel vmalloc space
+ * VMALLOC_START: beginning of the kernel VA space
* VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
* fixed mappings and modules
*/
#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
-#define VMALLOC_START (MODULES_END)
+#ifndef CONFIG_KASAN
+#define VMALLOC_START (VA_START)
+#else
+#include <asm/kasan.h>
+#define VMALLOC_START (KASAN_SHADOW_END + SZ_64K)
+#endif
+
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define VMEMMAP_START (VMALLOC_END + SZ_64K)
@@ -53,7 +59,6 @@
#ifndef __ASSEMBLY__
-#include <asm/fixmap.h>
#include <linux/mmdebug.h>
extern void __pte_error(const char *file, int line, unsigned long val);
@@ -64,11 +69,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -78,7 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
@@ -118,8 +123,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
@@ -131,6 +136,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+/* Find an entry in the third-level page table. */
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
+
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
@@ -140,7 +155,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
-#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -151,18 +165,10 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+#define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_young(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
-
-/*
- * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
- * so that we don't erroneously return false for pages that have been
- * remapped as PROT_NONE but are yet to be flushed from the TLB.
- */
-#define pte_accessible(mm, pte) \
- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
@@ -213,8 +219,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
static inline pte_t pte_mkcont(pte_t pte)
{
- pte = set_pte_bit(pte, __pgprot(PTE_CONT));
- return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
+ return set_pte_bit(pte, __pgprot(PTE_CONT));
}
static inline pte_t pte_mknoncont(pte_t pte)
@@ -222,11 +227,6 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
-static inline pmd_t pmd_mkcont(pmd_t pmd)
-{
- return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
-}
-
static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
@@ -264,13 +264,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_present(pte)) {
+ if (pte_valid_user(pte)) {
+ if (!pte_special(pte) && pte_exec(pte))
+ __sync_icache_dcache(pte, addr);
if (pte_sw_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
else
pte_val(pte) |= PTE_RDONLY;
- if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
- __sync_icache_dcache(pte, addr);
}
/*
@@ -300,7 +300,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
/*
* Hugetlb definitions.
*/
-#define HUGE_MAX_HSTATE 4
+#define HUGE_MAX_HSTATE 2
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
@@ -354,7 +354,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
@@ -427,31 +426,13 @@ static inline void pmd_clear(pmd_t *pmdp)
set_pmd(pmdp, __pmd(0));
}
-static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
- return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
-/* Find an entry in the third-level page table. */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
-#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
-#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
-#define pte_clear_fixmap() clear_fixmap(FIX_PTE)
-
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-/* use ONLY for statically allocated translation tables */
-#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -478,37 +459,21 @@ static inline void pud_clear(pud_t *pudp)
set_pud(pudp, __pud(0));
}
-static inline phys_addr_t pud_page_paddr(pud_t pud)
+static inline pmd_t *pud_page_vaddr(pud_t pud)
{
- return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
+ return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
/* Find an entry in the second-level page table. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset_phys(dir, addr) (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
-#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
-
-#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
-#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
-#define pmd_clear_fixmap() clear_fixmap(FIX_PMD)
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-/* use ONLY for statically allocated translation tables */
-#define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
-
-#else
-
-#define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
-
-/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
-#define pmd_set_fixmap(addr) NULL
-#define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp)
-#define pmd_clear_fixmap()
-
-#define pmd_offset_kimg(dir,addr) ((pmd_t *)dir)
-
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
@@ -530,37 +495,21 @@ static inline void pgd_clear(pgd_t *pgdp)
set_pgd(pgdp, __pgd(0));
}
-static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
+static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
- return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}
/* Find an entry in the first-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset_phys(dir, addr) (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
-#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))
-
-#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
-#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
-#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
+{
+ return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
+}
#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
-/* use ONLY for statically allocated translation tables */
-#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
-
-#else
-
-#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})
-
-/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
-#define pud_set_fixmap(addr) NULL
-#define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp)
-#define pud_clear_fixmap()
-
-#define pud_offset_kimg(dir,addr) ((pud_t *)dir)
-
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -568,16 +517,11 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
/* to find an entry in a page-table-directory */
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
-
-#define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr)))
+#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
-#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -697,7 +641,6 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* bits 0-1: present (must be zero)
* bits 2-7: swap type
* bits 8-57: swap offset
- * bit 58: PTE_PROT_NONE (must be zero)
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
@@ -723,8 +666,7 @@ extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
-void pgd_cache_init(void);
-#define pgtable_cache_init pgd_cache_init
+#define pgtable_cache_init() do { } while (0)
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
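The restored pointer-returning walkers compose in the obvious way; a hedged sketch of a software walk down to the PTE (assumes CONFIG_PGTABLE_LEVELS > 3, takes no locks, and ignores block mappings):

/* Illustrative walk to the PTE for addr using the helpers above. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, addr);
}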
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index c97d01eb219f..c8c7c4d38171 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -29,10 +29,8 @@
#include <linux/string.h>
-#include <asm/alternative.h>
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
-#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -182,11 +180,9 @@ static inline void prefetchw(const void *ptr)
}
#define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *ptr)
+static inline void spin_lock_prefetch(const void *x)
{
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- "prfm pstl1strm, %a0",
- "nop") : : "p" (ptr));
+ prefetchw(x);
}
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -194,6 +190,5 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h
index e368a55ebd22..4df608a8459e 100644
--- a/arch/arm64/include/asm/shmparam.h
+++ b/arch/arm64/include/asm/shmparam.h
@@ -21,7 +21,7 @@
* alignment value. Since we don't have aliasing D-caches, the rest of
* the time we can safely use PAGE_SIZE.
*/
-#define COMPAT_SHMLBA (4 * PAGE_SIZE)
+#define COMPAT_SHMLBA 0x4000
#include <asm-generic/shmparam.h>
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 2013a4dc5124..c63c432f1a82 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -63,15 +63,7 @@ extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
-#else
-static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- BUILD_BUG();
-}
-#endif
extern int __cpu_disable(void);
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 7b369923b2eb..be4d2f9b7e8c 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -26,28 +26,9 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- unsigned int tmp;
- arch_spinlock_t lockval;
- asm volatile(
-" sevl\n"
-"1: wfe\n"
-"2: ldaxr %w0, %2\n"
-" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 1b\n"
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
- /* LSE atomics */
-" nop\n"
-" nop\n")
- : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- :
- : "memory");
-}
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 801a16dbbdf6..7318f6d54aa9 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -16,19 +16,14 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
-struct task_struct;
-
struct stackframe {
unsigned long fp;
unsigned long sp;
unsigned long pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- unsigned int graph;
-#endif
};
-extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
-extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+extern int unwind_frame(struct stackframe *frame);
+extern void walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b9fd8ec79033..d48ab5b41f52 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -70,19 +70,15 @@
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
-#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
-#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
(!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
- (!!x)<<8 | 0x1f)
/* SCTLR_EL1 */
#define SCTLR_EL1_CP15BEN (0x1 << 5)
@@ -139,9 +135,6 @@
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
#define ID_AA64MMFR1_HADBS_SHIFT 0
-/* id_aa64mmfr2 */
-#define ID_AA64MMFR2_UAO_SHIFT 4
-
/* id_aa64dfr0 */
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
@@ -201,32 +194,32 @@
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
- .equ .L__reg_num_x\num, \num
+ .equ __reg_num_x\num, \num
.endr
- .equ .L__reg_num_xzr, 31
+ .equ __reg_num_xzr, 31
.macro mrs_s, rt, sreg
- .inst 0xd5200000|(\sreg)|(.L__reg_num_\rt)
+ .inst 0xd5200000|(\sreg)|(__reg_num_\rt)
.endm
.macro msr_s, sreg, rt
- .inst 0xd5000000|(\sreg)|(.L__reg_num_\rt)
+ .inst 0xd5000000|(\sreg)|(__reg_num_\rt)
.endm
#else
asm(
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-" .equ .L__reg_num_x\\num, \\num\n"
+" .equ __reg_num_x\\num, \\num\n"
" .endr\n"
-" .equ .L__reg_num_xzr, 31\n"
+" .equ __reg_num_xzr, 31\n"
"\n"
" .macro mrs_s, rt, sreg\n"
-" .inst 0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
+" .inst 0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
" .endm\n"
"\n"
" .macro msr_s, sreg, rt\n"
-" .inst 0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
+" .inst 0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
" .endm\n"
);
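The renamed (no .L prefix) register-number symbols keep mrs_s/msr_s usable from both assembly and inline asm; a usage sketch modelled on the read_cpuid() variant removed earlier in this patch (__stringify comes from linux/stringify.h):

/* Read an ID register through the mrs_s macro defined above. */
static inline u64 read_mmfr0(void)
{
        u64 val;

        asm("mrs_s %0, " __stringify(SYS_ID_AA64MMFR0_EL1) : "=r" (val));
        return val;
}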
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 4a9e82e4f724..bfef76e14e2d 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -73,16 +73,10 @@ register unsigned long current_stack_pointer asm ("sp");
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
-/*
- * struct thread_info can be accessed directly via sp_el0.
- */
static inline struct thread_info *current_thread_info(void)
{
- unsigned long sp_el0;
-
- asm ("mrs %0, sp_el0" : "=r" (sp_el0));
-
- return (struct thread_info *)sp_el0;
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \
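The restored current_thread_info() relies on struct thread_info sitting at the base of the THREAD_SIZE-aligned kernel stack, so masking any in-stack address recovers it; a sketch of that invariant (macro name hypothetical):

/* Mask an in-stack pointer down to the thread_info at the stack base. */
#define stack_to_thread_info(sp) \
        ((struct thread_info *)((unsigned long)(sp) & ~(THREAD_SIZE - 1)))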
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74572af..b2ede967fe7d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -36,11 +36,11 @@
#define VERIFY_WRITE 1
/*
- * The exception table consists of pairs of relative offsets: the first
- * is the relative offset to an instruction that is allowed to fault,
- * and the second is the relative offset at which the program should
- * continue. No registers are modified, so it is entirely up to the
- * continuation code to figure out what to do.
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
@@ -50,11 +50,9 @@
struct exception_table_entry
{
- int insn, fixup;
+ unsigned long insn, fixup;
};
-#define ARCH_HAS_RELATIVE_EXTABLE
-
extern int fixup_exception(struct pt_regs *regs);
#define KERNEL_DS (-1UL)
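With absolute insn/fixup addresses restored, an exception-table lookup becomes a plain address comparison; a hedged sketch of the search (the real kernel sorts the table and uses a binary search, so this linear scan is illustrative only):

/* Find the fixup address for a faulting instruction. The __start/__stop
 * symbols bound the __ex_table section that the ".quad 1b, 3b" entries
 * above populate. */
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

static unsigned long find_fixup(unsigned long fault_insn)
{
        struct exception_table_entry *e;

        for (e = __start___ex_table; e < __stop___ex_table; e++)
                if (e->insn == fault_insn)
                        return e->fixup;
        return 0;
}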
@@ -66,16 +64,6 @@ extern int fixup_exception(struct pt_regs *regs);
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
-
- /*
- * Enable/disable UAO so that copy_to_user() etc can access
- * kernel memory with the unprivileged instructions.
- */
- if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
- else
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
- CONFIG_ARM64_UAO));
}
#define segment_eq(a, b) ((a) == (b))
@@ -117,12 +105,6 @@ static inline void set_fs(mm_segment_t fs)
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
-#define _ASM_EXTABLE(from, to) \
- " .pushsection __ex_table, \"a\"\n" \
- " .align 3\n" \
- " .long (" #from " - .), (" #to " - .)\n" \
- " .popsection\n"
-
/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
@@ -131,10 +113,9 @@ static inline void set_fs(mm_segment_t fs)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_user_asm(instr, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " instr " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup, \"ax\"\n" \
" .align 2\n" \
@@ -142,7 +123,10 @@ static inline void set_fs(mm_segment_t fs)
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 1b, 3b\n" \
+ " .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
@@ -150,30 +134,26 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
break; \
case 2: \
- __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldrh", "%w", __gu_val, (ptr), (err)); \
break; \
case 4: \
- __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldr", "%w", __gu_val, (ptr), (err)); \
break; \
case 8: \
- __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldr", "%", __gu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
} \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
} while (0)
@@ -201,17 +181,19 @@ do { \
((x) = 0, -EFAULT); \
})
-#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __put_user_asm(instr, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " instr " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %w0, %3\n" \
" b 2b\n" \
" .previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 1b, 3b\n" \
+ " .previous" \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
@@ -219,29 +201,25 @@ do { \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
break; \
case 2: \
- __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("strh", "%w", __pu_val, (ptr), (err)); \
break; \
case 4: \
- __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("str", "%w", __pu_val, (ptr), (err)); \
break; \
case 8: \
- __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("str", "%", __pu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
} \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
} while (0)
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 2b79b8a89457..aab5bf09e9d9 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -16,8 +16,6 @@
#ifndef __ASM_WORD_AT_A_TIME_H
#define __ASM_WORD_AT_A_TIME_H
-#include <asm/uaccess.h>
-
#ifndef __AARCH64EB__
#include <linux/kernel.h>
@@ -83,7 +81,10 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
#endif
" b 2b\n"
" .popsection\n"
- _ASM_EXTABLE(1b, 3b)
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 1b, 3b\n"
+ " .popsection"
: "=&r" (ret), "=&r" (offset)
: "r" (addr), "Q" (*(unsigned long *)addr));
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..208db3df135a 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -45,7 +45,6 @@
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
#define PSR_PAN_BIT 0x00400000
-#define PSR_UAO_BIT 0x00800000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f6e0269de4c7..9f7794c5743f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -30,7 +30,6 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_debug.o \
perf_trace_counters.o \
@@ -44,8 +43,6 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
arm64-obj-$(CONFIG_ACPI) += acpi.o
-arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
-arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
deleted file mode 100644
index 4b1e5a7a98da..000000000000
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * ARM64 ACPI Parking Protocol implementation
- *
- * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- * Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/acpi.h>
-#include <linux/types.h>
-
-#include <asm/cpu_ops.h>
-
-struct cpu_mailbox_entry {
- phys_addr_t mailbox_addr;
- u8 version;
- u8 gic_cpu_id;
-};
-
-static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS];
-
-void __init acpi_set_mailbox_entry(int cpu,
- struct acpi_madt_generic_interrupt *p)
-{
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
-
- cpu_entry->mailbox_addr = p->parked_address;
- cpu_entry->version = p->parking_version;
- cpu_entry->gic_cpu_id = p->cpu_interface_number;
-}
-
-bool acpi_parking_protocol_valid(int cpu)
-{
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
-
- return cpu_entry->mailbox_addr && cpu_entry->version;
-}
-
-static int acpi_parking_protocol_cpu_init(unsigned int cpu)
-{
- pr_debug("%s: ACPI parked addr=%llx\n", __func__,
- cpu_mailbox_entries[cpu].mailbox_addr);
-
- return 0;
-}
-
-static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
-{
- return 0;
-}
-
-struct parking_protocol_mailbox {
- __le32 cpu_id;
- __le32 reserved;
- __le64 entry_point;
-};
-
-static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
-{
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
- struct parking_protocol_mailbox __iomem *mailbox;
- __le32 cpu_id;
-
- /*
- * Map mailbox memory with attribute device nGnRE (ie ioremap -
- * this deviates from the parking protocol specifications since
- * the mailboxes are required to be mapped nGnRnE; the attribute
- * discrepancy is harmless insofar as the protocol specification
- * is concerned).
- * If the mailbox is mistakenly allocated in the linear mapping
- * by FW ioremap will fail since the mapping will be prevented
- * by the kernel (it clashes with the linear mapping attributes
- * specifications).
- */
- mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
- if (!mailbox)
- return -EIO;
-
- cpu_id = readl_relaxed(&mailbox->cpu_id);
- /*
- * Check if firmware has set-up the mailbox entry properly
- * before kickstarting the respective cpu.
- */
- if (cpu_id != ~0U) {
- iounmap(mailbox);
- return -ENXIO;
- }
-
- /*
- * We write the entry point and cpu id as LE regardless of the
- * native endianness of the kernel. Therefore, any boot-loaders
- * that read this address need to convert this address to the
- * Boot-Loader's endianness before jumping.
- */
- writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
- writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
-
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
-
- iounmap(mailbox);
-
- return 0;
-}
-
-static void acpi_parking_protocol_cpu_postboot(void)
-{
- int cpu = smp_processor_id();
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
- struct parking_protocol_mailbox __iomem *mailbox;
- __le64 entry_point;
-
- /*
- * Map mailbox memory with attribute device nGnRE (ie ioremap -
- * this deviates from the parking protocol specifications since
- * the mailboxes are required to be mapped nGnRnE; the attribute
- * discrepancy is harmless insofar as the protocol specification
- * is concerned).
- * If the mailbox is mistakenly allocated in the linear mapping
- * by FW ioremap will fail since the mapping will be prevented
- * by the kernel (it clashes with the linear mapping attributes
- * specifications).
- */
- mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
- if (!mailbox)
- return;
-
- entry_point = readq_relaxed(&mailbox->entry_point);
- /*
- * Check if firmware has cleared the entry_point as expected
- * by the protocol specification.
- */
- WARN_ON(entry_point);
-
- iounmap(mailbox);
-}
-
-const struct cpu_operations acpi_parking_protocol_ops = {
- .name = "parking-protocol",
- .cpu_init = acpi_parking_protocol_cpu_init,
- .cpu_prepare = acpi_parking_protocol_cpu_prepare,
- .cpu_boot = acpi_parking_protocol_cpu_boot,
- .cpu_postboot = acpi_parking_protocol_cpu_postboot
-};
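
For context, the other side of the handshake implemented by acpi_parking_protocol_cpu_boot() above is firmware spinning the parked core on the same mailbox. A minimal sketch of that loop, assuming the mailbox layout shown and little-endian fields (illustrative only; real parking-protocol firmware runs with the MMU off and is not built against kernel headers):

	static void park_cpu(struct parking_protocol_mailbox *mb, u32 my_gic_cpu_id)
	{
		void (*entry)(void);

		/* The OS writes entry_point first, then our cpu_id (both LE). */
		while (le32_to_cpu(mb->cpu_id) != my_gic_cpu_id)
			wfi();		/* woken by the wakeup IPI sent above */

		entry = (void (*)(void))le64_to_cpu(mb->entry_point);
		entry();		/* jumps to secondary_entry */
	}
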
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index e6cb1dc63a2a..47a8caf28bcc 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -158,3 +158,9 @@ void apply_alternatives(void *start, size_t length)
__apply_alternatives(&region);
}
+
+void free_alternatives_memory(void)
+{
+ free_reserved_area(__alt_instructions, __alt_instructions_end,
+ 0, "alternatives");
+}
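
free_alternatives_memory() hands the .altinstructions region back to the page allocator once patching is done. A sketch of a plausible call site (assumed; the exact caller on this branch may differ):

	/* Reclaim the alternatives tables once no CPU will consult them again. */
	void free_initmem(void)
	{
		free_initmem_default(POISON_FREE_INITMEM);
		free_alternatives_memory();
	}
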
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c37202c0c838..937f5e58a4d3 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -62,7 +62,7 @@ struct insn_emulation {
};
static LIST_HEAD(insn_emulation);
-static int nr_insn_emulated __initdata;
+static int nr_insn_emulated;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
static void register_emulation_hooks(struct insn_emulation_ops *ops)
@@ -173,7 +173,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
return ret;
}
-static void __init register_insn_emulation(struct insn_emulation_ops *ops)
+static void register_insn_emulation(struct insn_emulation_ops *ops)
{
unsigned long flags;
struct insn_emulation *insn;
@@ -237,7 +237,7 @@ static struct ctl_table ctl_abi[] = {
{ }
};
-static void __init register_insn_emulation_sysctl(struct ctl_table *table)
+static void register_insn_emulation_sysctl(struct ctl_table *table)
{
unsigned long flags;
int i = 0;
@@ -297,8 +297,11 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection" \
- _ASM_EXTABLE(0b, 4b) \
- _ASM_EXTABLE(1b, 4b) \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 0b, 4b\n" \
+ " .quad 1b, 4b\n" \
+ " .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
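
The replacement above open-codes what the _ASM_EXTABLE() helper wraps: an __ex_table entry pairing a potentially faulting instruction with its fixup label. A sketch of the helper being replaced (assumed shape; the real definition lives in the uaccess headers):

	/* Two 8-byte-aligned quads: faulting insn address, fixup address. */
	#define _ASM_EXTABLE(from, to)				\
		"	.pushsection	__ex_table, \"a\"\n"	\
		"	.align		3\n"			\
		"	.quad		" #from ", " #to "\n"	\
		"	.popsection\n"
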
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1555520bbb74..34aa4b3b47e9 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -21,12 +21,26 @@
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_KRYO2XX_SILVER \
+ MIDR_CPU_PART(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
+
+#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{
- return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
- entry->midr_range_min,
- entry->midr_range_max);
+ u32 midr = read_cpuid_id();
+
+ if ((midr & CPU_MODEL_MASK) != entry->midr_model)
+ return false;
+
+ midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
+
+ return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
}
#define MIDR_RANGE(model, min, max) \
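
A worked example of the decomposition above, using an illustrative MIDR value (a Cortex-A57 r1p2 reports MIDR_EL1 = 0x411fd072 per the ARM ARM):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t midr = 0x411fd072;		/* Cortex-A57 r1p2 */
		uint32_t model_mask = 0xff0ffff0;	/* implementer | architecture | partnum */
		uint32_t rev_var_mask = 0x00f0000f;	/* variant | revision */

		printf("model   = %#010x\n", midr & model_mask);	/* 0x410fd070 */
		printf("rev/var = %#010x\n", midr & rev_var_mask);	/* 0x00100002 */
		return 0;
	}

The range test then compares 0x00100002 (r1p2) against the midr_range_min/max bounds supplied by each MIDR_RANGE() entry.
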
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index c7cfb8fe06f9..b6bd7d447768 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -25,30 +25,19 @@
#include <asm/smp_plat.h>
extern const struct cpu_operations smp_spin_table_ops;
-extern const struct cpu_operations acpi_parking_protocol_ops;
extern const struct cpu_operations cpu_psci_ops;
const struct cpu_operations *cpu_ops[NR_CPUS];
-static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
&smp_spin_table_ops,
&cpu_psci_ops,
NULL,
};
-static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
- &acpi_parking_protocol_ops,
-#endif
- &cpu_psci_ops,
- NULL,
-};
-
static const struct cpu_operations * __init cpu_get_ops(const char *name)
{
- const struct cpu_operations **ops;
-
- ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
+ const struct cpu_operations **ops = supported_cpu_ops;
while (*ops) {
if (!strcmp(name, (*ops)->name))
@@ -86,16 +75,8 @@ static const char *__init cpu_read_enable_method(int cpu)
}
} else {
enable_method = acpi_get_enable_method(cpu);
- if (!enable_method) {
- /*
- * In ACPI systems the boot CPU does not require
- * checking the enable method since for some
- * boot protocol (ie parking protocol) it need not
- * be initialized. Don't warn spuriously.
- */
- if (cpu != 0)
- pr_err("Unsupported ACPI enable-method\n");
- }
+ if (!enable_method)
+ pr_err("Unsupported ACPI enable-method\n");
}
return enable_method;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7566cad9fa1d..0669c63281ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -67,10 +67,6 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
.width = 0, \
}
-/* meta feature for alternatives */
-static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
-
static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
@@ -127,11 +123,6 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_END,
};
-static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
- ARM64_FTR_END,
-};
-
static struct arm64_ftr_bits ftr_ctr[] = {
U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
@@ -293,7 +284,6 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = {
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
- ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
/* Op1 = 3, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
@@ -418,7 +408,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
- init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -528,8 +517,6 @@ void update_cpu_features(int cpu,
info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
- taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
- info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
/*
* EL3 is not our concern.
@@ -634,18 +621,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
return has_sre;
}
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
-{
- u32 midr = read_cpuid_id();
- u32 rv_min, rv_max;
-
- /* Cavium ThunderX pass 1.x and 2.x */
- rv_min = 0;
- rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
-
- return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
-}
-
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -676,28 +651,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = 2,
},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
- {
- .desc = "Software prefetching using PRFM",
- .capability = ARM64_HAS_NO_HW_PREFETCH,
- .matches = has_no_hw_prefetch,
- },
-#ifdef CONFIG_ARM64_UAO
- {
- .desc = "User Access Override",
- .capability = ARM64_HAS_UAO,
- .matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .field_pos = ID_AA64MMFR2_UAO_SHIFT,
- .min_field_value = 1,
- .enable = cpu_enable_uao,
- },
-#endif /* CONFIG_ARM64_UAO */
-#ifdef CONFIG_ARM64_PAN
- {
- .capability = ARM64_ALT_PAN_NOT_UAO,
- .matches = cpufeature_pan_not_uao,
- },
-#endif /* CONFIG_ARM64_PAN */
{},
};
@@ -731,7 +684,7 @@ static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
{},
};
-static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
@@ -776,12 +729,12 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *
return rc;
}
-static void __init setup_cpu_hwcaps(void)
+static void setup_cpu_hwcaps(void)
{
int i;
const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
- for (i = 0; hwcaps[i].matches; i++)
+ for (i = 0; hwcaps[i].desc; i++)
if (hwcaps[i].matches(&hwcaps[i]))
cap_set_hwcap(&hwcaps[i]);
}
@@ -791,11 +744,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
{
int i;
- for (i = 0; caps[i].matches; i++) {
+ for (i = 0; caps[i].desc; i++) {
if (!caps[i].matches(&caps[i]))
continue;
- if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
+ if (!cpus_have_cap(caps[i].capability))
pr_info("%s %s\n", info, caps[i].desc);
cpus_set_cap(caps[i].capability);
}
@@ -805,12 +758,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void __init
-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
int i;
- for (i = 0; caps[i].matches; i++)
+ for (i = 0; caps[i].desc; i++)
if (caps[i].enable && cpus_have_cap(caps[i].capability))
on_each_cpu(caps[i].enable, NULL, true);
}
@@ -838,36 +790,35 @@ static inline void set_sys_caps_initialised(void)
static u64 __raw_read_system_reg(u32 sys_id)
{
switch (sys_id) {
- case SYS_ID_PFR0_EL1: return read_cpuid(SYS_ID_PFR0_EL1);
- case SYS_ID_PFR1_EL1: return read_cpuid(SYS_ID_PFR1_EL1);
- case SYS_ID_DFR0_EL1: return read_cpuid(SYS_ID_DFR0_EL1);
- case SYS_ID_MMFR0_EL1: return read_cpuid(SYS_ID_MMFR0_EL1);
- case SYS_ID_MMFR1_EL1: return read_cpuid(SYS_ID_MMFR1_EL1);
- case SYS_ID_MMFR2_EL1: return read_cpuid(SYS_ID_MMFR2_EL1);
- case SYS_ID_MMFR3_EL1: return read_cpuid(SYS_ID_MMFR3_EL1);
- case SYS_ID_ISAR0_EL1: return read_cpuid(SYS_ID_ISAR0_EL1);
- case SYS_ID_ISAR1_EL1: return read_cpuid(SYS_ID_ISAR1_EL1);
- case SYS_ID_ISAR2_EL1: return read_cpuid(SYS_ID_ISAR2_EL1);
- case SYS_ID_ISAR3_EL1: return read_cpuid(SYS_ID_ISAR3_EL1);
- case SYS_ID_ISAR4_EL1: return read_cpuid(SYS_ID_ISAR4_EL1);
- case SYS_ID_ISAR5_EL1: return read_cpuid(SYS_ID_ISAR5_EL1);
- case SYS_MVFR0_EL1: return read_cpuid(SYS_MVFR0_EL1);
- case SYS_MVFR1_EL1: return read_cpuid(SYS_MVFR1_EL1);
- case SYS_MVFR2_EL1: return read_cpuid(SYS_MVFR2_EL1);
-
- case SYS_ID_AA64PFR0_EL1: return read_cpuid(SYS_ID_AA64PFR0_EL1);
- case SYS_ID_AA64PFR1_EL1: return read_cpuid(SYS_ID_AA64PFR1_EL1);
- case SYS_ID_AA64DFR0_EL1: return read_cpuid(SYS_ID_AA64DFR0_EL1);
- case SYS_ID_AA64DFR1_EL1: return read_cpuid(SYS_ID_AA64DFR1_EL1);
- case SYS_ID_AA64MMFR0_EL1: return read_cpuid(SYS_ID_AA64MMFR0_EL1);
- case SYS_ID_AA64MMFR1_EL1: return read_cpuid(SYS_ID_AA64MMFR1_EL1);
- case SYS_ID_AA64MMFR2_EL1: return read_cpuid(SYS_ID_AA64MMFR2_EL1);
- case SYS_ID_AA64ISAR0_EL1: return read_cpuid(SYS_ID_AA64ISAR0_EL1);
- case SYS_ID_AA64ISAR1_EL1: return read_cpuid(SYS_ID_AA64ISAR1_EL1);
-
- case SYS_CNTFRQ_EL0: return read_cpuid(SYS_CNTFRQ_EL0);
- case SYS_CTR_EL0: return read_cpuid(SYS_CTR_EL0);
- case SYS_DCZID_EL0: return read_cpuid(SYS_DCZID_EL0);
+ case SYS_ID_PFR0_EL1: return (u64)read_cpuid(ID_PFR0_EL1);
+ case SYS_ID_PFR1_EL1: return (u64)read_cpuid(ID_PFR1_EL1);
+ case SYS_ID_DFR0_EL1: return (u64)read_cpuid(ID_DFR0_EL1);
+ case SYS_ID_MMFR0_EL1: return (u64)read_cpuid(ID_MMFR0_EL1);
+ case SYS_ID_MMFR1_EL1: return (u64)read_cpuid(ID_MMFR1_EL1);
+ case SYS_ID_MMFR2_EL1: return (u64)read_cpuid(ID_MMFR2_EL1);
+ case SYS_ID_MMFR3_EL1: return (u64)read_cpuid(ID_MMFR3_EL1);
+ case SYS_ID_ISAR0_EL1: return (u64)read_cpuid(ID_ISAR0_EL1);
+ case SYS_ID_ISAR1_EL1: return (u64)read_cpuid(ID_ISAR1_EL1);
+ case SYS_ID_ISAR2_EL1: return (u64)read_cpuid(ID_ISAR2_EL1);
+ case SYS_ID_ISAR3_EL1: return (u64)read_cpuid(ID_ISAR3_EL1);
+ case SYS_ID_ISAR4_EL1: return (u64)read_cpuid(ID_ISAR4_EL1);
+ case SYS_ID_ISAR5_EL1: return (u64)read_cpuid(ID_ISAR5_EL1);
+ case SYS_MVFR0_EL1: return (u64)read_cpuid(MVFR0_EL1);
+ case SYS_MVFR1_EL1: return (u64)read_cpuid(MVFR1_EL1);
+ case SYS_MVFR2_EL1: return (u64)read_cpuid(MVFR2_EL1);
+
+ case SYS_ID_AA64PFR0_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1);
+ case SYS_ID_AA64PFR1_EL1: return (u64)read_cpuid(ID_AA64PFR1_EL1);
+ case SYS_ID_AA64DFR0_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1);
+ case SYS_ID_AA64DFR1_EL1: return (u64)read_cpuid(ID_AA64DFR1_EL1);
+ case SYS_ID_AA64MMFR0_EL1: return (u64)read_cpuid(ID_AA64MMFR0_EL1);
+ case SYS_ID_AA64MMFR1_EL1: return (u64)read_cpuid(ID_AA64MMFR1_EL1);
+ case SYS_ID_AA64ISAR0_EL1: return (u64)read_cpuid(ID_AA64ISAR0_EL1);
+ case SYS_ID_AA64ISAR1_EL1: return (u64)read_cpuid(ID_AA64ISAR1_EL1);
+
+ case SYS_CNTFRQ_EL0: return (u64)read_cpuid(CNTFRQ_EL0);
+ case SYS_CTR_EL0: return (u64)read_cpuid(CTR_EL0);
+ case SYS_DCZID_EL0: return (u64)read_cpuid(DCZID_EL0);
default:
BUG();
return 0;
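
The switch above relies on read_cpuid() turning each register name into an MRS instruction. On this branch the helper is roughly the following (an assumption; the real definition is in asm/cputype.h):

	#define read_cpuid(reg) ({					\
		u64 __val;						\
		asm("mrs	%0, " #reg : "=r" (__val));		\
		__val;							\
	})
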
@@ -917,7 +868,7 @@ void verify_local_cpu_capabilities(void)
return;
caps = arm64_features;
- for (i = 0; caps[i].matches; i++) {
+ for (i = 0; caps[i].desc; i++) {
if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
continue;
/*
@@ -930,7 +881,7 @@ void verify_local_cpu_capabilities(void)
caps[i].enable(NULL);
}
- for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
+ for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
if (!cpus_have_hwcap(&caps[i]))
continue;
if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
@@ -946,7 +897,7 @@ static inline void set_sys_caps_initialised(void)
#endif /* CONFIG_HOTPLUG_CPU */
-static void __init setup_feature_capabilities(void)
+static void setup_feature_capabilities(void)
{
update_cpu_capabilities(arm64_features, "detected feature:");
enable_cpu_capabilities(arm64_features);
@@ -976,9 +927,3 @@ void __init setup_cpu_features(void)
pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
L1_CACHE_BYTES, cls);
}
-
-static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
-{
- return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
-}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 48281e4a719f..3691553f218e 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -211,41 +211,40 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
info->reg_ctr = read_cpuid_cachetype();
- info->reg_dczid = read_cpuid(SYS_DCZID_EL0);
+ info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
- info->reg_id_aa64dfr0 = read_cpuid(SYS_ID_AA64DFR0_EL1);
- info->reg_id_aa64dfr1 = read_cpuid(SYS_ID_AA64DFR1_EL1);
- info->reg_id_aa64isar0 = read_cpuid(SYS_ID_AA64ISAR0_EL1);
- info->reg_id_aa64isar1 = read_cpuid(SYS_ID_AA64ISAR1_EL1);
+ info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
+ info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
+ info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+ info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
/*
* Explicitly mask out 16KB granule since we do not
* want to support it
*/
- info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1) &
+ info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1) &
(~MMFR0_EL1_16KGRAN_MASK);
- info->reg_id_aa64mmfr1 = read_cpuid(SYS_ID_AA64MMFR1_EL1);
- info->reg_id_aa64mmfr2 = read_cpuid(SYS_ID_AA64MMFR2_EL1);
- info->reg_id_aa64pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
- info->reg_id_aa64pfr1 = read_cpuid(SYS_ID_AA64PFR1_EL1);
-
- info->reg_id_dfr0 = read_cpuid(SYS_ID_DFR0_EL1);
- info->reg_id_isar0 = read_cpuid(SYS_ID_ISAR0_EL1);
- info->reg_id_isar1 = read_cpuid(SYS_ID_ISAR1_EL1);
- info->reg_id_isar2 = read_cpuid(SYS_ID_ISAR2_EL1);
- info->reg_id_isar3 = read_cpuid(SYS_ID_ISAR3_EL1);
- info->reg_id_isar4 = read_cpuid(SYS_ID_ISAR4_EL1);
- info->reg_id_isar5 = read_cpuid(SYS_ID_ISAR5_EL1);
- info->reg_id_mmfr0 = read_cpuid(SYS_ID_MMFR0_EL1);
- info->reg_id_mmfr1 = read_cpuid(SYS_ID_MMFR1_EL1);
- info->reg_id_mmfr2 = read_cpuid(SYS_ID_MMFR2_EL1);
- info->reg_id_mmfr3 = read_cpuid(SYS_ID_MMFR3_EL1);
- info->reg_id_pfr0 = read_cpuid(SYS_ID_PFR0_EL1);
- info->reg_id_pfr1 = read_cpuid(SYS_ID_PFR1_EL1);
-
- info->reg_mvfr0 = read_cpuid(SYS_MVFR0_EL1);
- info->reg_mvfr1 = read_cpuid(SYS_MVFR1_EL1);
- info->reg_mvfr2 = read_cpuid(SYS_MVFR2_EL1);
+ info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+ info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+ info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
cpuinfo_detect_icache_policy(info);
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index f82036e02485..a773db92908b 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -61,7 +61,7 @@ ENTRY(entry)
*/
mov x20, x0 // DTB address
ldr x0, [sp, #16] // relocated _text address
- movz x21, #:abs_g0:stext_offset
+ ldr x21, =stext_offset
add x21, x0, x21
/*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b67e70c34888..e3131b39fbf2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -27,7 +27,6 @@
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
-#include <asm/irq.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@@ -89,12 +88,9 @@
.if \el == 0
mrs x21, sp_el0
- mov tsk, sp
- and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
+ get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
-
- mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
.endif
@@ -112,13 +108,6 @@
.endif
/*
- * Set sp_el0 to current thread_info.
- */
- .if \el == 0
- msr sp_el0, tsk
- .endif
-
- /*
* Registers that may be useful after this macro is invoked:
*
* x21 - aborted SP
@@ -175,44 +164,8 @@ alternative_endif
.endm
.macro get_thread_info, rd
- mrs \rd, sp_el0
- .endm
-
- .macro irq_stack_entry
- mov x19, sp // preserve the original sp
-
- /*
- * Compare sp with the current thread_info, if the top
- * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
- * should switch to the irq stack.
- */
- and x25, x19, #~(THREAD_SIZE - 1)
- cmp x25, tsk
- b.ne 9998f
-
- this_cpu_ptr irq_stack, x25, x26
- mov x26, #IRQ_STACK_START_SP
- add x26, x25, x26
-
- /* switch to the irq stack */
- mov sp, x26
-
- /*
- * Add a dummy stack frame, this non-standard format is fixed up
- * by unwind_frame()
- */
- stp x29, x19, [sp, #-16]!
- mov x29, sp
-
-9998:
- .endm
-
- /*
- * x19 should be preserved between irq_stack_entry and
- * irq_stack_exit.
- */
- .macro irq_stack_exit
- mov sp, x19
+ mov \rd, sp
+ and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.endm
/*
@@ -230,11 +183,10 @@ tsk .req x28 // current thread_info
* Interrupt handling.
*/
.macro irq_handler
- ldr_l x1, handle_arch_irq
+ adrp x1, handle_arch_irq
+ ldr x1, [x1, #:lo12:handle_arch_irq]
mov x0, sp
- irq_stack_entry
blr x1
- irq_stack_exit
.endm
.text
@@ -406,10 +358,10 @@ el1_irq:
bl trace_hardirqs_off
#endif
- get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
+ get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
@@ -674,8 +626,6 @@ ENTRY(cpu_switch_to)
mov v15.16b, v15.16b
#endif
mov sp, x9
- and x9, x9, #~(THREAD_SIZE - 1)
- msr sp_el0, x9
ret
ENDPROC(cpu_switch_to)
@@ -703,14 +653,14 @@ ret_fast_syscall_trace:
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
+ ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
+ tst x2, #PSR_MODE_MASK // user mode regs?
+ b.ne no_work_pending // returning to kernel
enable_irq // enable interrupts for do_notify_resume()
bl do_notify_resume
b ret_to_user
work_resched:
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off // the IRQs are off here, inform the tracing code
-#endif
bl schedule
/*
@@ -722,6 +672,7 @@ ret_to_user:
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
enable_step_tsk x1, x2
+no_work_pending:
kernel_exit 0
ENDPROC(ret_to_user)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index acc1afd5c749..4c46c54a3ad7 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -289,7 +289,7 @@ static struct notifier_block fpsimd_cpu_pm_notifier_block = {
.notifier_call = fpsimd_cpu_pm_notifier,
};
-static void __init fpsimd_pm_init(void)
+static void fpsimd_pm_init(void)
{
cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index ebecf9aa33d1..c851be795080 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -29,11 +29,12 @@ static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
/*
* Note:
- * We are paranoid about modifying text, as if a bug were to happen, it
- * could cause us to read or write to someplace that could cause harm.
- * Carefully read and modify the code with aarch64_insn_*() which uses
- * probe_kernel_*(), and make sure what we read is what we expected it
- * to be before modifying it.
+ * Due to modules and __init, code can disappear and change;
+ * we need to protect against faulting as well as code changing.
+ * We do this with the aarch64_insn_*() helpers, which use probe_kernel_*().
+ *
+ * No lock is held here because all the modifications are run
+ * through stop_machine().
*/
if (validate) {
if (aarch64_insn_read((void *)pc, &replaced))
@@ -92,11 +93,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ftrace_modify_code(pc, old, new, true);
}
-void arch_ftrace_update_code(int command)
-{
- ftrace_modify_all_code(command);
-}
-
int __init ftrace_dyn_arch_init(void)
{
return 0;
@@ -129,20 +125,23 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
* on other archs. It's unlikely on AArch64.
*/
old = *parent;
+ *parent = return_hooker;
trace.func = self_addr;
trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
+ if (!ftrace_graph_entry(&trace)) {
+ *parent = old;
return;
+ }
err = ftrace_push_return_trace(old, self_addr, &trace.depth,
frame_pointer);
- if (err == -EBUSY)
+ if (err == -EBUSY) {
+ *parent = old;
return;
- else
- *parent = return_hooker;
+ }
}
#ifdef CONFIG_DYNAMIC_FTRACE
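
The reordering above installs return_hooker into the parent frame slot before any checks run, and restores the saved value on either failure path. The same pattern in isolation (a standalone sketch with hypothetical stand-ins for the two checks):

	#include <stdio.h>

	static int graph_entry_ok(int key) { return key > 0; }
	static int push_trace_ok(int key)  { return key > 1; }

	static void hook_parent(unsigned long *parent, unsigned long hooker, int key)
	{
		unsigned long old = *parent;

		*parent = hooker;		/* install first... */
		if (!graph_entry_ok(key) || !push_trace_ok(key))
			*parent = old;		/* ...roll back on any failure */
	}

	int main(void)
	{
		unsigned long slot = 0x1000;

		hook_parent(&slot, 0x2000, 0);
		printf("%#lx\n", slot);		/* 0x1000: rolled back */
		hook_parent(&slot, 0x2000, 2);
		printf("%#lx\n", slot);		/* 0x2000: hooked */
		return 0;
	}
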
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a88a15447c3b..b685257926f0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -29,7 +29,6 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
-#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -68,11 +67,12 @@
* in the entry routines.
*/
__HEAD
-_head:
+
/*
* DO NOT MODIFY. Image header expected by Linux boot-loaders.
*/
#ifdef CONFIG_EFI
+efi_head:
/*
* This add instruction has no meaningful effect except that
* its opcode forms the magic "MZ" signature required by UEFI.
@@ -83,9 +83,9 @@ _head:
b stext // branch to kernel start, magic
.long 0 // reserved
#endif
- le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian
- le64sym _kernel_size_le // Effective size of kernel image, little-endian
- le64sym _kernel_flags_le // Informative flags, little-endian
+ .quad _kernel_offset_le // Image load offset from start of RAM, little-endian
+ .quad _kernel_size_le // Effective size of kernel image, little-endian
+ .quad _kernel_flags_le // Informative flags, little-endian
.quad 0 // reserved
.quad 0 // reserved
.quad 0 // reserved
@@ -94,14 +94,14 @@ _head:
.byte 0x4d
.byte 0x64
#ifdef CONFIG_EFI
- .long pe_header - _head // Offset to the PE header.
+ .long pe_header - efi_head // Offset to the PE header.
#else
.word 0 // reserved
#endif
#ifdef CONFIG_EFI
.globl __efistub_stext_offset
- .set __efistub_stext_offset, stext - _head
+ .set __efistub_stext_offset, stext - efi_head
.align 3
pe_header:
.ascii "PE"
@@ -124,7 +124,7 @@ optional_header:
.long _end - stext // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
- .long __efistub_entry - _head // AddressOfEntryPoint
+ .long __efistub_entry - efi_head // AddressOfEntryPoint
.long __efistub_stext_offset // BaseOfCode
extra_header_fields:
@@ -139,7 +139,7 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
- .long _end - _head // SizeOfImage
+ .long _end - efi_head // SizeOfImage
// Everything before the kernel image is considered part of the header
.long __efistub_stext_offset // SizeOfHeaders
@@ -210,7 +210,6 @@ section_table:
ENTRY(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- mov x23, xzr // KASLR offset, defaults to 0
adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag
bl __create_page_tables // x25=TTBR0, x26=TTBR1
@@ -220,13 +219,11 @@ ENTRY(stext)
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
- ldr x27, 0f // address to jump to after
+ ldr x27, =__mmap_switched // address to jump to after
// MMU has been enabled
adr_l lr, __enable_mmu // return (PIC) address
b __cpu_setup // initialise processor
ENDPROC(stext)
- .align 3
-0: .quad __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
@@ -314,7 +311,7 @@ ENDPROC(preserve_boot_args)
__create_page_tables:
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
- mov x28, lr
+ mov x27, lr
/*
* Invalidate the idmap and swapper page tables to avoid potential
@@ -392,11 +389,9 @@ __create_page_tables:
* Map the kernel image (starting with PHYS_OFFSET).
*/
mov x0, x26 // swapper_pg_dir
- ldr x5, =KIMAGE_VADDR
- add x5, x5, x23 // add KASLR displacement
+ mov x5, #PAGE_OFFSET
create_pgd_entry x0, x5, x3, x6
- ldr w6, kernel_img_size
- add x6, x6, x5
+ ldr x6, =KERNEL_END // __va(KERNEL_END)
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -410,11 +405,9 @@ __create_page_tables:
dmb sy
bl __inval_cache_range
- ret x28
+ mov lr, x27
+ ret
ENDPROC(__create_page_tables)
-
-kernel_img_size:
- .long _end - (_head - TEXT_OFFSET)
.ltorg
/*
@@ -422,81 +415,21 @@ kernel_img_size:
*/
.set initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
- mov x28, lr // preserve LR
- adr_l x8, vectors // load VBAR_EL1 with virtual
- msr vbar_el1, x8 // vector table address
- isb
+ adr_l x6, __bss_start
+ adr_l x7, __bss_stop
- // Clear BSS
- adr_l x0, __bss_start
- mov x1, xzr
- adr_l x2, __bss_stop
- sub x2, x2, x0
- bl __pi_memset
- dsb ishst // Make zero page visible to PTW
-
-#ifdef CONFIG_RELOCATABLE
-
- /*
- * Iterate over each entry in the relocation table, and apply the
- * relocations in place.
- */
- adr_l x8, __dynsym_start // start of symbol table
- adr_l x9, __reloc_start // start of reloc table
- adr_l x10, __reloc_end // end of reloc table
-
-0: cmp x9, x10
+1: cmp x6, x7
b.hs 2f
- ldp x11, x12, [x9], #24
- ldr x13, [x9, #-8]
- cmp w12, #R_AARCH64_RELATIVE
- b.ne 1f
- add x13, x13, x23 // relocate
- str x13, [x11, x23]
- b 0b
-
-1: cmp w12, #R_AARCH64_ABS64
- b.ne 0b
- add x12, x12, x12, lsl #1 // symtab offset: 24x top word
- add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
- ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
- ldr x15, [x12, #8] // Elf64_Sym::st_value
- cmp w14, #-0xf // SHN_ABS (0xfff1) ?
- add x14, x15, x23 // relocate
- csel x15, x14, x15, ne
- add x15, x13, x15
- str x15, [x11, x23]
- b 0b
-
-2: adr_l x8, kimage_vaddr // make relocated kimage_vaddr
- dc cvac, x8 // value visible to secondaries
- dsb sy // with MMU off
-#endif
-
+ str xzr, [x6], #8 // Clear BSS
+ b 1b
+2:
adr_l sp, initial_sp, x4
- mov x4, sp
- and x4, x4, #~(THREAD_SIZE - 1)
- msr sp_el0, x4 // Save thread_info
str_l x21, __fdt_pointer, x5 // Save FDT pointer
-
- ldr_l x4, kimage_vaddr // Save the offset between
- sub x4, x4, x24 // the kernel virtual and
- str_l x4, kimage_voffset, x5 // physical mappings
-
+ str_l x24, memstart_addr, x6 // Save PHYS_OFFSET
mov x29, #0
#ifdef CONFIG_KASAN
bl kasan_early_init
#endif
-#ifdef CONFIG_RANDOMIZE_BASE
- cbnz x23, 0f // already running randomized?
- mov x0, x21 // pass FDT address in x0
- bl kaslr_early_init // parse FDT for KASLR options
- cbz x0, 0f // KASLR disabled? just proceed
- mov x23, x0 // record KASLR offset
- ret x28 // we must enable KASLR, return
- // to __enable_mmu()
-0:
-#endif
b start_kernel
ENDPROC(__mmap_switched)
@@ -505,10 +438,6 @@ ENDPROC(__mmap_switched)
* hotplug and needs to have the same protections as the text region
*/
.section ".text","ax"
-
-ENTRY(kimage_vaddr)
- .quad _text - TEXT_OFFSET
-
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
@@ -674,22 +603,14 @@ ENTRY(secondary_startup)
adrp x26, swapper_pg_dir
bl __cpu_setup // initialise processor
- ldr x8, kimage_vaddr
- ldr w9, 0f
- sub x27, x8, w9, sxtw // address to jump to after enabling the MMU
+ ldr x21, =secondary_data
+ ldr x27, =__secondary_switched // address to jump to after enabling the MMU
b __enable_mmu
ENDPROC(secondary_startup)
-0: .long (_text - TEXT_OFFSET) - __secondary_switched
ENTRY(__secondary_switched)
- adr_l x5, vectors
- msr vbar_el1, x5
- isb
-
- ldr_l x0, secondary_data // get secondary_data.stack
+ ldr x0, [x21] // get secondary_data.stack
mov sp, x0
- and x0, x0, #~(THREAD_SIZE - 1)
- msr sp_el0, x0 // save thread_info
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
@@ -707,11 +628,12 @@ ENDPROC(__secondary_switched)
*/
.section ".idmap.text", "ax"
__enable_mmu:
- mrs x18, sctlr_el1 // preserve old SCTLR_EL1 value
mrs x1, ID_AA64MMFR0_EL1
ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
+ ldr x5, =vectors
+ msr vbar_el1, x5
msr ttbr0_el1, x25 // load TTBR0
msr ttbr1_el1, x26 // load TTBR1
isb
@@ -725,26 +647,6 @@ __enable_mmu:
ic iallu
dsb nsh
isb
-#ifdef CONFIG_RANDOMIZE_BASE
- mov x19, x0 // preserve new SCTLR_EL1 value
- blr x27
-
- /*
- * If we return here, we have a KASLR displacement in x23 which we need
- * to take into account by discarding the current kernel mapping and
- * creating a new one.
- */
- msr sctlr_el1, x18 // disable the MMU
- isb
- bl __create_page_tables // recreate kernel mapping
-
- msr sctlr_el1, x19 // re-enable the MMU
- isb
- ic iallu // flush instructions fetched
- dsb nsh // via old mapping
- isb
- add x27, x27, x23 // relocated __mmap_switched
-#endif
br x27
ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index db1bf57948f1..bc2abb8b1599 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -26,40 +26,31 @@
* There aren't any ELF relocations we can use to endian-swap values known only
* at link time (e.g. the subtraction of two symbol addresses), so we must get
* the linker to endian-swap certain values before emitting them.
- *
- * Note that, in order for this to work when building the ELF64 PIE executable
- * (for KASLR), these values should not be referenced via R_AARCH64_ABS64
- * relocations, since these are fixed up at runtime rather than at build time
- * when PIE is in effect. So we need to split them up in 32-bit high and low
- * words.
*/
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define DATA_LE32(data) \
- ((((data) & 0x000000ff) << 24) | \
- (((data) & 0x0000ff00) << 8) | \
- (((data) & 0x00ff0000) >> 8) | \
- (((data) & 0xff000000) >> 24))
+#define DATA_LE64(data) \
+ ((((data) & 0x00000000000000ff) << 56) | \
+ (((data) & 0x000000000000ff00) << 40) | \
+ (((data) & 0x0000000000ff0000) << 24) | \
+ (((data) & 0x00000000ff000000) << 8) | \
+ (((data) & 0x000000ff00000000) >> 8) | \
+ (((data) & 0x0000ff0000000000) >> 24) | \
+ (((data) & 0x00ff000000000000) >> 40) | \
+ (((data) & 0xff00000000000000) >> 56))
#else
-#define DATA_LE32(data) ((data) & 0xffffffff)
+#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
#endif
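
The restored DATA_LE64() mirrors all eight bytes on a big-endian build. A standalone self-check (values illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t data_le64(uint64_t d)	/* same shifts as DATA_LE64 above */
	{
		return ((d & 0x00000000000000ffULL) << 56) |
		       ((d & 0x000000000000ff00ULL) << 40) |
		       ((d & 0x0000000000ff0000ULL) << 24) |
		       ((d & 0x00000000ff000000ULL) <<  8) |
		       ((d & 0x000000ff00000000ULL) >>  8) |
		       ((d & 0x0000ff0000000000ULL) >> 24) |
		       ((d & 0x00ff000000000000ULL) >> 40) |
		       ((d & 0xff00000000000000ULL) >> 56);
	}

	int main(void)
	{
		/* prints 0x7766554433221100 */
		printf("%#llx\n", (unsigned long long)data_le64(0x0011223344556677ULL));
		return 0;
	}
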
-#define DEFINE_IMAGE_LE64(sym, data) \
- sym##_lo32 = DATA_LE32((data) & 0xffffffff); \
- sym##_hi32 = DATA_LE32((data) >> 32)
-
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE 1
#else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE 0
#endif
-#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
-
-#define __HEAD_FLAG_PHYS_BASE 1
+#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
-#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
- (__HEAD_FLAG_PAGE_SIZE << 1) | \
- (__HEAD_FLAG_PHYS_BASE << 3))
+#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
+ (__HEAD_FLAG_PAGE_SIZE << 1))
/*
* These will output as part of the Image header, which should be little-endian
@@ -67,23 +58,13 @@
* endian swapped in head.S, all are done here for consistency.
*/
#define HEAD_SYMBOLS \
- DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \
- DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
- DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
+ _kernel_size_le = DATA_LE64(_end - _text); \
+ _kernel_offset_le = DATA_LE64(TEXT_OFFSET); \
+ _kernel_flags_le = DATA_LE64(__HEAD_FLAGS);
#ifdef CONFIG_EFI
/*
- * Prevent the symbol aliases below from being emitted into the kallsyms
- * table, by forcing them to be absolute symbols (which are conveniently
- * ignored by scripts/kallsyms) rather than section relative symbols.
- * The distinction is only relevant for partial linking, and only for symbols
- * that are defined within a section declaration (which is not the case for
- * the definitions below) so the resulting values will be identical.
- */
-#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
-
-/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
* isolate it from the kernel proper. The following symbols are legally
* accessed by the stub, so provide some aliases to make them accessible.
@@ -92,25 +73,25 @@
* linked at. The routines below are all implemented in assembler in a
* position independent manner
*/
-__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
-__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
-__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub_memset = KALLSYMS_HIDE(__pi_memset);
-__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
-__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
-__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
-__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
+__efistub_memcmp = __pi_memcmp;
+__efistub_memchr = __pi_memchr;
+__efistub_memcpy = __pi_memcpy;
+__efistub_memmove = __pi_memmove;
+__efistub_memset = __pi_memset;
+__efistub_strlen = __pi_strlen;
+__efistub_strcmp = __pi_strcmp;
+__efistub_strncmp = __pi_strncmp;
+__efistub___flush_dcache_area = __pi___flush_dcache_area;
#ifdef CONFIG_KASAN
-__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub___memset = KALLSYMS_HIDE(__pi_memset);
+__efistub___memcpy = __pi_memcpy;
+__efistub___memmove = __pi_memmove;
+__efistub___memset = __pi_memset;
#endif
-__efistub__text = KALLSYMS_HIDE(_text);
-__efistub__end = KALLSYMS_HIDE(_end);
-__efistub__edata = KALLSYMS_HIDE(_edata);
+__efistub__text = _text;
+__efistub__end = _end;
+__efistub__edata = _edata;
#endif
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 2386b26c0712..9f17ec071ee0 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -30,9 +30,6 @@
unsigned long irq_err_count;
-/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
-DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
-
int arch_show_interrupts(struct seq_file *p, int prec)
{
show_ipi_list(p, prec);
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
deleted file mode 100644
index 582983920054..000000000000
--- a/arch/arm64/kernel/kaslr.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/crc32.h>
-#include <linux/init.h>
-#include <linux/libfdt.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-
-#include <asm/fixmap.h>
-#include <asm/kernel-pgtable.h>
-#include <asm/memory.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-u64 __read_mostly module_alloc_base;
-u16 __initdata memstart_offset_seed;
-
-static __init u64 get_kaslr_seed(void *fdt)
-{
- int node, len;
- u64 *prop;
- u64 ret;
-
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- return 0;
-
- prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
- if (!prop || len != sizeof(u64))
- return 0;
-
- ret = fdt64_to_cpu(*prop);
- *prop = 0;
- return ret;
-}
-
-static __init const u8 *get_cmdline(void *fdt)
-{
- static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
-
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- int node;
- const u8 *prop;
-
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
-
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
- return prop;
- }
-out:
- return default_cmdline;
-}
-
-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
- pgprot_t prot);
-
-/*
- * This routine will be executed with the kernel mapped at its default virtual
- * address, and if it returns successfully, the kernel will be remapped, and
- * start_kernel() will be executed from a randomized virtual offset. The
- * relocation will result in all absolute references (e.g., static variables
- * containing function pointers) to be reinitialized, and zero-initialized
- * .bss variables will be reset to 0.
- */
-u64 __init kaslr_early_init(u64 dt_phys)
-{
- void *fdt;
- u64 seed, offset, mask, module_range;
- const u8 *cmdline, *str;
- int size;
-
- /*
- * Set a reasonable default for module_alloc_base in case
- * we end up running with module randomization disabled.
- */
- module_alloc_base = (u64)_etext - MODULES_VSIZE;
-
- /*
- * Try to map the FDT early. If this fails, we simply bail,
- * and proceed with KASLR disabled. We will make another
- * attempt at mapping the FDT in setup_machine()
- */
- early_fixmap_init();
- fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
- if (!fdt)
- return 0;
-
- /*
- * Retrieve (and wipe) the seed from the FDT
- */
- seed = get_kaslr_seed(fdt);
- if (!seed)
- return 0;
-
- /*
- * Check if 'nokaslr' appears on the command line, and
- * return 0 if that is the case.
- */
- cmdline = get_cmdline(fdt);
- str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
- return 0;
-
- /*
- * OK, so we are proceeding with KASLR enabled. Calculate a suitable
- * kernel image offset from the seed. Let's place the kernel in the
- * lower half of the VMALLOC area (VA_BITS - 2).
- * Even if we could randomize at page granularity for 16k and 64k pages,
- * let's always round to 2 MB so we don't interfere with the ability to
- * map using contiguous PTEs
- */
- mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
- offset = seed & mask;
-
- /* use the top 16 bits to randomize the linear region */
- memstart_offset_seed = seed >> 48;
-
- /*
- * The kernel Image should not extend across a 1GB/32MB/512MB alignment
- * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
- * happens, increase the KASLR offset by the size of the kernel image.
- */
- if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
- (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
- offset = (offset + (u64)(_end - _text)) & mask;
-
- if (IS_ENABLED(CONFIG_KASAN))
- /*
- * KASAN does not expect the module region to intersect the
- * vmalloc region, since shadow memory is allocated for each
- * module at load time, whereas the vmalloc region is shadowed
- * by KASAN zero pages. So keep modules out of the vmalloc
- * region if KASAN is enabled.
- */
- return offset;
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
- /*
- * Randomize the module region independently from the core
- * kernel. This prevents modules from leaking any information
- * about the address of the kernel itself, but results in
- * branches between modules and the core kernel that are
- * resolved via PLTs. (Branches between modules will be
- * resolved normally.)
- */
- module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
- module_alloc_base = VMALLOC_START;
- } else {
- /*
- * Randomize the module region by setting module_alloc_base to
- * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
- * _stext) . This guarantees that the resulting region still
- * covers [_stext, _etext], and that all relative branches can
- * be resolved without veneers.
- */
- module_range = MODULES_VSIZE - (u64)(_etext - _stext);
- module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
- }
-
- /* use the lower 21 bits to randomize the base of the module region */
- module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
- module_alloc_base &= PAGE_MASK;
-
- return offset;
-}
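
The offset arithmetic in the removed kaslr_early_init() is easy to check in isolation. A standalone sketch, assuming VA_BITS = 48 and an arbitrary example seed (both illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t seed = 0x123456789abcdef0ULL;	/* hypothetical kaslr-seed */
		uint64_t mask, offset;

		/* lower half of the VMALLOC area, rounded to 2 MB */
		mask = ((1ULL << (48 - 2)) - 1) & ~(0x200000ULL - 1);
		offset = seed & mask;

		printf("mask   = %#llx\n", (unsigned long long)mask);
		printf("offset = %#llx\n", (unsigned long long)offset);

		/* top 16 bits seed the linear region, low 21 bits the modules */
		printf("memstart seed = %#x\n", (unsigned)(seed >> 48));
		printf("module bits   = %#llx\n",
		       (unsigned long long)(seed & ((1 << 21) - 1)));
		return 0;
	}
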
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
deleted file mode 100644
index 1ce90d8450ae..000000000000
--- a/arch/arm64/kernel/module-plts.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/elf.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sort.h>
-
-struct plt_entry {
- /*
- * A program that conforms to the AArch64 Procedure Call Standard
- * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
- * IP1 (x17) may be inserted at any branch instruction that is
- * exposed to a relocation that supports long branches. Since that
- * is exactly what we are dealing with here, we are free to use x16
- * as a scratch register in the PLT veneers.
- */
- __le32 mov0; /* movn x16, #0x.... */
- __le32 mov1; /* movk x16, #0x...., lsl #16 */
- __le32 mov2; /* movk x16, #0x...., lsl #32 */
- __le32 br; /* br x16 */
-};
-
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
- Elf64_Sym *sym)
-{
- struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr;
- int i = mod->arch.plt_num_entries;
- u64 val = sym->st_value + rela->r_addend;
-
- /*
- * We only emit PLT entries against undefined (SHN_UNDEF) symbols,
- * which are listed in the ELF symtab section, but without a type
- * or a size.
- * So, similar to how the module loader uses the Elf64_Sym::st_value
- * field to store the resolved addresses of undefined symbols, let's
- * borrow the Elf64_Sym::st_size field (whose value is never used by
- * the module loader, even for symbols that are defined) to record
- * the address of a symbol's associated PLT entry as we emit it for a
- * zero addend relocation (which is the only kind we have to deal with
- * in practice). This allows us to find duplicates without having to
- * go through the table every time.
- */
- if (rela->r_addend == 0 && sym->st_size != 0) {
- BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]);
- return sym->st_size;
- }
-
- mod->arch.plt_num_entries++;
- BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries);
-
- /*
- * MOVK/MOVN/MOVZ opcode:
- * +--------+------------+--------+-----------+-------------+---------+
- * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
- * +--------+------------+--------+-----------+-------------+---------+
- *
- * Rd := 0x10 (x16)
- * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
- * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
- * sf := 1 (64-bit variant)
- */
- plt[i] = (struct plt_entry){
- cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
- cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
- cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
- cpu_to_le32(0xd61f0200)
- };
-
- if (rela->r_addend == 0)
- sym->st_size = (u64)&plt[i];
-
- return (u64)&plt[i];
-}
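
The immediate packing in the removed module_emit_plt_entry() can be sanity-checked offline. A standalone sketch encoding a hypothetical target address into the movn/movk/movk sequence from the opcode table above; note that movn stores the complement of its immediate, which is why ~val is used and why bits [63:48] of x16 come out all-ones (fine for kernel-range targets):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val = 0xffff000008abc123ULL;	/* hypothetical target */
		uint32_t mov0 = 0x92800010 | (uint32_t)(((~val) & 0xffff) << 5);
		uint32_t mov1 = 0xf2a00010 | (uint32_t)(((val >> 16) & 0xffff) << 5);
		uint32_t mov2 = 0xf2c00010 | (uint32_t)(((val >> 32) & 0xffff) << 5);

		printf("movn x16, #imm         : %#010x\n", mov0);
		printf("movk x16, #imm, lsl #16: %#010x\n", mov1);
		printf("movk x16, #imm, lsl #32: %#010x\n", mov2);
		printf("br   x16               : %#010x\n", 0xd61f0200u);
		return 0;
	}
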
-
-#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
-
-static int cmp_rela(const void *a, const void *b)
-{
- const Elf64_Rela *x = a, *y = b;
- int i;
-
- /* sort by type, symbol index and addend */
- i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
- if (i == 0)
- i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
- if (i == 0)
- i = cmp_3way(x->r_addend, y->r_addend);
- return i;
-}
-
-static bool duplicate_rel(const Elf64_Rela *rela, int num)
-{
- /*
- * Entries are sorted by type, symbol index and addend. That means
- * that, if a duplicate entry exists, it must be in the preceding
- * slot.
- */
- return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
-}
-
-static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
-{
- unsigned int ret = 0;
- Elf64_Sym *s;
- int i;
-
- for (i = 0; i < num; i++) {
- switch (ELF64_R_TYPE(rela[i].r_info)) {
- case R_AARCH64_JUMP26:
- case R_AARCH64_CALL26:
- /*
- * We only have to consider branch targets that resolve
- * to undefined symbols. This is not simply a heuristic,
- * it is a fundamental limitation, since the PLT itself
- * is part of the module, and needs to be within 128 MB
- * as well, so modules can never grow beyond that limit.
- */
- s = syms + ELF64_R_SYM(rela[i].r_info);
- if (s->st_shndx != SHN_UNDEF)
- break;
-
- /*
- * Jump relocations with non-zero addends against
- * undefined symbols are supported by the ELF spec, but
- * do not occur in practice (e.g., 'jump n bytes past
- * the entry point of undefined function symbol f').
- * So we need to support them, but there is no need to
- * take them into consideration when trying to optimize
- * this code. So let's only check for duplicates when
- * the addend is zero: this allows us to record the PLT
- * entry address in the symbol table itself, rather than
- * having to search the list for duplicates each time we
- * emit one.
- */
- if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
- ret++;
- break;
- }
- }
- return ret;
-}
-
-int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
-{
- unsigned long plt_max_entries = 0;
- Elf64_Sym *syms = NULL;
- int i;
-
- /*
- * Find the empty .plt section so we can expand it to store the PLT
- * entries. Record the symtab address as well.
- */
- for (i = 0; i < ehdr->e_shnum; i++) {
- if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0)
- mod->arch.plt = sechdrs + i;
- else if (sechdrs[i].sh_type == SHT_SYMTAB)
- syms = (Elf64_Sym *)sechdrs[i].sh_addr;
- }
-
- if (!mod->arch.plt) {
- pr_err("%s: module PLT section missing\n", mod->name);
- return -ENOEXEC;
- }
- if (!syms) {
- pr_err("%s: module symtab section missing\n", mod->name);
- return -ENOEXEC;
- }
-
- for (i = 0; i < ehdr->e_shnum; i++) {
- Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
- int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
- Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;
-
- if (sechdrs[i].sh_type != SHT_RELA)
- continue;
-
- /* ignore relocations that operate on non-exec sections */
- if (!(dstsec->sh_flags & SHF_EXECINSTR))
- continue;
-
- /* sort by type, symbol index and addend */
- sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
-
- plt_max_entries += count_plts(syms, rels, numrels);
- }
-
- mod->arch.plt->sh_type = SHT_NOBITS;
- mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry);
- mod->arch.plt_num_entries = 0;
- mod->arch.plt_max_entries = plt_max_entries;
- return 0;
-}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 7f316982ce00..f4bc779e62e8 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -30,30 +30,17 @@
#include <asm/insn.h>
#include <asm/sections.h>
+#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
+#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
+
void *module_alloc(unsigned long size)
{
void *p;
- p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + MODULES_VSIZE,
+ p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
- if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- !IS_ENABLED(CONFIG_KASAN))
- /*
- * KASAN can only deal with module allocations being served
- * from the reserved module region, since the remainder of
- * the vmalloc region is already backed by zero shadow pages,
- * and punching holes into it is non-trivial. Since the module
- * region is not randomized when KASAN is enabled, it is even
- * less likely that the module region gets exhausted, so we
- * can simply omit this fallback in that case.
- */
- p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
- VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
- NUMA_NO_NODE, __builtin_return_address(0));
-
if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p);
return NULL;
@@ -88,18 +75,15 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
+ u64 imm_mask = (1 << len) - 1;
s64 sval = do_reloc(op, place, val);
switch (len) {
case 16:
*(s16 *)place = sval;
- if (sval < S16_MIN || sval > U16_MAX)
- return -ERANGE;
break;
case 32:
*(s32 *)place = sval;
- if (sval < S32_MIN || sval > U32_MAX)
- return -ERANGE;
break;
case 64:
*(s64 *)place = sval;
@@ -108,23 +92,34 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
pr_err("Invalid length (%d) for data relocation\n", len);
return 0;
}
+
+ /*
+ * Extract the upper value bits (including the sign bit) and
+ * shift them to bit 0.
+ */
+ sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
+
+ /*
+ * Overflow has occurred if the value is not representable in
+ * len bits (i.e. the upper bits are neither all zero nor a
+ * sign-extension of the low len bits).
+ */
+ if ((u64)(sval + 1) > 2)
+ return -ERANGE;
+
return 0;
}
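
The restored two-step overflow test is compact enough to verify standalone. The sketch below (illustrative) shows that it accepts values representable in len bits either signed or unsigned, and rejects everything else:

	#include <stdint.h>
	#include <stdio.h>

	static int fits(int64_t sval, int len)
	{
		uint64_t imm_mask = (1ULL << len) - 1;

		/* upper value bits, including the sign bit, shifted to bit 0 */
		sval = (int64_t)(sval & ~(imm_mask >> 1)) >> (len - 1);

		/* representable iff this leaves -1, 0 or 1 */
		return (uint64_t)(sval + 1) <= 2;
	}

	int main(void)
	{
		printf("%d\n", fits(0x7fff, 16));	/* 1: fits, signed */
		printf("%d\n", fits(-0x8000, 16));	/* 1: fits, signed */
		printf("%d\n", fits(0xffff, 16));	/* 1: fits, unsigned */
		printf("%d\n", fits(0x12345, 16));	/* 0: overflow */
		return 0;
	}
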
-enum aarch64_insn_movw_imm_type {
- AARCH64_INSN_IMM_MOVNZ,
- AARCH64_INSN_IMM_MOVKZ,
-};
-
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_insn_movw_imm_type imm_type)
+ int lsb, enum aarch64_insn_imm_type imm_type)
{
- u64 imm;
+ u64 imm, limit = 0;
s64 sval;
u32 insn = le32_to_cpu(*(u32 *)place);
sval = do_reloc(op, place, val);
- imm = sval >> lsb;
+ sval >>= lsb;
+ imm = sval & 0xffff;
if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
@@ -133,7 +128,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
* immediate is less than zero.
*/
insn &= ~(3 << 29);
- if (sval >= 0) {
+ if ((s64)imm >= 0) {
/* >=0: Set the instruction to MOVZ (opcode 10b). */
insn |= 2 << 29;
} else {
@@ -145,13 +140,29 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
*/
imm = ~imm;
}
+ imm_type = AARCH64_INSN_IMM_MOVK;
}
/* Update the instruction with the new encoding. */
- insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
*(u32 *)place = cpu_to_le32(insn);
- if (imm > U16_MAX)
+ /* Shift out the immediate field. */
+ sval >>= 16;
+
+ /*
+ * For unsigned immediates, the overflow check is straightforward.
+ * For signed immediates, the sign bit is actually the bit past the
+ * most significant bit of the field.
+ * The AARCH64_INSN_IMM_16 immediate type is unsigned.
+ */
+ if (imm_type != AARCH64_INSN_IMM_16) {
+ sval++;
+ limit++;
+ }
+
+ /* Check the upper bits depending on the sign of the immediate. */
+ if ((u64)sval > limit)
return -ERANGE;
return 0;
@@ -256,25 +267,25 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
@@ -291,7 +302,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
@@ -300,7 +311,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
@@ -309,7 +320,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
@@ -377,13 +388,6 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
AARCH64_INSN_IMM_26);
-
- if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- ovf == -ERANGE) {
- val = module_emit_plt_entry(me, &rel[i], sym);
- ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
- 26, AARCH64_INSN_IMM_26);
- }
break;
default:
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
deleted file mode 100644
index 8949f6c6f729..000000000000
--- a/arch/arm64/kernel/module.lds
+++ /dev/null
@@ -1,3 +0,0 @@
-SECTIONS {
- .plt (NOLOAD) : { BYTE(0) }
-}
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index ff4665462a02..3aa74830cc69 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -164,11 +164,8 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
- walk_stackframe(current, &frame, callchain_trace, entry);
+ walk_stackframe(&frame, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 496e2e33bbec..129fb3f8c322 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -46,7 +46,6 @@
#include <linux/notifier.h>
#include <trace/events/power.h>
-#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/fpsimd.h>
@@ -351,9 +350,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
- if (IS_ENABLED(CONFIG_ARM64_UAO) &&
- cpus_have_cap(ARM64_HAS_UAO))
- childregs->pstate |= PSR_UAO_BIT;
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
@@ -382,17 +378,6 @@ static void tls_thread_switch(struct task_struct *next)
: : "r" (tpidr), "r" (tpidrro));
}
-/* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
-{
- if (IS_ENABLED(CONFIG_ARM64_UAO)) {
- if (task_thread_info(next)->addr_limit == KERNEL_DS)
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
- else
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
- }
-}
-
/*
* Thread switching.
*/
@@ -405,7 +390,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
- uao_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -430,14 +414,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = p->curr_ret_stack;
-#endif
stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
- unwind_frame(p, &frame))
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 1718706fde83..6c4fd2810ecb 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -43,11 +43,8 @@ void *return_address(unsigned int level)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
- walk_stackframe(current, &frame, save_return_addr, &data);
+ walk_stackframe(&frame, save_return_addr, &data);
if (!data.level)
return data.addr;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d1e01e6498bb..cd4eb7ee618c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -63,7 +63,6 @@
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
-#include <asm/mmu_context.h>
unsigned int boot_reason;
EXPORT_SYMBOL(boot_reason);
@@ -328,12 +327,6 @@ void __init setup_arch(char **cmdline_p)
*/
local_async_enable();
- /*
- * TTBR0 is only used for the identity mapping at this stage. Make it
- * point to zero page to avoid speculatively fetching new entries.
- */
- cpu_uninstall_idmap();
-
efi_init();
arm64_memblock_init();
@@ -409,32 +402,3 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
pdev->archdata.dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
}
-
-/*
- * Dump out kernel offset information on panic.
- */
-static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
- void *p)
-{
- u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
- pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
- kaslr_offset, KIMAGE_VADDR);
- } else {
- pr_emerg("Kernel Offset: disabled\n");
- }
- return 0;
-}
-
-static struct notifier_block kernel_offset_notifier = {
- .notifier_call = dump_kernel_offset
-};
-
-static int __init register_kernel_offset_dumper(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &kernel_offset_notifier);
- return 0;
-}
-__initcall(register_kernel_offset_dumper);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index e33fe33876ab..f586f7c875e2 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -173,9 +173,6 @@ ENTRY(cpu_resume)
/* load physical address of identity map page table in x1 */
adrp x1, idmap_pg_dir
mov sp, x2
- /* save thread_info */
- and x2, x2, #~(THREAD_SIZE - 1)
- msr sp_el0, x2
/*
* cpu_do_resume expects x0 to contain context physical address
* pointer and x1 to contain physical address of 1:1 page tables
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b2f5631c3785..08e78e47be95 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -154,7 +154,9 @@ asmlinkage void secondary_start_kernel(void)
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
*/
- cpu_uninstall_idmap();
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
preempt_disable();
trace_hardirqs_off();
@@ -457,17 +459,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(cpu_count) = hwid;
- /*
- * Set-up the ACPI parking protocol cpu entries
- * while initializing the cpu_logical_map to
- * avoid parsing MADT entries multiple times for
- * nothing (ie a valid cpu_logical_map entry should
- * contain a valid parking protocol data set to
- * initialize the cpu if the parking protocol is
- * the only available enable method).
- */
- acpi_set_mailbox_entry(cpu_count, processor);
-
cpu_count++;
}
@@ -710,12 +701,10 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
- smp_cross_call(mask, IPI_WAKEUP);
+ smp_cross_call_common(mask, IPI_WAKEUP);
}
-#endif
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
@@ -866,17 +855,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
#endif
- case IPI_CPU_BACKTRACE:
- ipi_cpu_backtrace(cpu, regs);
+ case IPI_WAKEUP:
break;
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
- case IPI_WAKEUP:
- WARN_ONCE(!acpi_parking_protocol_valid(cpu),
- "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
- cpu);
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
break;
-#endif
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 85aea381fbf6..665c7fedc65b 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,11 +18,9 @@
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
-#include <asm/irq.h>
#include <asm/stacktrace.h>
/*
@@ -38,29 +36,15 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
-int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
- unsigned long irq_stack_ptr;
-
- /*
- * Switching between stacks is valid when tracing current and in
- * non-preemptible context.
- */
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
low = frame->sp;
- /* irq stacks are not THREAD_SIZE aligned */
- if (on_irq_stack(frame->sp, raw_smp_processor_id()))
- high = irq_stack_ptr;
- else
- high = ALIGN(low, THREAD_SIZE) - 0x20;
+ high = ALIGN(low, THREAD_SIZE);
- if (fp < low || fp > high || fp & 0xf)
+ if (fp < low || fp > high - 0x18 || fp & 0xf)
return -EINVAL;
kasan_disable_current();
@@ -69,55 +53,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 8);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (tsk && tsk->ret_stack &&
- (frame->pc == (unsigned long)return_to_handler)) {
- /*
- * This is a case where function graph tracer has
- * modified a return address (LR) in a stack frame
- * to hook a function return.
- * So replace it to an original value.
- */
- frame->pc = tsk->ret_stack[frame->graph--].ret;
- }
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
- /*
- * Check whether we are going to walk through from interrupt stack
- * to task stack.
- * If we reach the end of the stack - and its an interrupt stack,
- * unpack the dummy frame to find the original elr.
- *
- * Check the frame->fp we read from the bottom of the irq_stack,
- * and the original task stack pointer are both in current->stack.
- */
- if (frame->sp == irq_stack_ptr) {
- struct pt_regs *irq_args;
- unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
- if (object_is_on_stack((void *)orig_sp) &&
- object_is_on_stack((void *)frame->fp)) {
- frame->sp = orig_sp;
-
- /* orig_sp is the saved pt_regs, find the elr */
- irq_args = (struct pt_regs *)orig_sp;
- frame->pc = irq_args->pc;
- } else {
- /*
- * This frame has a non-standard format, and we
- * didn't fix it, because the data looked wrong.
- * Refuse to output this frame.
- */
- return -EINVAL;
- }
- }
-
kasan_enable_current();
return 0;
}
-void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+void notrace walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
{
while (1) {
@@ -125,7 +66,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
if (fn(frame, data))
break;
- ret = unwind_frame(tsk, frame);
+ ret = unwind_frame(frame);
if (ret < 0)
break;
}
@@ -176,11 +117,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
-#endif
- walk_stackframe(tsk, &frame, save_trace, &data);
+ walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
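
The hunks above revert unwind_frame() and walk_stackframe() to their task-less signatures. A sketch of a caller under the restored API (print_entry and dump_my_stack are hypothetical; the callback ends the walk by returning non-zero, as the loop above shows):

/* Hypothetical callback: print each return address, keep walking. */
static int print_entry(struct stackframe *frame, void *data)
{
	pr_info("  pc: %pS\n", (void *)frame->pc);
	return 0;
}

static void dump_my_stack(void)
{
	struct stackframe frame = {
		.fp = (unsigned long)__builtin_frame_address(0),
		.sp = current_stack_pointer,
		.pc = (unsigned long)dump_my_stack,
	};

	walk_stackframe(&frame, print_entry, NULL);
}
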
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 66055392f445..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -60,6 +60,7 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
*/
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
+ struct mm_struct *mm = current->active_mm;
int ret;
unsigned long flags;
@@ -86,11 +87,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
/*
- * We are resuming from reset with the idmap active in TTBR0_EL1.
- * We must uninstall the idmap and restore the expected MMU
- * state before we can possibly return to userspace.
+ * We are resuming from reset with TTBR0_EL1 set to the
+ * idmap to enable the MMU; set the TTBR0 to the reserved
+ * page tables to prevent speculative TLB allocations, flush
+ * the local TLB, and set the default tcr_el1.t0sz so that
+ * the TTBR0 address space set-up is properly restored.
+ * If the current active_mm != &init_mm, we entered cpu_suspend
+ * with mappings in TTBR0 that must be restored, so switch
+ * them back to complete the address space restoration
+ * before returning.
*/
- cpu_uninstall_idmap();
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
+
+ if (mm != &init_mm)
+ cpu_switch_mm(mm->pgd, mm);
/*
* Restore per-cpu offset before any kernel
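
The same three-step TTBR0 reset appears in secondary_start_kernel() earlier in this revert. Condensed into one helper for orientation (the callees are the real arm64 internals used above; the wrapper itself is not kernel code):

static void restore_user_address_space(struct mm_struct *mm)
{
	cpu_set_reserved_ttbr0();	/* park TTBR0 on the zero page */
	local_flush_tlb_all();		/* drop stale idmap TLB entries */
	cpu_set_default_tcr_t0sz();	/* restore the user VA size */

	if (mm != &init_mm)		/* kernel threads keep init_mm */
		cpu_switch_mm(mm->pgd, mm);
}
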
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 59779699a1a4..13339b6ffc1a 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -52,11 +52,8 @@ unsigned long profile_pc(struct pt_regs *regs)
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = -1; /* no task info */
-#endif
do {
- int ret = unwind_frame(NULL, &frame);
+ int ret = unwind_frame(&frame);
if (ret < 0)
return 0;
} while (in_lock_functions(frame.pc));
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index e8b1f7910490..61ac4cdaed5c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -148,24 +148,17 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long irq_stack_ptr;
- int skip;
-
- /*
- * Switching between stacks is valid when tracing current and in
- * non-preemptible context.
- */
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
tsk = current;
- if (tsk == current) {
+ if (regs) {
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
+ } else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
@@ -177,49 +170,21 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
-#endif
- skip = !!regs;
- printk("Call trace:\n");
+ pr_emerg("Call trace:\n");
while (1) {
unsigned long where = frame.pc;
unsigned long stack;
int ret;
- /* skip until specified stack frame */
- if (!skip) {
- dump_backtrace_entry(where);
- } else if (frame.fp == regs->regs[29]) {
- skip = 0;
- /*
- * Mostly, this is the case where this function is
- * called in panic/abort. As exception handler's
- * stack frame does not contain the corresponding pc
- * at which an exception has taken place, use regs->pc
- * instead.
- */
- dump_backtrace_entry(regs->pc);
- }
- ret = unwind_frame(tsk, &frame);
+ dump_backtrace_entry(where);
+ ret = unwind_frame(&frame);
if (ret < 0)
break;
stack = frame.sp;
- if (in_exception_text(where)) {
- /*
- * If we switched to the irq_stack before calling this
- * exception handler, then the pt_regs will be on the
- * task stack. The easiest way to tell is if the large
- * pt_regs would overlap with the end of the irq_stack.
- */
- if (stack < irq_stack_ptr &&
- (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
- stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
+ if (in_exception_text(where))
dump_mem("", "Exception stack", stack,
stack + sizeof(struct pt_regs), false);
- }
}
}
@@ -533,22 +498,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __pte_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
}
void __pmd_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
}
void __pud_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
}
void __pgd_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
}
/* GENERIC_BUG traps */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6b562a318f84..b3f48316f888 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -88,16 +88,15 @@ SECTIONS
EXIT_CALL
*(.discard)
*(.discard.*)
- *(.interp .dynamic)
}
- . = KIMAGE_VADDR + TEXT_OFFSET;
+ . = PAGE_OFFSET + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT
}
- ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+ ALIGN_DEBUG_RO
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -115,12 +114,14 @@ SECTIONS
*(.got) /* Global offset table */
}
+ ALIGN_DEBUG_RO
RO_DATA(PAGE_SIZE)
EXCEPTION_TABLE(8)
NOTES
+ ALIGN_DEBUG_RO
+ _etext = .; /* End of text and rodata section */
ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
- _etext = .; /* End of text and rodata section */
__init_begin = .;
INIT_TEXT_SECTION(8)
@@ -128,6 +129,7 @@ SECTIONS
ARM_EXIT_KEEP(EXIT_TEXT)
}
+ ALIGN_DEBUG_RO_MIN(16)
.init.data : {
INIT_DATA
INIT_SETUP(16)
@@ -142,6 +144,9 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
@@ -151,25 +156,8 @@ SECTIONS
.altinstr_replacement : {
*(.altinstr_replacement)
}
- .rela : ALIGN(8) {
- __reloc_start = .;
- *(.rela .rela*)
- __reloc_end = .;
- }
- .dynsym : ALIGN(8) {
- __dynsym_start = .;
- *(.dynsym)
- }
- .dynstr : {
- *(.dynstr)
- }
- .hash : {
- *(.hash)
- }
. = ALIGN(PAGE_SIZE);
- __init_end = .;
-
_data = .;
_sdata = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
@@ -203,4 +191,4 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
-ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 309e3479dc2c..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -923,7 +923,7 @@ __hyp_panic_str:
.align 2
/*
- * u64 __kvm_call_hyp(void *hypfn, ...);
+ * u64 kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C-way and care must
* be taken when calling this to ensure parameters are passed in registers
@@ -940,10 +940,10 @@ __hyp_panic_str:
* used to implement __hyp_get_vectors in the same way as in
* arch/arm64/kernel/hyp_stub.S.
*/
-ENTRY(__kvm_call_hyp)
+ENTRY(kvm_call_hyp)
hvc #0
ret
-ENDPROC(__kvm_call_hyp)
+ENDPROC(kvm_call_hyp)
.macro invalid_vector label, target
.align 2
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index c86b7909ef31..1a811ecf71da 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -4,16 +4,15 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o
-# Tell the compiler to treat all general purpose registers (with the
-# exception of the IP registers, which are already handled by the caller
-# in case of a PLT) as callee-saved, which allows for efficient runtime
-# patching of the bl instruction in the caller with an atomic instruction
-# when supported by the CPU. Result and argument registers are handled
-# correctly, based on the function prototype.
+# Tell the compiler to treat all general purpose registers as
+# callee-saved, which allows for efficient runtime patching of the bl
+# instruction in the caller with an atomic instruction when supported by
+# the CPU. Result and argument registers are handled correctly, based on
+# the function prototype.
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
-ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
- -fcall-saved-x18
+ -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 5d1cad3ce6d6..a9723c71c52b 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -33,28 +33,28 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
1:
-uao_user_alternative 9f, str, sttr, xzr, x0, 8
+USER(9f, str xzr, [x0], #8 )
subs x1, x1, #8
b.pl 1b
2: adds x1, x1, #4
b.mi 3f
-uao_user_alternative 9f, str, sttr, wzr, x0, 4
+USER(9f, str wzr, [x0], #4 )
sub x1, x1, #4
3: adds x1, x1, #2
b.mi 4f
-uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
+USER(9f, strh wzr, [x0], #2 )
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
-uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
+USER(9f, strb wzr, [x0] )
5: mov x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 17e8306dca29..4699cd74f87e 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -34,7 +34,7 @@
*/
.macro ldrb1 ptr, regB, val
- uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
+ USER(9998f, ldrb \ptr, [\regB], \val)
.endm
.macro strb1 ptr, regB, val
@@ -42,7 +42,7 @@
.endm
.macro ldrh1 ptr, regB, val
- uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
+ USER(9998f, ldrh \ptr, [\regB], \val)
.endm
.macro strh1 ptr, regB, val
@@ -50,7 +50,7 @@
.endm
.macro ldr1 ptr, regB, val
- uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
+ USER(9998f, ldr \ptr, [\regB], \val)
.endm
.macro str1 ptr, regB, val
@@ -58,7 +58,7 @@
.endm
.macro ldp1 ptr, regB, regC, val
- uao_ldp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, ldp \ptr, \regB, [\regC], \val)
.endm
.macro stp1 ptr, regB, regC, val
@@ -67,11 +67,11 @@
end .req x5
ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0 // Nothing to copy
ret
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index f7292dd08c84..81c8fc93c100 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -35,44 +35,44 @@
* x0 - bytes not copied
*/
.macro ldrb1 ptr, regB, val
- uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
+ USER(9998f, ldrb \ptr, [\regB], \val)
.endm
.macro strb1 ptr, regB, val
- uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
+ USER(9998f, strb \ptr, [\regB], \val)
.endm
.macro ldrh1 ptr, regB, val
- uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
+ USER(9998f, ldrh \ptr, [\regB], \val)
.endm
.macro strh1 ptr, regB, val
- uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
+ USER(9998f, strh \ptr, [\regB], \val)
.endm
.macro ldr1 ptr, regB, val
- uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
+ USER(9998f, ldr \ptr, [\regB], \val)
.endm
.macro str1 ptr, regB, val
- uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
+ USER(9998f, str \ptr, [\regB], \val)
.endm
.macro ldp1 ptr, regB, regC, val
- uao_ldp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, ldp \ptr, \regB, [\regC], \val)
.endm
.macro stp1 ptr, regB, regC, val
- uao_stp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, stp \ptr, \regB, [\regC], \val)
.endm
end .req x5
ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index 4c1e700840b6..512b9a7b980e 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -18,8 +18,6 @@
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
-#include <asm/cpufeature.h>
-#include <asm/alternative.h>
/*
* Copy a page from src to dest (both are page aligned)
@@ -29,65 +27,20 @@
* x1 - src
*/
ENTRY(copy_page)
-alternative_if_not ARM64_HAS_NO_HW_PREFETCH
- nop
- nop
-alternative_else
- # Prefetch two cache lines ahead.
- prfm pldl1strm, [x1, #128]
- prfm pldl1strm, [x1, #256]
-alternative_endif
-
- ldp x2, x3, [x1]
+ /* Assume cache line size is 64 bytes. */
+ prfm pldl1strm, [x1, #64]
+1: ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
ldp x6, x7, [x1, #32]
ldp x8, x9, [x1, #48]
- ldp x10, x11, [x1, #64]
- ldp x12, x13, [x1, #80]
- ldp x14, x15, [x1, #96]
- ldp x16, x17, [x1, #112]
-
- mov x18, #(PAGE_SIZE - 128)
- add x1, x1, #128
-1:
- subs x18, x18, #128
-
-alternative_if_not ARM64_HAS_NO_HW_PREFETCH
- nop
-alternative_else
- prfm pldl1strm, [x1, #384]
-alternative_endif
-
+ add x1, x1, #64
+ prfm pldl1strm, [x1, #64]
stnp x2, x3, [x0]
- ldp x2, x3, [x1]
stnp x4, x5, [x0, #16]
- ldp x4, x5, [x1, #16]
stnp x6, x7, [x0, #32]
- ldp x6, x7, [x1, #32]
stnp x8, x9, [x0, #48]
- ldp x8, x9, [x1, #48]
- stnp x10, x11, [x0, #64]
- ldp x10, x11, [x1, #64]
- stnp x12, x13, [x0, #80]
- ldp x12, x13, [x1, #80]
- stnp x14, x15, [x0, #96]
- ldp x14, x15, [x1, #96]
- stnp x16, x17, [x0, #112]
- ldp x16, x17, [x1, #112]
-
- add x0, x0, #128
- add x1, x1, #128
-
- b.gt 1b
-
- stnp x2, x3, [x0]
- stnp x4, x5, [x0, #16]
- stnp x6, x7, [x0, #32]
- stnp x8, x9, [x0, #48]
- stnp x10, x11, [x0, #64]
- stnp x12, x13, [x0, #80]
- stnp x14, x15, [x0, #96]
- stnp x16, x17, [x0, #112]
-
+ add x0, x0, #64
+ tst x1, #(PAGE_SIZE - 1)
+ b.ne 1b
ret
ENDPROC(copy_page)
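
The restored copy_page is the simple variant: 64 bytes per iteration, one software prefetch a line ahead, and stnp stores that bypass the cache for the destination. A rough C equivalent, assuming 4 KiB pages and 64-byte lines, with __builtin_prefetch standing in for prfm pldl1strm (and no non-temporal store hint):

#include <stddef.h>
#include <string.h>

#define PAGE_SZ		4096	/* assumed page size */
#define LINE		64	/* assumed cache line size, as in the asm */

static void copy_page_sketch(void *dst, const void *src)
{
	const char *s = src;
	char *d = dst;
	size_t off;

	for (off = 0; off < PAGE_SZ; off += LINE) {
		/* prefetch one line ahead: read, low temporal locality */
		__builtin_prefetch(s + off + LINE, 0, 0);
		memcpy(d + off, s + off, LINE);
	}
}
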
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 21faae60f988..7512bbbc07ac 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -37,7 +37,7 @@
.endm
.macro strb1 ptr, regB, val
- uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
+ USER(9998f, strb \ptr, [\regB], \val)
.endm
.macro ldrh1 ptr, regB, val
@@ -45,7 +45,7 @@
.endm
.macro strh1 ptr, regB, val
- uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
+ USER(9998f, strh \ptr, [\regB], \val)
.endm
.macro ldr1 ptr, regB, val
@@ -53,7 +53,7 @@
.endm
.macro str1 ptr, regB, val
- uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
+ USER(9998f, str \ptr, [\regB], \val)
.endm
.macro ldp1 ptr, regB, regC, val
@@ -61,16 +61,16 @@
.endm
.macro stp1 ptr, regB, regC, val
- uao_stp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, stp \ptr, \regB, [\regC], \val)
.endm
end .req x5
ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2f06997dcd59..ce32a2229a9d 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -154,32 +154,26 @@ ENDPROC(__flush_cache_user_range)
/*
* __flush_dcache_area(kaddr, size)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
- * are cleaned and invalidated to the PoC.
+ * Ensure that the data held in the page kaddr is written back to the
+ * page in question.
*
* - kaddr - kernel address
* - size - size in question
*/
ENTRY(__flush_dcache_area)
- dcache_by_line_op civac, sy, x0, x1, x2, x3
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc civac, x0 // clean & invalidate D line / unified line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
ret
ENDPIPROC(__flush_dcache_area)
/*
- * __clean_dcache_area_pou(kaddr, size)
- *
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
- * are cleaned to the PoU.
- *
- * - kaddr - kernel address
- * - size - size in question
- */
-ENTRY(__clean_dcache_area_pou)
- dcache_by_line_op cvau, ish, x0, x1, x2, x3
- ret
-ENDPROC(__clean_dcache_area_pou)
-
-/*
* __inval_cache_range(start, end)
* - start - start address of region
* - end - end address of region
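
The loop that replaces dcache_by_line_op above is the classic by-line walk: align the start down to a line boundary, clean and invalidate each line to the point of coherency, then issue a dsb. The same shape in C (dc_civac() and dsb_sy() are hypothetical wrappers around the respective instructions; the real routine reads the line size from CTR_EL0 via dcache_line_size):

#include <stddef.h>
#include <stdint.h>

extern void dc_civac(uintptr_t addr);	/* hypothetical: dc civac, addr */
extern void dsb_sy(void);		/* hypothetical: dsb sy */

static void flush_dcache_area_sketch(uintptr_t kaddr, size_t size)
{
	const uintptr_t line = 64;	/* really read from CTR_EL0 */
	uintptr_t end = kaddr + size;

	for (kaddr &= ~(line - 1); kaddr < end; kaddr += line)
		dc_civac(kaddr);	/* clean+invalidate to PoC */
	dsb_sy();			/* complete before returning */
}
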
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 7275628ba59f..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -187,7 +187,7 @@ switch_mm_fastpath:
static int asids_init(void)
{
- int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4);
+ int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
switch (fld) {
default:
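
For reference, cpuid_feature_extract_field() pulls a signed 4-bit field out of an ID register; the call above reads ASIDBits, at bit 4 of ID_AA64MMFR0_EL1 (0 means 8-bit ASIDs, 2 means 16-bit). The extraction is a shift-up/arithmetic-shift-down so reserved negative values sign-extend, roughly:

#include <stdint.h>

/* Signed 4-bit field starting at bit 'shift' of an ID register. */
static int extract_id_field(uint64_t reg, unsigned int shift)
{
	return (int)((int64_t)(reg << (64 - shift - 4)) >> 60);
}
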
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 22e4cb4d6f53..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -24,9 +24,8 @@
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
- struct page *page = virt_to_page(kto);
copy_page(kto, kfrom);
- flush_dcache_page(page);
+ __flush_dcache_area(kto, PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 7109e27de235..e5389bc981ee 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -52,7 +52,7 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
static struct gen_pool *atomic_pool;
#define NO_KERNEL_MAPPING_DUMMY 0x2222
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
@@ -1015,7 +1015,7 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
return 0;
}
-static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
+static int register_iommu_dma_ops_notifier(struct bus_type *bus)
{
struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
int ret;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 6be918478f85..5a22a119a74c 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -35,9 +35,7 @@ struct addr_marker {
};
enum address_markers_idx {
- MODULES_START_NR = 0,
- MODULES_END_NR,
- VMALLOC_START_NR,
+ VMALLOC_START_NR = 0,
VMALLOC_END_NR,
#ifdef CONFIG_SPARSEMEM_VMEMMAP
VMEMMAP_START_NR,
@@ -47,12 +45,12 @@ enum address_markers_idx {
FIXADDR_END_NR,
PCI_START_NR,
PCI_END_NR,
+ MODULES_START_NR,
+ MODULES_END_NR,
KERNEL_SPACE_NR,
};
static struct addr_marker address_markers[] = {
- { MODULES_VADDR, "Modules start" },
- { MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() Area" },
{ VMALLOC_END, "vmalloc() End" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -63,7 +61,9 @@ static struct addr_marker address_markers[] = {
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
{ PCI_IO_END, "PCI I/O end" },
- { PAGE_OFFSET, "Linear Mapping" },
+ { MODULES_VADDR, "Modules start" },
+ { MODULES_END, "Modules end" },
+ { PAGE_OFFSET, "Kernel Mapping" },
{ -1, NULL },
};
@@ -90,11 +90,6 @@ struct prot_bits {
static const struct prot_bits pte_bits[] = {
{
- .mask = PTE_VALID,
- .val = PTE_VALID,
- .set = " ",
- .clear = "F",
- }, {
.mask = PTE_USER,
.val = PTE_USER,
.set = "USR",
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 81acd4706878..79444279ba8c 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -11,7 +11,7 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup)
- regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
+ regs->pc = fixup->fixup;
return fixup != NULL;
}
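
This one-liner is the crux of the extable revert: a relocatable kernel stores exception-table entries as 32-bit self-relative offsets, while the restored code stores absolute virtual addresses. Side by side, with illustrative struct names (not the kernel's):

/* Absolute entries (restored): two pointers, patched by the linker. */
struct extable_abs { unsigned long insn, fixup; };

/* Relative entries (reverted away): 8 bytes, position-independent. */
struct extable_rel { int insn, fixup; };

static unsigned long fixup_addr_abs(const struct extable_abs *e)
{
	return e->fixup;
}

static unsigned long fixup_addr_rel(const struct extable_rel *e)
{
	return (unsigned long)&e->fixup + e->fixup;
}
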
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7bb08670fc10..c2a5a018bd00 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -193,14 +193,6 @@ out:
return fault;
}
-static inline int permission_fault(unsigned int esr)
-{
- unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
-
- return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
-}
-
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -234,13 +226,12 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (permission_fault(esr) && (addr < USER_DS)) {
- if (get_fs() == KERNEL_DS)
- die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
-
- if (!search_exception_tables(regs->pc))
- die("Accessing user space memory outside uaccess.h routines", regs, esr);
- }
+ /*
+ * PAN bit set implies the fault happened in kernel space, but not
+ * in the arch's user access functions.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+ goto no_context;
/*
* As per x86, we may deadlock here. However, since the kernel only
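
The restored check above is a single predicate worth spelling out: the uaccess helpers clear PSTATE.PAN around their user accesses, so if PAN (bit 22 of pstate) is still set at fault time, the kernel touched user memory outside those helpers and the fault must not be fixed up. As a standalone test (sketch; PSR_PAN_BIT and IS_ENABLED as in the kernel):

static bool fault_from_pan(const struct pt_regs *regs)
{
	return IS_ENABLED(CONFIG_ARM64_PAN) &&
	       (regs->pstate & PSR_PAN_BIT);
}
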
@@ -572,16 +563,3 @@ void cpu_enable_pan(void *__unused)
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
}
#endif /* CONFIG_ARM64_PAN */
-
-#ifdef CONFIG_ARM64_UAO
-/*
- * Kernel threads have fs=KERNEL_DS by default, and don't need to call
- * set_fs(), devtmpfs in particular relies on this behaviour.
- * We need to enable the feature at runtime (instead of adding it to
- * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
- */
-void cpu_enable_uao(void *__unused)
-{
- asm(SET_PSTATE_UAO(1));
-}
-#endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 61b0911aa475..07844184975b 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -34,24 +34,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
__flush_icache_all();
}
-static void sync_icache_aliases(void *kaddr, unsigned long len)
-{
- unsigned long addr = (unsigned long)kaddr;
-
- if (icache_is_aliasing()) {
- __clean_dcache_area_pou(kaddr, len);
- __flush_icache_all();
- } else {
- flush_icache_range(addr, addr + len);
- }
-}
-
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len)
{
- if (vma->vm_flags & VM_EXEC)
- sync_icache_aliases(kaddr, len);
+ if (vma->vm_flags & VM_EXEC) {
+ unsigned long addr = (unsigned long)kaddr;
+ if (icache_is_aliasing()) {
+ __flush_dcache_area(kaddr, len);
+ __flush_icache_all();
+ } else {
+ flush_icache_range(addr, addr + len);
+ }
+ }
}
/*
@@ -79,11 +74,13 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
if (!page_mapping(page))
return;
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- sync_icache_aliases(page_address(page),
- PAGE_SIZE << compound_order(page));
- else if (icache_is_aivivt())
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+ __flush_dcache_area(page_address(page),
+ PAGE_SIZE << compound_order(page));
__flush_icache_all();
+ } else if (icache_is_aivivt()) {
+ __flush_icache_all();
+ }
}
/*
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index da30529bb1f6..383b03ff38f8 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -41,273 +41,15 @@ int pud_huge(pud_t pud)
#endif
}
-static int find_num_contig(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, size_t *pgsize)
-{
- pgd_t *pgd = pgd_offset(mm, addr);
- pud_t *pud;
- pmd_t *pmd;
-
- *pgsize = PAGE_SIZE;
- if (!pte_cont(pte))
- return 1;
- if (!pgd_present(*pgd)) {
- VM_BUG_ON(!pgd_present(*pgd));
- return 1;
- }
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud)) {
- VM_BUG_ON(!pud_present(*pud));
- return 1;
- }
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- VM_BUG_ON(!pmd_present(*pmd));
- return 1;
- }
- if ((pte_t *)pmd == ptep) {
- *pgsize = PMD_SIZE;
- return CONT_PMDS;
- }
- return CONT_PTES;
-}
-
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- size_t pgsize;
- int i;
- int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
- unsigned long pfn;
- pgprot_t hugeprot;
-
- if (ncontig == 1) {
- set_pte_at(mm, addr, ptep, pte);
- return;
- }
-
- pfn = pte_pfn(pte);
- hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
- for (i = 0; i < ncontig; i++) {
- pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
- pte_val(pfn_pte(pfn, hugeprot)));
- set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
- ptep++;
- pfn += pgsize >> PAGE_SHIFT;
- addr += pgsize;
- }
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pte_t *pte = NULL;
-
- pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
- return NULL;
-
- if (sz == PUD_SIZE) {
- pte = (pte_t *)pud;
- } else if (sz == (PAGE_SIZE * CONT_PTES)) {
- pmd_t *pmd = pmd_alloc(mm, pud, addr);
-
- WARN_ON(addr & (sz - 1));
- /*
- * Note that if this code were ever ported to the
- * 32-bit arm platform then it will cause trouble in
- * the case where CONFIG_HIGHPTE is set, since there
- * will be no pte_unmap() to correspond with this
- * pte_alloc_map().
- */
- pte = pte_alloc_map(mm, NULL, pmd, addr);
- } else if (sz == PMD_SIZE) {
- if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
- pud_none(*pud))
- pte = huge_pmd_share(mm, addr, pud);
- else
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
- } else if (sz == (PMD_SIZE * CONT_PMDS)) {
- pmd_t *pmd;
-
- pmd = pmd_alloc(mm, pud, addr);
- WARN_ON(addr & (sz - 1));
- return (pte_t *)pmd;
- }
-
- pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
- sz, pte, pte_val(*pte));
- return pte;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
- if (!pgd_present(*pgd))
- return NULL;
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud))
- return NULL;
-
- if (pud_huge(*pud))
- return (pte_t *)pud;
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
- return NULL;
-
- if (pte_cont(pmd_pte(*pmd))) {
- pmd = pmd_offset(
- pud, (addr & CONT_PMD_MASK));
- return (pte_t *)pmd;
- }
- if (pmd_huge(*pmd))
- return (pte_t *)pmd;
- pte = pte_offset_kernel(pmd, addr);
- if (pte_present(*pte) && pte_cont(*pte)) {
- pte = pte_offset_kernel(
- pmd, (addr & CONT_PTE_MASK));
- return pte;
- }
- return NULL;
-}
-
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
-{
- size_t pagesize = huge_page_size(hstate_vma(vma));
-
- if (pagesize == CONT_PTE_SIZE) {
- entry = pte_mkcont(entry);
- } else if (pagesize == CONT_PMD_SIZE) {
- entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
- } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
- pr_warn("%s: unrecognized huge page size 0x%lx\n",
- __func__, pagesize);
- }
- return entry;
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte;
-
- if (pte_cont(*ptep)) {
- int ncontig, i;
- size_t pgsize;
- pte_t *cpte;
- bool is_dirty = false;
-
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- /* save the 1st pte to return */
- pte = ptep_get_and_clear(mm, addr, cpte);
- for (i = 1; i < ncontig; ++i) {
- /*
- * If HW_AFDBM is enabled, then the HW could
- * turn on the dirty bit for any of the page
- * in the set, so check them all.
- */
- ++cpte;
- if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
- is_dirty = true;
- }
- if (is_dirty)
- return pte_mkdirty(pte);
- else
- return pte;
- } else {
- return ptep_get_and_clear(mm, addr, ptep);
- }
-}
-
-int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- pte_t *cpte;
-
- if (pte_cont(pte)) {
- int ncontig, i, changed = 0;
- size_t pgsize = 0;
- unsigned long pfn = pte_pfn(pte);
- /* Select all bits except the pfn */
- pgprot_t hugeprot =
- __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
- pte_val(pte));
-
- cpte = huge_pte_offset(vma->vm_mm, addr);
- pfn = pte_pfn(*cpte);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte) {
- changed = ptep_set_access_flags(vma, addr, cpte,
- pfn_pte(pfn,
- hugeprot),
- dirty);
- pfn += pgsize >> PAGE_SHIFT;
- }
- return changed;
- } else {
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
- }
-}
-
-void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- if (pte_cont(*ptep)) {
- int ncontig, i;
- pte_t *cpte;
- size_t pgsize = 0;
-
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
- ptep_set_wrprotect(mm, addr, cpte);
- } else {
- ptep_set_wrprotect(mm, addr, ptep);
- }
-}
-
-void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- if (pte_cont(*ptep)) {
- int ncontig, i;
- pte_t *cpte;
- size_t pgsize = 0;
-
- cpte = huge_pte_offset(vma->vm_mm, addr);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
- ptep_clear_flush(vma, addr, cpte);
- } else {
- ptep_clear_flush(vma, addr, ptep);
- }
-}
-
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
-
if (ps == PMD_SIZE) {
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
- pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
+ pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
return 0;
}
return 1;
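
setup_hugepagesz() leans on memparse(), which parses a number with an optional K/M/G suffix, so hugepagesz=2M arrives here as ps == 2 << 20 and matches PMD_SIZE on a 4K-page kernel. A user-space model of the suffix handling (the kernel helper also accepts lowercase suffixes and more):

#include <stdlib.h>

static unsigned long memparse_sketch(const char *s, char **end)
{
	unsigned long val = strtoul(s, end, 0);

	switch (**end) {
	case 'G':
		val <<= 10;	/* fall through */
	case 'M':
		val <<= 10;	/* fall through */
	case 'K':
		val <<= 10;
		(*end)++;
		break;
	}
	return val;
}
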
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0d5b0d0578b3..52406a2a39ad 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,10 +35,7 @@
#include <linux/efi.h>
#include <linux/swiotlb.h>
-#include <asm/boot.h>
#include <asm/fixmap.h>
-#include <asm/kasan.h>
-#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -48,13 +45,7 @@
#include "mm.h"
-/*
- * We need to be able to catch inadvertent references to memstart_addr
- * that occur (potentially in generic code) before arm64_memblock_init()
- * executes, which assigns it its actual value. So use a default value
- * that cannot be mistaken for a real physical address.
- */
-s64 memstart_addr __read_mostly = -1;
+phys_addr_t memstart_addr __read_mostly = 0;
phys_addr_t arm64_dma_phys_limit __read_mostly;
#ifdef CONFIG_BLK_DEV_INITRD
@@ -67,8 +58,8 @@ static int __init early_initrd(char *p)
if (*endp == ',') {
size = memparse(endp + 1, NULL);
- initrd_start = start;
- initrd_end = start + size;
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(start + size);
}
return 0;
}
@@ -80,7 +71,7 @@ early_param("initrd", early_initrd);
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
-static phys_addr_t __init max_zone_dma_phys(void)
+static phys_addr_t max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
@@ -137,11 +128,11 @@ EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
-static void __init arm64_memory_present(void)
+static void arm64_memory_present(void)
{
}
#else
-static void __init arm64_memory_present(void)
+static void arm64_memory_present(void)
{
struct memblock_region *reg;
@@ -170,57 +161,7 @@ early_param("mem", early_mem);
void __init arm64_memblock_init(void)
{
- const s64 linear_region_size = -(s64)PAGE_OFFSET;
-
- /*
- * Ensure that the linear region takes up exactly half of the kernel
- * virtual address space. This way, we can distinguish a linear address
- * from a kernel/module/vmalloc address by testing a single bit.
- */
- BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
-
- /*
- * Select a suitable value for the base of physical memory.
- */
- memstart_addr = round_down(memblock_start_of_DRAM(),
- ARM64_MEMSTART_ALIGN);
-
- /*
- * Remove the memory that we will not be able to cover with the
- * linear mapping. Take care not to clip the kernel which may be
- * high in memory.
- */
- memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
- ULLONG_MAX);
- if (memblock_end_of_DRAM() > linear_region_size)
- memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
-
- /*
- * Apply the memory limit if it was set. Since the kernel may be loaded
- * high up in memory, add back the kernel region that must be accessible
- * via the linear mapping.
- */
- if (memory_limit != (phys_addr_t)ULLONG_MAX) {
- memblock_enforce_memory_limit(memory_limit);
- memblock_add(__pa(_text), (u64)(_end - _text));
- }
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- extern u16 memstart_offset_seed;
- u64 range = linear_region_size -
- (memblock_end_of_DRAM() - memblock_start_of_DRAM());
-
- /*
- * If the size of the linear region exceeds, by a sufficient
- * margin, the size of the region that the available physical
- * memory spans, randomize the linear region as well.
- */
- if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
- range = range / ARM64_MEMSTART_ALIGN + 1;
- memstart_addr -= ARM64_MEMSTART_ALIGN *
- ((range * memstart_offset_seed) >> 16);
- }
- }
+ memblock_enforce_memory_limit(memory_limit);
/*
* Register the kernel text, kernel data, initrd, and initial
@@ -228,13 +169,8 @@ void __init arm64_memblock_init(void)
*/
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
- memblock_reserve(initrd_start, initrd_end - initrd_start);
-
- /* the generic initrd code expects virtual addresses */
- initrd_start = __phys_to_virt(initrd_start);
- initrd_end = __phys_to_virt(initrd_end);
- }
+ if (initrd_start)
+ memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
early_init_fdt_scan_reserved_mem();
@@ -368,36 +304,35 @@ void __init mem_init(void)
#ifdef CONFIG_KASAN
" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n"
#endif
- " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n"
- " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
- " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
- " .data : 0x%p" " - 0x%p" " (%6ld KB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n"
" 0x%16lx - 0x%16lx (%6ld MB actual)\n"
#endif
" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n"
" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
- " memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
+ " memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
+ " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
+ " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
+ " .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
#ifdef CONFIG_KASAN
MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
#endif
- MLM(MODULES_VADDR, MODULES_END),
MLG(VMALLOC_START, VMALLOC_END),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(_sdata, _edata),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
MLG(VMEMMAP_START,
VMEMMAP_START + VMEMMAP_SIZE),
- MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
+ MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif
MLK(FIXADDR_START, FIXADDR_TOP),
MLM(PCI_IO_START, PCI_IO_END),
- MLM(__phys_to_virt(memblock_start_of_DRAM()),
- (unsigned long)high_memory));
+ MLM(MODULES_VADDR, MODULES_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(_sdata, _edata));
#undef MLK
#undef MLM
@@ -430,8 +365,9 @@ static inline void poison_init_mem(void *s, size_t count)
void free_initmem(void)
{
- free_initmem_default(0);
fixup_init();
+ free_initmem_default(0);
+ free_alternatives_memory();
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -465,27 +401,3 @@ void set_kernel_text_ro(void)
set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}
#endif
-/*
- * Dump out memory limit information on panic.
- */
-static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
-{
- if (memory_limit != (phys_addr_t)ULLONG_MAX) {
- pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
- } else {
- pr_emerg("Memory Limit: none\n");
- }
- return 0;
-}
-
-static struct notifier_block mem_limit_notifier = {
- .notifier_call = dump_mem_limit,
-};
-
-static int __init register_mem_limit_dumper(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &mem_limit_notifier);
- return 0;
-}
-__initcall(register_mem_limit_dumper);
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 757009daa9ed..cf038c7d9fa9 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -16,12 +16,9 @@
#include <linux/memblock.h>
#include <linux/start_kernel.h>
-#include <asm/mmu_context.h>
-#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/sections.h>
#include <asm/tlbflush.h>
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -35,7 +32,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
if (pmd_none(*pmd))
pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
- pte = pte_offset_kimg(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
do {
next = addr + PAGE_SIZE;
set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -53,7 +50,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
if (pud_none(*pud))
pud_populate(&init_mm, pud, kasan_zero_pmd);
- pmd = pmd_offset_kimg(pud, addr);
+ pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
kasan_early_pte_populate(pmd, addr, next);
@@ -70,7 +67,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
if (pgd_none(*pgd))
pgd_populate(&init_mm, pgd, kasan_zero_pud);
- pud = pud_offset_kimg(pgd, addr);
+ pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
kasan_early_pmd_populate(pud, addr, next);
@@ -99,21 +96,6 @@ asmlinkage void __init kasan_early_init(void)
kasan_map_early_shadow();
}
-/*
- * Copy the current shadow region into a new pgdir.
- */
-void __init kasan_copy_shadow(pgd_t *pgdir)
-{
- pgd_t *pgd, *pgd_new, *pgd_end;
-
- pgd = pgd_offset_k(KASAN_SHADOW_START);
- pgd_end = pgd_offset_k(KASAN_SHADOW_END);
- pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
- do {
- set_pgd(pgd_new, *pgd);
- } while (pgd++, pgd_new++, pgd != pgd_end);
-}
-
static void __init clear_pgds(unsigned long start,
unsigned long end)
{
@@ -126,18 +108,18 @@ static void __init clear_pgds(unsigned long start,
set_pgd(pgd_offset_k(start), __pgd(0));
}
+static void __init cpu_set_ttbr1(unsigned long ttbr1)
+{
+ asm(
+ " msr ttbr1_el1, %0\n"
+ " isb"
+ :
+ : "r" (ttbr1));
+}
+
void __init kasan_init(void)
{
- u64 kimg_shadow_start, kimg_shadow_end;
- u64 mod_shadow_start, mod_shadow_end;
struct memblock_region *reg;
- int i;
-
- kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
- kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
-
- mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
- mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
/*
* We are going to perform proper setup of shadow memory.
@@ -147,33 +129,13 @@ void __init kasan_init(void)
* setup will be finished.
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
- dsb(ishst);
- cpu_replace_ttbr1(tmp_pg_dir);
+ cpu_set_ttbr1(__pa(tmp_pg_dir));
+ flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
- vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(_text)));
-
- /*
- * vmemmap_populate() has populated the shadow region that covers the
- * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
- * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
- * kasan_populate_zero_shadow() from replacing the page table entries
- * (PMD or PTE) at the edges of the shadow region for the kernel
- * image.
- */
- kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
- kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
-
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
- (void *)mod_shadow_start);
- kasan_populate_zero_shadow((void *)kimg_shadow_end,
- kasan_mem_to_shadow((void *)PAGE_OFFSET));
-
- if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_zero_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ kasan_mem_to_shadow((void *)MODULES_VADDR));
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
@@ -193,16 +155,9 @@ void __init kasan_init(void)
pfn_to_nid(virt_to_pfn(start)));
}
- /*
- * KAsan may reuse the contents of kasan_zero_pte directly, so we
- * should make sure that it maps the zero page read-only.
- */
- for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(&kasan_zero_pte[i],
- pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
-
memset(kasan_zero_page, 0, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ cpu_set_ttbr1(__pa(swapper_pg_dir));
+ flush_tlb_all();
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 62096a7e047a..e82aabb3c5e2 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,10 +32,8 @@
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
-#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
-#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -48,21 +46,14 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
-u64 kimage_voffset __read_mostly;
-EXPORT_SYMBOL(kimage_voffset);
-
/*
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
*/
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
-
-static bool dma_overlap(phys_addr_t start, phys_addr_t end);
+static bool __init dma_overlap(phys_addr_t start, phys_addr_t end);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
@@ -75,30 +66,16 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);
-static phys_addr_t __init early_pgtable_alloc(void)
+static void __init *early_alloc(unsigned long sz)
{
phys_addr_t phys;
void *ptr;
- phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ phys = memblock_alloc(sz, sz);
BUG_ON(!phys);
-
- /*
- * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
- * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
- * any level of table.
- */
- ptr = pte_set_fixmap(phys);
-
- memset(ptr, 0, PAGE_SIZE);
-
- /*
- * Implicit barriers also ensure the zeroed page is visible to the page
- * table walker
- */
- pte_clear_fixmap();
-
- return phys;
+ ptr = __va(phys);
+ memset(ptr, 0, sz);
+ return ptr;
}
/*
@@ -122,30 +99,24 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size))
{
pte_t *pte;
if (pmd_none(*pmd) || pmd_sect(*pmd)) {
- phys_addr_t pte_phys;
- BUG_ON(!pgtable_alloc);
- pte_phys = pgtable_alloc();
- pte = pte_set_fixmap(pte_phys);
+ pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
if (pmd_sect(*pmd))
split_pmd(pmd, pte);
- __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+ __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
flush_tlb_all();
- pte_clear_fixmap();
}
BUG_ON(pmd_bad(*pmd));
- pte = pte_set_fixmap_offset(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
do {
set_pte(pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
-
- pte_clear_fixmap();
}
static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -160,29 +131,10 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
} while (pmd++, i++, i < PTRS_PER_PMD);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
-
- /*
- * If debug_page_alloc is enabled we must map the linear map
- * using pages. However, other mappings created by
- * create_mapping_noalloc must use sections in some cases. Allow
- * sections to be used in those cases, where no pgtable_alloc
- * function is provided.
- */
- return !pgtable_alloc || !debug_pagealloc_enabled();
-}
-#else
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
- return true;
-}
-#endif
-
-static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size), bool pages)
{
pmd_t *pmd;
unsigned long next;
@@ -191,10 +143,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
* Check for initial section mappings in the pgd/pud and remove them.
*/
if (pud_none(*pud) || pud_sect(*pud)) {
- phys_addr_t pmd_phys;
- BUG_ON(!pgtable_alloc);
- pmd_phys = pgtable_alloc();
- pmd = pmd_set_fixmap(pmd_phys);
+ pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
if (pud_sect(*pud)) {
/*
* need to have the 1G of mappings continue to be
@@ -202,21 +151,19 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
*/
split_pud(pud, pmd);
}
- __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+ pud_populate(mm, pud, pmd);
flush_tlb_all();
- pmd_clear_fixmap();
}
BUG_ON(pud_bad(*pud));
- pmd = pmd_set_fixmap_offset(pud, addr);
+ pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- block_mappings_allowed(pgtable_alloc) &&
- !dma_overlap(phys, phys + next - addr)) {
+ if (!pages && ((addr | next | phys) & ~SECTION_MASK) == 0) {
pmd_t old_pmd = *pmd;
- pmd_set_huge(pmd, phys, prot);
+ set_pmd(pmd, __pmd(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
@@ -224,19 +171,17 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
if (!pmd_none(old_pmd)) {
flush_tlb_all();
if (pmd_table(old_pmd)) {
- phys_addr_t table = pmd_page_paddr(old_pmd);
+ phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot, pgtable_alloc);
+ prot, alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
-
- pmd_clear_fixmap();
}
static inline bool use_1G_block(unsigned long addr, unsigned long next,
@@ -251,22 +196,21 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
return true;
}
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size), bool force_pages)
{
pud_t *pud;
unsigned long next;
if (pgd_none(*pgd)) {
- phys_addr_t pud_phys;
- BUG_ON(!pgtable_alloc);
- pud_phys = pgtable_alloc();
- __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
+ pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
+ pgd_populate(mm, pgd, pud);
}
BUG_ON(pgd_bad(*pgd));
- pud = pud_set_fixmap_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
@@ -274,10 +218,12 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
* For 4K granule only, attempt to put down a 1GB block
*/
if (use_1G_block(addr, next, phys) &&
- block_mappings_allowed(pgtable_alloc) &&
- !dma_overlap(phys, phys + next - addr)) {
+ !force_pages &&
+ !dma_overlap(phys, phys + next - addr) &&
+ !IS_ENABLED(CONFIG_FORCE_PAGES)) {
pud_t old_pud = *pud;
- pud_set_huge(pud, phys, prot);
+ set_pud(pud, __pud(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* If we have an old value for a pud, it will
@@ -289,274 +235,359 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
if (!pud_none(old_pud)) {
flush_tlb_all();
if (pud_table(old_pud)) {
- phys_addr_t table = pud_page_paddr(old_pud);
+ phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
- alloc_init_pmd(pud, addr, next, phys, prot,
- pgtable_alloc);
+ alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc, force_pages);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
-
- pud_clear_fixmap();
}
/*
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
-static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
+static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+ phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size), bool force_pages)
{
unsigned long addr, length, end, next;
- /*
- * If the virtual and physical address don't have the same offset
- * within a page, we cannot map the region as the caller expects.
- */
- if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
- return;
-
- phys &= PAGE_MASK;
addr = virt & PAGE_MASK;
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
+ alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc, force_pages);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-static phys_addr_t late_pgtable_alloc(void)
+static void *late_alloc(unsigned long size)
{
- void *ptr = (void *)__get_free_page(PGALLOC_GFP);
- BUG_ON(!ptr);
-
- /* Ensure the zeroed page is visible to the page table walker */
- dsb(ishst);
- return __pa(ptr);
-}
+ void *ptr;
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- pgprot_t prot,
- phys_addr_t (*alloc)(void))
-{
- init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
+ BUG_ON(size > PAGE_SIZE);
+ ptr = (void *)__get_free_page(PGALLOC_GFP);
+ BUG_ON(!ptr);
+ return ptr;
}
-/*
- * This function can only be used to modify existing table entries,
- * without allocating new levels of table. Note that this permits the
- * creation of new section or page entries.
- */
-static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot, bool force_pages)
{
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- NULL);
+ __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+ size, prot, early_alloc, force_pages);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot)
{
- __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
- late_pgtable_alloc);
+ __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
+ late_alloc, false);
}
-static void create_mapping_late(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+static inline pmd_t *pmd_off_k(unsigned long virt)
{
- if (virt < VMALLOC_START) {
- pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
- &phys, virt);
- return;
- }
-
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- late_pgtable_alloc);
+ return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
-static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
+void __init remap_as_pages(unsigned long start, unsigned long size)
{
- unsigned long kernel_start = __pa(_stext);
- unsigned long kernel_end = __pa(_etext);
+ unsigned long addr;
+ unsigned long end = start + size;
/*
- * Take care not to create a writable alias for the
- * read-only text and rodata sections of the kernel image.
+ * Make start and end PMD_SIZE-aligned, observing memory
+ * boundaries.
*/
+ if (memblock_is_memory(start & PMD_MASK))
+ start = start & PMD_MASK;
+ if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
+ end = ALIGN(end, PMD_SIZE);
- /* No overlap with the kernel text */
- if (end < kernel_start || start >= kernel_end) {
- __create_pgd_mapping(pgd, start, __phys_to_virt(start),
- end - start, PAGE_KERNEL,
- early_pgtable_alloc);
- return;
- }
+ size = end - start;
/*
- * This block overlaps the kernel text mapping.
- * Map the portion(s) which don't overlap.
+ * Clear previous low-memory mapping
*/
- if (start < kernel_start)
- __create_pgd_mapping(pgd, start,
- __phys_to_virt(start),
- kernel_start - start, PAGE_KERNEL,
- early_pgtable_alloc);
- if (kernel_end < end)
- __create_pgd_mapping(pgd, kernel_end,
- __phys_to_virt(kernel_end),
- end - kernel_end, PAGE_KERNEL,
- early_pgtable_alloc);
+ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ addr += PMD_SIZE) {
+ pmd_t *pmd;
+ pmd = pmd_off_k(addr);
+ if (pmd_bad(*pmd) || pmd_sect(*pmd))
+ pmd_clear(pmd);
+ }
- /*
- * Map the linear alias of the [_stext, _etext) interval as
- * read-only/non-executable. This makes the contents of the
- * region accessible to subsystems such as hibernate, but
- * protects it from inadvertent modification or execution.
- */
- __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
- kernel_end - kernel_start, PAGE_KERNEL_RO,
- early_pgtable_alloc);
+ create_mapping(start, __phys_to_virt(start), size, PAGE_KERNEL, true);
}
-static void __init map_mem(pgd_t *pgd)
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
- struct memblock_region *reg;
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
+}
- /* map all the memory banks */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
+static bool __init dma_overlap(phys_addr_t start, phys_addr_t end)
+{
+ int i;
- if (start >= end)
- break;
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t dma_base = dma_mmu_remap[i].base;
+ phys_addr_t dma_end = dma_mmu_remap[i].base +
+ dma_mmu_remap[i].size;
- __map_memblock(pgd, start, end);
+ if ((dma_base < end) && (dma_end > start))
+ return true;
}
+ return false;
}
-void mark_rodata_ro(void)
+static void __init dma_contiguous_remap(void)
{
- if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
+ int i;
+ for (i = 0; i < dma_mmu_remap_num; i++)
+ remap_as_pages(dma_mmu_remap[i].base,
+ dma_mmu_remap[i].size);
+}
+
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
+{
+ if (virt < VMALLOC_START) {
+ pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ &phys, virt);
return;
+ }
- create_mapping_late(__pa(_stext), (unsigned long)_stext,
- (unsigned long)_etext - (unsigned long)_stext,
- PAGE_KERNEL_ROX);
+ return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+ phys, virt, size, prot, late_alloc,
+ IS_ENABLED(CONFIG_FORCE_PAGES));
}
-void fixup_init(void)
+#ifdef CONFIG_DEBUG_RODATA
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
/*
- * Unmap the __init region but leave the VM area in place. This
- * prevents the region from being reused for kernel modules, which
- * is not supported by kallsyms.
+ * Set up the executable regions using the existing section mappings
+ * for now. This will get more fine-grained later once all memory
+ * is mapped.
*/
- unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
+ unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+ unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
+
+ if (end < kernel_x_start) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL, false);
+ } else if (start >= kernel_x_end) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL, false);
+ } else {
+ if (start < kernel_x_start)
+ create_mapping(start, __phys_to_virt(start),
+ kernel_x_start - start,
+ PAGE_KERNEL, false);
+ create_mapping(kernel_x_start,
+ __phys_to_virt(kernel_x_start),
+ kernel_x_end - kernel_x_start,
+ PAGE_KERNEL_EXEC, false);
+ if (kernel_x_end < end)
+ create_mapping(kernel_x_end,
+ __phys_to_virt(kernel_x_end),
+ end - kernel_x_end,
+ PAGE_KERNEL, false);
+ }
}
+#else
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+ create_mapping(start, __phys_to_virt(start), end - start,
+ PAGE_KERNEL_EXEC, false);
+}
+#endif
-static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
- pgprot_t prot, struct vm_struct *vma)
+static void __init map_mem(void)
{
- phys_addr_t pa_start = __pa(va_start);
- unsigned long size = va_end - va_start;
+ struct memblock_region *reg;
+ phys_addr_t limit;
- BUG_ON(!PAGE_ALIGNED(pa_start));
- BUG_ON(!PAGE_ALIGNED(size));
+ /*
+ * Temporarily limit the memblock range. We need to do this as
+ * create_mapping requires puds, pmds and ptes to be allocated from
+ * memory addressable from the initial direct kernel mapping.
+ *
+ * The initial direct kernel mapping, located at swapper_pg_dir, gives
+ * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps) of
+ * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
+ * per Documentation/arm64/booting.txt).
+ */
+ limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
+ memblock_set_current_limit(limit);
+
+ /* map all the memory banks */
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
- __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc);
+ if (start >= end)
+ break;
- vma->addr = va_start;
- vma->phys_addr = pa_start;
- vma->size = size;
- vma->flags = VM_MAP;
- vma->caller = __builtin_return_address(0);
+ if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+ /*
+ * For the first memory bank align the start address and
+ * current memblock limit to prevent create_mapping() from
+ * allocating pte page tables from unmapped memory. With
+ * the section maps, if the first block doesn't end on a section-size
+ * boundary, create_mapping() will try to allocate a pte
+ * page, which may be returned from an unmapped area.
+ * When section maps are not used, the pte page table for the
+ * current limit is already present in swapper_pg_dir.
+ */
+ if (start < limit)
+ start = ALIGN(start, SECTION_SIZE);
+ if (end < limit) {
+ limit = end & SECTION_MASK;
+ memblock_set_current_limit(limit);
+ }
+ }
+ __map_memblock(start, end);
+ }
- vm_area_add_early(vma);
+ /* Limit no longer required. */
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
-
-/*
- * Create fine-grained mappings for the kernel.
- */
-static void __init map_kernel(pgd_t *pgd)
+#ifdef CONFIG_FORCE_PAGES
+static noinline void __init split_and_set_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn)
{
- static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
+ pte_t *pte, *start_pte;
- map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
- map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
- &vmlinux_init);
- map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+ start_pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ pte = start_pte;
- if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
- /*
- * The fixmap falls in a separate pgd to the kernel, and doesn't
- * live in the carveout for the swapper_pg_dir. We can simply
- * re-use the existing dir for the fixmap.
- */
- set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
- *pgd_offset_k(FIXADDR_START));
- } else if (CONFIG_PGTABLE_LEVELS > 3) {
- /*
- * The fixmap shares its top level pgd entry with the kernel
- * mapping. This can really only occur when we are running
- * with 16k/4 levels, so we can simply reuse the pud level
- * entry instead.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
- __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
- pud_clear_fixmap();
- } else {
- BUG();
- }
+ do {
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
- kasan_copy_shadow(pgd);
+ set_pmd(pmd, __pmd((__pa(start_pte)) | PMD_TYPE_TABLE));
}
-struct dma_contig_early_reserve {
- phys_addr_t base;
- unsigned long size;
-};
+static noinline void __init remap_pages(void)
+{
+ struct memblock_region *reg;
-static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
+ for_each_memblock(memory, reg) {
+ phys_addr_t phys_pgd = reg->base;
+ phys_addr_t phys_end = reg->base + reg->size;
+ unsigned long addr_pgd = (unsigned long)__va(phys_pgd);
+ unsigned long end = (unsigned long)__va(phys_end);
+ pmd_t *pmd = NULL;
+ pud_t *pud = NULL;
+ pgd_t *pgd = NULL;
+ unsigned long next_pud, next_pmd, next_pgd;
+ unsigned long addr_pmd, addr_pud;
+ phys_addr_t phys_pud, phys_pmd;
+
+ if (phys_pgd >= phys_end)
+ break;
-static int dma_mmu_remap_num;
+ pgd = pgd_offset(&init_mm, addr_pgd);
+ do {
+ next_pgd = pgd_addr_end(addr_pgd, end);
+ pud = pud_offset(pgd, addr_pgd);
+ addr_pud = addr_pgd;
+ phys_pud = phys_pgd;
+ do {
+ next_pud = pud_addr_end(addr_pud, next_pgd);
+ pmd = pmd_offset(pud, addr_pud);
+ addr_pmd = addr_pud;
+ phys_pmd = phys_pud;
+ do {
+ next_pmd = pmd_addr_end(addr_pmd,
+ next_pud);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ split_and_set_pmd(pmd, addr_pmd,
+ next_pmd, __phys_to_pfn(phys_pmd));
+ pmd++;
+ phys_pmd += next_pmd - addr_pmd;
+ } while (addr_pmd = next_pmd,
+ addr_pmd < next_pud);
+ phys_pud += next_pud - addr_pud;
+ } while (pud++, addr_pud = next_pud,
+ addr_pud < next_pgd);
+ phys_pgd += next_pgd - addr_pgd;
+ } while (pgd++, addr_pgd = next_pgd, addr_pgd < end);
+ }
+}
-void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+#else
+static void __init remap_pages(void)
{
- dma_mmu_remap[dma_mmu_remap_num].base = base;
- dma_mmu_remap[dma_mmu_remap_num].size = size;
- dma_mmu_remap_num++;
+
}
+#endif
-static bool dma_overlap(phys_addr_t start, phys_addr_t end)
+static void __init fixup_executable(void)
{
- int i;
+#ifdef CONFIG_DEBUG_RODATA
+ /* Now that we are actually fully mapped, make the start/end more fine-grained. */
+ if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_start = round_down(__pa(_stext),
+ SWAPPER_BLOCK_SIZE);
- for (i = 0; i < dma_mmu_remap_num; i++) {
- phys_addr_t dma_base = dma_mmu_remap[i].base;
- phys_addr_t dma_end = dma_mmu_remap[i].base +
- dma_mmu_remap[i].size;
+ create_mapping(aligned_start, __phys_to_virt(aligned_start),
+ __pa(_stext) - aligned_start,
+ PAGE_KERNEL, false);
+ }
- if ((dma_base < end) && (dma_end > start))
- return true;
+ if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_end = round_up(__pa(__init_end),
+ SWAPPER_BLOCK_SIZE);
+ create_mapping(__pa(__init_end), (unsigned long)__init_end,
+ aligned_end - __pa(__init_end),
+ PAGE_KERNEL, false);
}
- return false;
+#endif
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ create_mapping_late(__pa(_stext), (unsigned long)_stext,
+ (unsigned long)_etext - (unsigned long)_stext,
+ PAGE_KERNEL_ROX);
+
+}
+#endif
+
+void fixup_init(void)
+{
+ create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
+ (unsigned long)__init_end - (unsigned long)__init_begin,
+ PAGE_KERNEL);
}
/*
@@ -565,35 +596,39 @@ static bool dma_overlap(phys_addr_t start, phys_addr_t end)
*/
void __init paging_init(void)
{
- phys_addr_t pgd_phys = early_pgtable_alloc();
- pgd_t *pgd = pgd_set_fixmap(pgd_phys);
+ void *zero_page;
- map_kernel(pgd);
- map_mem(pgd);
+ map_mem();
+ fixup_executable();
+ dma_contiguous_remap();
+ remap_pages();
/*
- * We want to reuse the original swapper_pg_dir so we don't have to
- * communicate the new address to non-coherent secondaries in
- * secondary_entry, and so cpu_switch_mm can generate the address with
- * adrp+add rather than a load from some global variable.
- *
- * To do this we need to go via a temporary pgd.
+ * Finally flush the caches and tlb to ensure that we're in a
+ * consistent state.
*/
- cpu_replace_ttbr1(__va(pgd_phys));
- memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ flush_tlb_all();
+
+ /* allocate the zero page. */
+ zero_page = early_alloc(PAGE_SIZE);
- pgd_clear_fixmap();
- memblock_free(pgd_phys, PAGE_SIZE);
+ bootmem_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+
+ /* Ensure the zero page is visible to the page table walker */
+ dsb(ishst);
/*
- * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
- * allocated with it.
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to the zero page to avoid speculatively fetching new entries.
*/
- memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
- SWAPPER_DIR_SIZE - PAGE_SIZE);
-
- bootmem_init();
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
+ flush_tlb_all();
+ set_kernel_text_ro();
+ flush_tlb_all();
}
/*
@@ -680,13 +715,21 @@ void vmemmap_free(unsigned long start, unsigned long end)
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#if CONFIG_PGTABLE_LEVELS > 2
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 3
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+
static inline pud_t * fixmap_pud(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
- return pud_offset_kimg(pgd, addr);
+ return pud_offset(pgd, addr);
}
static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -695,12 +738,16 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
BUG_ON(pud_none(*pud) || pud_bad(*pud));
- return pmd_offset_kimg(pud, addr);
+ return pmd_offset(pud, addr);
}
static inline pte_t * fixmap_pte(unsigned long addr)
{
- return &bm_pte[pte_index(addr)];
+ pmd_t *pmd = fixmap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
}
void __init early_fixmap_init(void)
@@ -711,26 +758,15 @@ void __init early_fixmap_init(void)
unsigned long addr = FIXADDR_START;
pgd = pgd_offset_k(addr);
- if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
- /*
- * We only end up here if the kernel mapping and the fixmap
- * share the top level pgd entry, which should only happen on
- * 16k/4 levels configurations.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- pud = pud_offset_kimg(pgd, addr);
- } else {
- pgd_populate(&init_mm, pgd, bm_pud);
- pud = fixmap_pud(addr);
- }
+ pgd_populate(&init_mm, pgd, bm_pud);
+ pud = pud_offset(pgd, addr);
pud_populate(&init_mm, pud, bm_pmd);
- pmd = fixmap_pmd(addr);
+ pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, bm_pte);
/*
* The boot-ioremap range spans multiple pmds, for which
- * we are not prepared:
+ * we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
@@ -769,10 +805,11 @@ void __set_fixmap(enum fixed_addresses idx,
}
}
-void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
- int offset;
+ pgprot_t prot = PAGE_KERNEL_RO;
+ int size, offset;
void *dt_virt;
/*
@@ -789,7 +826,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
/*
* Make sure that the FDT region can be mapped without the need to
* allocate additional translation table pages, so that it is safe
- * to call create_mapping_noalloc() this early.
+ * to call create_mapping() this early.
*
* On 64k pages, the FDT will be mapped using PTEs, so we need to
* be in the same PMD as the rest of the fixmap.
@@ -805,73 +842,21 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
dt_virt = (void *)dt_virt_base + offset;
/* map the first chunk so we can read the size from the header */
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
- dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
+ create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ SWAPPER_BLOCK_SIZE, prot, false);
if (fdt_check_header(dt_virt) != 0)
return NULL;
- *size = fdt_totalsize(dt_virt);
- if (*size > MAX_FDT_SIZE)
+ size = fdt_totalsize(dt_virt);
+ if (size > MAX_FDT_SIZE)
return NULL;
- if (offset + *size > SWAPPER_BLOCK_SIZE)
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
-
- return dt_virt;
-}
-
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
-{
- void *dt_virt;
- int size;
-
- dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
- if (!dt_virt)
- return NULL;
+ if (offset + size > SWAPPER_BLOCK_SIZE)
+ create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ round_up(offset + size, SWAPPER_BLOCK_SIZE), prot, false);
memblock_reserve(dt_phys, size);
- return dt_virt;
-}
-
-int __init arch_ioremap_pud_supported(void)
-{
- /* only 4k granule supports level 1 block mappings */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
-}
-
-int __init arch_ioremap_pmd_supported(void)
-{
- return 1;
-}
-
-int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
-{
- BUG_ON(phys & ~PUD_MASK);
- set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
- return 1;
-}
-
-int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
-{
- BUG_ON(phys & ~PMD_MASK);
- set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
- return 1;
-}
-int pud_clear_huge(pud_t *pud)
-{
- if (!pud_sect(*pud))
- return 0;
- pud_clear(pud);
- return 1;
-}
-
-int pmd_clear_huge(pmd_t *pmd)
-{
- if (!pmd_sect(*pmd))
- return 0;
- pmd_clear(pmd);
- return 1;
+ return dt_virt;
}
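
The dma_overlap() test restored in this file is the standard half-open interval intersection check: [start, end) and [dma_base, dma_end) overlap exactly when each range begins before the other ends. A standalone sketch of the same predicate (not kernel code; the driver values are illustrative):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long phys_addr_t;

/* Two half-open ranges [a_start, a_end) and [b_start, b_end) overlap
 * exactly when each one starts before the other one ends. */
static bool ranges_overlap(phys_addr_t a_start, phys_addr_t a_end,
			   phys_addr_t b_start, phys_addr_t b_end)
{
	return (a_start < b_end) && (a_end > b_start);
}

int main(void)
{
	/* A CMA area at [0x80000000, 0x90000000) vs. two candidate sections. */
	printf("%d\n", ranges_overlap(0x80000000ULL, 0x90000000ULL,
				      0x8ff00000ULL, 0x90000000ULL)); /* 1 */
	printf("%d\n", ranges_overlap(0x80000000ULL, 0x90000000ULL,
				      0x90000000ULL, 0xa0000000ULL)); /* 0 */
	return 0;
}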
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 4754762bde49..0f35e8ba0f33 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,7 +14,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -37,32 +36,14 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
return 0;
}
-/*
- * This function assumes that the range is mapped with PAGE_SIZE pages.
- */
-static int __change_memory_common(unsigned long start, unsigned long size,
- pgprot_t set_mask, pgprot_t clear_mask)
-{
- struct page_change_data data;
- int ret;
-
- data.set_mask = set_mask;
- data.clear_mask = clear_mask;
-
- ret = apply_to_page_range(&init_mm, start, size, change_page_range,
- &data);
-
- flush_tlb_kernel_range(start, start + size);
- return ret;
-}
-
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
- struct vm_struct *area;
+ int ret;
+ struct page_change_data data;
if (!PAGE_ALIGNED(addr)) {
start &= PAGE_MASK;
@@ -77,29 +58,18 @@ static int change_memory_common(unsigned long addr, int numpages,
if (end < MODULES_VADDR || end >= MODULES_END)
return -EINVAL;
}
- /*
- * Kernel VA mappings are always live, and splitting live section
- * mappings into page mappings may cause TLB conflicts. This means
- * we have to ensure that changing the permission bits of the range
- * we are operating on does not result in such splitting.
- *
- * Let's restrict ourselves to mappings created by vmalloc (or vmap).
- * Those are guaranteed to consist entirely of page mappings, and
- * splitting is never needed.
- *
- * So check whether the [addr, addr + size) interval is entirely
- * covered by precisely one VM area that has the VM_ALLOC flag set.
- */
- area = find_vm_area((void *)addr);
- if (!area ||
- end > (unsigned long)area->addr + area->size ||
- !(area->flags & VM_ALLOC))
- return -EINVAL;
if (!numpages)
return 0;
- return __change_memory_common(start, size, set_mask, clear_mask);
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, end);
+ return ret;
}
int set_memory_ro(unsigned long addr, int numpages)
@@ -131,19 +101,3 @@ int set_memory_x(unsigned long addr, int numpages)
__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void __kernel_map_pages(struct page *page, int numpages, int enable)
-{
- unsigned long addr = (unsigned long) page_address(page);
-
- if (enable)
- __change_memory_common(addr, PAGE_SIZE * numpages,
- __pgprot(PTE_VALID),
- __pgprot(0));
- else
- __change_memory_common(addr, PAGE_SIZE * numpages,
- __pgprot(0),
- __pgprot(PTE_VALID));
-}
-#endif
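
change_memory_common() above funnels every permission change through a pair of masks handed to apply_to_page_range(): the walker clears clear_mask and then sets set_mask on each pte. A userspace sketch of just that mask application (bit positions are illustrative, not the real arm64 layout):

#include <stdio.h>

typedef unsigned long pteval_t;

#define PTE_RDONLY	(1UL << 7)	/* illustrative bit positions */
#define PTE_PXN		(1UL << 53)

/* change_page_range() reduces to: clear the requested bits, then set
 * the requested bits, leaving all other pte bits intact. */
static pteval_t apply_masks(pteval_t pte, pteval_t set_mask,
			    pteval_t clear_mask)
{
	pte &= ~clear_mask;
	pte |= set_mask;
	return pte;
}

int main(void)
{
	pteval_t pte = PTE_PXN;	/* writable, non-executable */

	/* e.g. set PTE_RDONLY (read-only) while clearing PTE_PXN (executable) */
	pte = apply_masks(pte, PTE_RDONLY, PTE_PXN);
	printf("%#lx\n", pte);	/* PTE_RDONLY only */
	return 0;
}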
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index ae11d4e03d0e..cb3ba1b812e7 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -46,14 +46,14 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
-void __init pgd_cache_init(void)
+static int __init pgd_cache_init(void)
{
- if (PGD_SIZE == PAGE_SIZE)
- return;
-
/*
* Naturally aligned pgds required by the architecture.
*/
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
- SLAB_PANIC, NULL);
+ if (PGD_SIZE != PAGE_SIZE)
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ SLAB_PANIC, NULL);
+ return 0;
}
+core_initcall(pgd_cache_init);
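
The pgd.c hunk swaps an explicit call from setup code for core_initcall() registration, so pgd_cache_init() now runs automatically during boot-time initcall processing. A userspace analogue of that "register once, run automatically" shape, using a GCC constructor (illustrative only, not how initcalls are implemented):

#include <stdio.h>

/* Like an initcall, a constructor is registered at build time and
 * invoked before main() without any caller naming it explicitly. */
__attribute__((constructor))
static void pgd_cache_init_analogue(void)
{
	printf("pgd cache set up\n");
}

int main(void)
{
	printf("main runs after all constructors\n");
	return 0;
}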
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 984edcda1850..d69dffffaa89 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -74,25 +74,3 @@
msr pmuserenr_el0, xzr // Disable PMU access from EL0
9000:
.endm
-
-/*
- * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
- *
- * op: operation passed to dc instruction
- * domain: domain used in dsb instruction
- * kaddr: starting virtual address of the region
- * size: size of the region
- * Corrupts: kaddr, size, tmp1, tmp2
- */
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
- dcache_line_size \tmp1, \tmp2
- add \size, \kaddr, \size
- sub \tmp2, \tmp1, #1
- bic \kaddr, \kaddr, \tmp2
-9998: dc \op, \kaddr
- add \kaddr, \kaddr, \tmp1
- cmp \kaddr, \size
- b.lo 9998b
- dsb \domain
- .endm
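
The removed dcache_by_line_op macro computes the end address, rounds kaddr down to a cache-line boundary, then issues one dc operation per line until it passes the end, finishing with a dsb. The same walk in C, with a hypothetical do_dc_op() standing in for the dc instruction:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Stand-in for the dc instruction; in the real macro this is
 * "dc <op>, kaddr", with a dsb once the loop finishes. */
static void do_dc_op(uintptr_t line)
{
	printf("dc op on line %#lx\n", (unsigned long)line);
}

static void dcache_by_line_op(uintptr_t kaddr, size_t size, size_t line_size)
{
	uintptr_t end = kaddr + size;	/* add size, kaddr, size */

	kaddr &= ~(line_size - 1);	/* bic kaddr, kaddr, tmp2 */
	do {
		do_dc_op(kaddr);	/* 9998: dc op, kaddr */
		kaddr += line_size;	/* add kaddr, kaddr, tmp1 */
	} while (kaddr < end);		/* cmp + b.lo 9998b */
	/* dsb <domain> would go here */
}

int main(void)
{
	dcache_by_line_op(0x1010, 0x90, 64);	/* touches 0x1000..0x1080 */
	return 0;
}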
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index dc22de0ce413..cd8853e11f2a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -186,33 +186,7 @@ ENTRY(cpu_do_switch_mm)
ret
ENDPROC(cpu_do_switch_mm)
- .pushsection ".idmap.text", "ax"
-/*
- * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
- *
- * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
- * called by anything else. It can only be executed from a TTBR0 mapping.
- */
-ENTRY(idmap_cpu_replace_ttbr1)
- mrs x2, daif
- msr daifset, #0xf
-
- adrp x1, empty_zero_page
- msr ttbr1_el1, x1
- isb
-
- tlbi vmalle1
- dsb nsh
- isb
-
- msr ttbr1_el1, x0
- isb
-
- msr daif, x2
-
- ret
-ENDPROC(idmap_cpu_replace_ttbr1)
- .popsection
+ .section ".text.init", #alloc, #execinstr
/*
* __cpu_setup
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index d2256fa97ea0..729f89163bc3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,7 +11,6 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
- select BUILDTIME_EXTABLE_SORT
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 60e6f07b7e32..b3069fd83468 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -523,7 +523,7 @@
*/
#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
.section __ex_table,"aw" ! \
- .word (fault_addr - .), (except_addr - .) ! \
+ ASM_ULONG_INSN fault_addr, except_addr ! \
.previous
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 6f893d29f1b2..1960b87c1c8b 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -60,15 +60,14 @@ static inline long access_ok(int type, const void __user * addr,
* use a 32bit (unsigned int) address here.
*/
-#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
- int insn; /* relative address of insn that is allowed to fault. */
- int fixup; /* relative address of fixup routine */
+ unsigned long insn; /* address of insn that is allowed to fault. */
+ unsigned long fixup; /* fixup routine */
};
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
".section __ex_table,\"aw\"\n" \
- ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
+ ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
".previous\n"
/*
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81c97c9..f9064449908a 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -140,6 +140,12 @@ int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fix;
+ /* If we only stored 32bit addresses in the exception table we can drop
+ * out if we faulted on a 64bit address. */
+ if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn))
+ && (regs->iaoq[0] >> 32))
+ return 0;
+
fix = search_exception_tables(regs->iaoq[0]);
if (fix) {
struct exception_data *d;
@@ -149,8 +155,7 @@ int fixup_exception(struct pt_regs *regs)
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
- regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
- regs->iaoq[0] &= ~3;
+ regs->iaoq[0] = ((fix->fixup) & ~3);
/*
* NOTE: In some cases the faulting instruction
* may be in the delay slot of a branch. We
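
The two exception-table encodings in play here differ only in how a stored value becomes an address again: absolute entries hold the full pointer, while the relative form being removed adds a 32-bit offset to the address of the field itself (regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup). A standalone sketch of the relative decode (illustrative, not kernel code):

#include <stdio.h>

struct exception_table_entry_rel {
	int insn;	/* offset of faulting insn, relative to &insn */
	int fixup;	/* offset of fixup routine, relative to &fixup */
};

static unsigned long decode_fixup(const struct exception_table_entry_rel *e)
{
	/* Relative entries halve the table size on 64-bit, but must be
	 * decoded against the entry's own address. */
	return (unsigned long)&e->fixup + e->fixup;
}

int main(void)
{
	static struct exception_table_entry_rel e;
	unsigned long target = (unsigned long)&e + 1024;	/* pretend fixup */

	e.fixup = (int)(target - (unsigned long)&e.fixup);
	printf("decoded %#lx, expected %#lx\n", decode_fixup(&e), target);
	return 0;
}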
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 4afe66aa1400..e4396a7d0f7c 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
"andc %1,%1,%2\n\t"
"popcntd %0,%1"
: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
- : "b" (bits));
+ : "r" (bits));
return leading_zero_bits;
}
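
A note on the constraint being changed: the first instruction of this asm sequence (just above the hunk) is addi %1,%2,-1, and PowerPC addi reads general register 0 as the literal value 0 rather than its contents. The "b" ("base register") constraint excludes r0 for exactly that reason; with plain "r" the compiler is free to allocate r0 for bits, silently computing 0 - 1 instead of bits - 1.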
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 2734c005da21..43686043e297 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,7 +31,6 @@
#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
0x00000040
-/* Reserved - do not use 0x00000004 */
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 646bf4d222c1..ef2ad2d682da 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -569,6 +569,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
+ /*
+ * Use the current MSR TM suspended bit to track if we have
+ * checkpointed state outstanding.
+ * On signal delivery, we'd normally reclaim the checkpointed
+ * state to obtain the stack pointer (see: get_tm_stackpointer()).
+ * This will then directly return to userspace without going
+ * through __switch_to(). However, if the stack frame is bad,
+ * we need to exit this thread which calls __switch_to() which
+ * will again attempt to reclaim the already saved tm state.
+ * Hence we need to check that we've not already reclaimed
+ * this state.
+ * We do this using the current MSR, rather than tracking it in
+ * some specific thread_struct bit, as it has the additional
+ * benefit of checking for a potential TM bad thing exception.
+ */
+ if (!MSR_TM_SUSPENDED(mfmsr()))
+ return;
+
tm_reclaim(thr, thr->regs->msr, cause);
/* Having done the reclaim, we now have the checkpointed
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index a15fe1d4e84a..7030b035905d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,25 +148,23 @@ static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
- unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
- {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
- {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
- {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
- {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
+ {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
+ {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
+ {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
+ {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+ {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
/*
- * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
- * we don't want to turn on TM here, so we use the *_COMP versions
- * which are 0 if the kernel doesn't support TM.
+ * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+ * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+ * which is 0 if the kernel doesn't support TM.
*/
- {CPU_FTR_TM_COMP, 0, 0,
- PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
+ {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -197,12 +195,10 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
- cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
- cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 081b2ad99d73..d29ad9545b41 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
spinlock_t list_lock;
struct list_head pgtable_list;
struct list_head gmap_list;
- unsigned long asce;
+ unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 22877c9440ea..e485817f7b1a 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,28 +26,12 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
- switch (mm->context.asce_limit) {
- case 1UL << 42:
- /*
- * forked 3-level task, fall through to set new asce with new
- * mm->pgd
- */
- case 0:
+ if (mm->context.asce_limit == 0) {
/* context created by exec, set asce limit to 4TB */
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
mm->context.asce_limit = STACK_TOP_MAX;
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
- break;
- case 1UL << 53:
- /* forked 4-level task, set new asce with new mm->pgd */
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
- break;
- case 1UL << 31:
- /* forked 2-level compat task, set new asce with new mm->pgd */
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- /* pgd_alloc() did not increase mm->nr_pmds */
+ } else if (mm->context.asce_limit == (1UL << 31)) {
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -58,7 +42,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void set_user_asce(struct mm_struct *mm)
{
- S390_lowcore.user_asce = mm->context.asce;
+ S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
set_cpu_flag(CIF_ASCE);
@@ -87,7 +71,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce;
+ S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6dafabb6ae1a..2b2ced9dc00a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -45,8 +45,7 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
- u64 pad[2];
-} __packed __aligned(128);
+} __packed __aligned(64);
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 5991cdcb5b40..d7cc79fb6191 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -56,8 +56,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
return _REGION2_ENTRY_EMPTY;
}
-int crst_table_upgrade(struct mm_struct *);
-void crst_table_downgrade(struct mm_struct *);
+int crst_table_upgrade(struct mm_struct *, unsigned long limit);
+void crst_table_downgrade(struct mm_struct *, unsigned long limit);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c1ea67db8404..b16c3d0a1b9f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -163,7 +163,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
- crst_table_downgrade(current->mm); \
+ crst_table_downgrade(current->mm, 1UL << 31); \
execve_tail(); \
} while (0)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index a2e6ef32e054..ca148f7c3eaa 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,7 +110,8 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_idte(init_mm.context.asce);
+ __tlb_flush_idte((unsigned long) init_mm.pgd |
+ init_mm.context.asce_bits);
else
__tlb_flush_global();
}
@@ -132,7 +133,8 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local(init_mm.context.asce);
+ __tlb_flush_idte_local((unsigned long) init_mm.pgd |
+ init_mm.context.asce_bits);
else
__tlb_flush_local();
}
@@ -146,7 +148,8 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, mm->context.asce);
+ __tlb_flush_asce(mm, (unsigned long) mm->pgd |
+ mm->context.asce_bits);
else
__tlb_flush_full(mm);
}
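
Both variants above build an ASCE (address-space-control element) the same way: the region-table origin is page aligned, so its low bits are free to carry the type and length control bits, and the two halves combine with a plain OR. A sketch (the bit values are illustrative, not the architected ones):

#include <stdio.h>

#define _ASCE_TYPE_REGION3	0x04UL	/* illustrative values */
#define _ASCE_TABLE_LENGTH	0x03UL

/* The table origin is page aligned, so ORing in the low control bits
 * never disturbs the address portion. */
static unsigned long make_asce(void *table_origin, unsigned long bits)
{
	return (unsigned long)table_origin | bits;
}

int main(void)
{
	void *pgd = (void *)0x12345000UL;	/* page-aligned origin */
	unsigned long asce = make_asce(pgd,
			_ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH);

	printf("asce = %#lx\n", asce);		/* 0x12345007 */
	return 0;
}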
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index feff9caf89b5..c722400c7697 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,8 +89,7 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
- init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
- S390_lowcore.kernel_asce = init_mm.context.asce;
+ S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index f2b6b1d9c804..ea01477b4aa6 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
if (!(flags & MAP_FIXED))
addr = 0;
if ((addr + len) >= TASK_SIZE)
- return crst_table_upgrade(current->mm);
+ return crst_table_upgrade(current->mm, 1UL << 53);
return 0;
}
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm);
+ rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm);
+ rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 471a370a527b..54ef3bc01b43 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -49,52 +49,81 @@ static void __crst_table_upgrade(void *arg)
__tlb_flush_local();
}
-int crst_table_upgrade(struct mm_struct *mm)
+int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
+ unsigned long entry;
+ int flush;
- /* upgrade should only happen from 3 to 4 levels */
- BUG_ON(mm->context.asce_limit != (1UL << 42));
-
+ BUG_ON(limit > (1UL << 53));
+ flush = 0;
+repeat:
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
-
spin_lock_bh(&mm->page_table_lock);
- pgd = (unsigned long *) mm->pgd;
- crst_table_init(table, _REGION2_ENTRY_EMPTY);
- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->context.asce_limit = 1UL << 53;
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
- mm->task_size = mm->context.asce_limit;
+ if (mm->context.asce_limit < limit) {
+ pgd = (unsigned long *) mm->pgd;
+ if (mm->context.asce_limit <= (1UL << 31)) {
+ entry = _REGION3_ENTRY_EMPTY;
+ mm->context.asce_limit = 1UL << 42;
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS |
+ _ASCE_TYPE_REGION3;
+ } else {
+ entry = _REGION2_ENTRY_EMPTY;
+ mm->context.asce_limit = 1UL << 53;
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS |
+ _ASCE_TYPE_REGION2;
+ }
+ crst_table_init(table, entry);
+ pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+ mm->pgd = (pgd_t *) table;
+ mm->task_size = mm->context.asce_limit;
+ table = NULL;
+ flush = 1;
+ }
spin_unlock_bh(&mm->page_table_lock);
-
- on_each_cpu(__crst_table_upgrade, mm, 0);
+ if (table)
+ crst_table_free(mm, table);
+ if (mm->context.asce_limit < limit)
+ goto repeat;
+ if (flush)
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
-void crst_table_downgrade(struct mm_struct *mm)
+void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
- /* downgrade should only happen from 3 to 2 levels (compat only) */
- BUG_ON(mm->context.asce_limit != (1UL << 42));
-
if (current->active_mm == mm) {
clear_user_asce();
__tlb_flush_mm(mm);
}
-
- pgd = mm->pgd;
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->context.asce_limit = 1UL << 31;
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- mm->task_size = mm->context.asce_limit;
- crst_table_free(mm, (unsigned long *) pgd);
-
+ while (mm->context.asce_limit > limit) {
+ pgd = mm->pgd;
+ switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+ case _REGION_ENTRY_TYPE_R2:
+ mm->context.asce_limit = 1UL << 42;
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS |
+ _ASCE_TYPE_REGION3;
+ break;
+ case _REGION_ENTRY_TYPE_R3:
+ mm->context.asce_limit = 1UL << 31;
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS |
+ _ASCE_TYPE_SEGMENT;
+ break;
+ default:
+ BUG();
+ }
+ mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+ }
if (current->active_mm == mm)
set_user_asce(mm);
}
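
The restored crst_table_upgrade() raises the address-space limit one translation level per pass (2GB -> 4TB -> 8PB), looping until the requested limit is reached; downgrade walks the same ladder in reverse. A sketch of just the stepping logic (standalone, with the limits as plain constants):

#include <stdio.h>

/* One upgrade pass: a 2-level space grows to 3 levels, anything
 * larger grows to 4 levels. */
static unsigned long long upgrade_once(unsigned long long asce_limit)
{
	if (asce_limit <= (1ULL << 31))
		return 1ULL << 42;	/* 3-level region table */
	return 1ULL << 53;		/* 4-level region table */
}

int main(void)
{
	unsigned long long asce_limit = 1ULL << 31;
	const unsigned long long limit = 1ULL << 53;

	while (asce_limit < limit) {
		asce_limit = upgrade_once(asce_limit);
		printf("upgraded to limit %#llx\n", asce_limit);
	}
	return 0;
}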
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 8381c09d2870..a841e9765bd6 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
+ rctx->complete(&req->base, ret);
else {
local_bh_disable();
- req_ctx->complete(&req->base, ret);
+ rctx->complete(&req->base, ret);
local_bh_enable();
}
}
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 08b1f2f6ea50..0010c78c4998 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -25,8 +25,6 @@
#define EFI32_LOADER_SIGNATURE "EL32"
#define EFI64_LOADER_SIGNATURE "EL64"
-#define MAX_CMDLINE_ADDRESS UINT_MAX
-
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index e6a8613fbfb0..f8a29d2c97b0 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,7 +4,6 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
-#define hugepages_supported() cpu_has_pse
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index df6b4eeac0bd..7af2505f20c2 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -254,8 +254,7 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
struct irq_desc *desc;
int cpu, vector;
- if (!data->cfg.vector)
- return;
+ BUG_ON(!data->cfg.vector);
vector = data->cfg.vector;
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 2658e2af74ec..0a850100c594 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
void mce_gen_pool_process(void)
{
struct llist_node *head;
- struct mce_evt_llist *node, *tmp;
+ struct mce_evt_llist *node;
struct mce *mce;
head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
return;
head = llist_reverse_order(head);
- llist_for_each_entry_safe(node, tmp, head, llnode) {
+ llist_for_each_entry(node, head, llnode) {
mce = &node->mce;
atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
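
For context, the _safe iterator being removed here exists because the loop body frees each node via gen_pool_free(): the safe form caches the next pointer before the body runs. A generic sketch of that pattern on a plain singly linked list (not the kernel llist API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

int main(void)
{
	struct node *head = NULL, *n, *tmp;

	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->payload = i;
		n->next = head;
		head = n;
	}

	/* "Safe" traversal: grab ->next before the body frees the node,
	 * which is what llist_for_each_entry_safe() does with its extra
	 * cursor argument. */
	for (n = head; n; n = tmp) {
		tmp = n->next;
		printf("processing %d\n", n->payload);
		free(n);
	}
	return 0;
}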
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 05538582a809..2c5aaf8c2e2f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -385,9 +385,6 @@ static void intel_thermal_interrupt(void)
{
__u64 msr_val;
- if (static_cpu_has(X86_FEATURE_HWP))
- wrmsrl_safe(MSR_HWP_STATUS, 0);
-
rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
/* Check for violation of core thermal thresholds*/
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 5da924bbf0a0..b285d4e8c68e 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -106,24 +106,14 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
continue;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
resource_size_t start, end;
- unsigned long flags;
-
- flags = pci_resource_flags(dev, i);
- if (!(flags & IORESOURCE_MEM))
- continue;
-
- if (flags & IORESOURCE_UNSET)
- continue;
-
- if (pci_resource_len(dev, i) == 0)
- continue;
start = pci_resource_start(dev, i);
+ if (start == 0)
+ break;
end = pci_resource_end(dev, i);
if (screen_info.lfb_base >= start &&
screen_info.lfb_base < end) {
found_bar = 1;
- break;
}
}
}
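
The quirk above walks each PCI BAR and asks whether the firmware framebuffer base lies inside it, a plain containment test over a half-open range. A minimal sketch (addresses are made up):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long resource_size_t;

/* Does the framebuffer base fall inside the BAR [start, end)? */
static bool base_in_bar(resource_size_t lfb_base,
			resource_size_t start, resource_size_t end)
{
	return lfb_base >= start && lfb_base < end;
}

int main(void)
{
	printf("%d\n", base_in_bar(0xe0000000ULL, 0xe0000000ULL,
				   0xf0000000ULL));	/* 1 */
	printf("%d\n", base_in_bar(0xf0000000ULL, 0xe0000000ULL,
				   0xf0000000ULL));	/* 0 */
	return 0;
}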
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 6aa0f4d9eea6..92ae6acac8a7 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
if (freq_desc_tables[cpu_index].msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
- ratio = (lo >> 8) & 0xff;
+ ratio = (lo >> 8) & 0x1f;
} else {
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
ratio = (hi >> 8) & 0x1f;
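
The masks differ in how much of the ratio field they keep: the TSC ratio sits in bits 15:8 of MSR_PLATFORM_INFO, so masking the shifted value with 0xff preserves the full eight bits while 0x1f truncates any ratio above 31. A quick illustration:

#include <stdio.h>

/* Extract the ratio field (bits 15:8) under a given mask width. */
static unsigned int ratio_bits(unsigned int lo, unsigned int mask)
{
	return (lo >> 8) & mask;
}

int main(void)
{
	unsigned int lo = 0x2400;	/* ratio field = 0x24 (36) */

	printf("0xff mask: %u\n", ratio_bits(lo, 0xff));	/* 36 */
	printf("0x1f mask: %u\n", ratio_bits(lo, 0x1f));	/* 4 */
	return 0;
}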
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 605cea75eb0d..7eb4ebd3ebea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -697,6 +697,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
return 1;
}
+ kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6494,6 +6495,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
+ kvm_load_guest_xcr0(vcpu);
+
vcpu->mode = IN_GUEST_MODE;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6516,8 +6519,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}
- kvm_load_guest_xcr0(vcpu);
-
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
@@ -6567,8 +6568,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
- kvm_put_guest_xcr0(vcpu);
-
/* Interrupt is enabled by handle_external_intr() */
kvm_x86_ops->handle_external_intr(vcpu);
@@ -7216,6 +7215,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
* and assume host would use all available bits.
* Guest xcr0 would be loaded later.
*/
+ kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7224,6 +7224,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
+ kvm_put_guest_xcr0(vcpu);
+
if (!vcpu->guest_fpu_loaded) {
vcpu->fpu_counter = 0;
return;
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index ddb2244b06a1..637ab34ed632 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -33,7 +33,7 @@
struct kmmio_fault_page {
struct list_head list;
struct kmmio_fault_page *release_next;
- unsigned long addr; /* the requested address */
+ unsigned long page; /* location of the fault page */
pteval_t old_presence; /* page presence prior to arming */
bool armed;
@@ -70,16 +70,9 @@ unsigned int kmmio_count;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long addr)
+static struct list_head *kmmio_page_list(unsigned long page)
{
- unsigned int l;
- pte_t *pte = lookup_address(addr, &l);
-
- if (!pte)
- return NULL;
- addr &= page_level_mask(l);
-
- return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}
/* Accessed per-cpu */
@@ -105,19 +98,15 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
}
/* You must be holding RCU read lock. */
-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
struct list_head *head;
struct kmmio_fault_page *f;
- unsigned int l;
- pte_t *pte = lookup_address(addr, &l);
- if (!pte)
- return NULL;
- addr &= page_level_mask(l);
- head = kmmio_page_list(addr);
+ page &= PAGE_MASK;
+ head = kmmio_page_list(page);
list_for_each_entry_rcu(f, head, list) {
- if (f->addr == addr)
+ if (f->page == page)
return f;
}
return NULL;
@@ -148,10 +137,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
unsigned int level;
- pte_t *pte = lookup_address(f->addr, &level);
+ pte_t *pte = lookup_address(f->page, &level);
if (!pte) {
- pr_err("no pte for addr 0x%08lx\n", f->addr);
+ pr_err("no pte for page 0x%08lx\n", f->page);
return -1;
}
@@ -167,7 +156,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
return -1;
}
- __flush_tlb_one(f->addr);
+ __flush_tlb_one(f->page);
return 0;
}
@@ -187,12 +176,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
int ret;
WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
if (f->armed) {
- pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
- f->addr, f->count, !!f->old_presence);
+ pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+ f->page, f->count, !!f->old_presence);
}
ret = clear_page_presence(f, true);
- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
- f->addr);
+ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+ f->page);
f->armed = true;
return ret;
}
@@ -202,7 +191,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
int ret = clear_page_presence(f, false);
WARN_ONCE(ret < 0,
- KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+ KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
f->armed = false;
}
@@ -226,12 +215,6 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
struct kmmio_context *ctx;
struct kmmio_fault_page *faultpage;
int ret = 0; /* default to fault not handled */
- unsigned long page_base = addr;
- unsigned int l;
- pte_t *pte = lookup_address(addr, &l);
- if (!pte)
- return -EINVAL;
- page_base &= page_level_mask(l);
/*
* Preemption is now disabled to prevent process switch during
@@ -244,7 +227,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
preempt_disable();
rcu_read_lock();
- faultpage = get_kmmio_fault_page(page_base);
+ faultpage = get_kmmio_fault_page(addr);
if (!faultpage) {
/*
* Either this page fault is not caused by kmmio, or
@@ -256,7 +239,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
ctx = &get_cpu_var(kmmio_ctx);
if (ctx->active) {
- if (page_base == ctx->addr) {
+ if (addr == ctx->addr) {
/*
* A second fault on the same page means some other
* condition needs handling by do_page_fault(), the
@@ -284,9 +267,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
ctx->active++;
ctx->fpage = faultpage;
- ctx->probe = get_kmmio_probe(page_base);
+ ctx->probe = get_kmmio_probe(addr);
ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
- ctx->addr = page_base;
+ ctx->addr = addr;
if (ctx->probe && ctx->probe->pre_handler)
ctx->probe->pre_handler(ctx->probe, regs, addr);
@@ -371,11 +354,12 @@ out:
}
/* You must be holding kmmio_lock. */
-static int add_kmmio_fault_page(unsigned long addr)
+static int add_kmmio_fault_page(unsigned long page)
{
struct kmmio_fault_page *f;
- f = get_kmmio_fault_page(addr);
+ page &= PAGE_MASK;
+ f = get_kmmio_fault_page(page);
if (f) {
if (!f->count)
arm_kmmio_fault_page(f);
@@ -388,25 +372,26 @@ static int add_kmmio_fault_page(unsigned long addr)
return -1;
f->count = 1;
- f->addr = addr;
+ f->page = page;
if (arm_kmmio_fault_page(f)) {
kfree(f);
return -1;
}
- list_add_rcu(&f->list, kmmio_page_list(f->addr));
+ list_add_rcu(&f->list, kmmio_page_list(f->page));
return 0;
}
/* You must be holding kmmio_lock. */
-static void release_kmmio_fault_page(unsigned long addr,
+static void release_kmmio_fault_page(unsigned long page,
struct kmmio_fault_page **release_list)
{
struct kmmio_fault_page *f;
- f = get_kmmio_fault_page(addr);
+ page &= PAGE_MASK;
+ f = get_kmmio_fault_page(page);
if (!f)
return;
@@ -435,27 +420,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
int ret = 0;
unsigned long size = 0;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
- unsigned int l;
- pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
if (get_kmmio_probe(p->addr)) {
ret = -EEXIST;
goto out;
}
-
- pte = lookup_address(p->addr, &l);
- if (!pte) {
- ret = -EINVAL;
- goto out;
- }
-
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
if (add_kmmio_fault_page(p->addr + size))
pr_err("Unable to set page fault.\n");
- size += page_level_size(l);
+ size += PAGE_SIZE;
}
out:
spin_unlock_irqrestore(&kmmio_lock, flags);
@@ -530,17 +506,11 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
struct kmmio_fault_page *release_list = NULL;
struct kmmio_delayed_release *drelease;
- unsigned int l;
- pte_t *pte;
-
- pte = lookup_address(p->addr, &l);
- if (!pte)
- return;
spin_lock_irqsave(&kmmio_lock, flags);
while (size < size_lim) {
release_kmmio_fault_page(p->addr + size, &release_list);
- size += page_level_size(l);
+ size += PAGE_SIZE;
}
list_del_rcu(&p->list);
kmmio_count--;
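
The kmmio hunks above restore the original assumption that every tracked mapping is a base page: addresses are masked with PAGE_MASK and ranges walked in PAGE_SIZE steps, where the removed lookup_address()/page_level_mask() logic also handled 2 MiB and 1 GiB mappings. A minimal userspace sketch of the restored masking, assuming 4 KiB pages and redefining the kernel macros locally:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                     /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Round an address down to the base of its (assumed 4 KiB) page,
     * mirroring the restored 'page &= PAGE_MASK' lines. */
    static uintptr_t page_base(uintptr_t addr)
    {
        return addr & PAGE_MASK;
    }

    int main(void)
    {
        uintptr_t addr = 0xc0123456UL;

        printf("addr=%#lx base=%#lx\n",
               (unsigned long)addr, (unsigned long)page_base(addr));
        return 0;
    }
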
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 91327dbfbb1d..ae95e963c5bb 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -360,20 +360,15 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}
- err = hd_ref_init(p);
- if (err) {
- if (flags & ADDPART_FLAG_WHOLEDISK)
- goto out_remove_file;
- goto out_del;
- }
-
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
- return p;
+
+ if (!hd_ref_init(p))
+ return p;
out_free_info:
free_part_info(p);
@@ -382,8 +377,6 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
-out_remove_file:
- device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index dac1c24e9c3e..d19b52324cf5 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -69,9 +69,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
struct scatterlist *sg;
sg = walk->sg;
+ walk->pg = sg_page(sg);
walk->offset = sg->offset;
- walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
- walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
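
The removed ahash lines folded a scatterlist offset that may exceed PAGE_SIZE into the page pointer, leaving walk->offset strictly in-page; the restored code takes sg->offset as-is. A runnable sketch of that normalization, with PAGE_SHIFT defined locally and page indices standing in for struct page pointers:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Split a byte offset from a base page into (page index, in-page offset),
     * mirroring 'pg += offset >> PAGE_SHIFT; offset = offset_in_page(offset)'. */
    static void normalize(size_t base_page, size_t offset,
                          size_t *page, size_t *in_page)
    {
        *page    = base_page + (offset >> PAGE_SHIFT);
        *in_page = offset & (PAGE_SIZE - 1);
    }

    int main(void)
    {
        size_t page, off;

        normalize(100, 9000, &page, &off);            /* 9000 > PAGE_SIZE */
        printf("page=%zu offset=%zu\n", page, off);   /* page=102 offset=808 */
        return 0;
    }
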
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d4944318ca1f..ae8c57fd8bc7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1849,7 +1849,6 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
static int do_test_rsa(struct crypto_akcipher *tfm,
struct akcipher_testvec *vecs)
{
- char *xbuf[XBUFSIZE];
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
@@ -1858,12 +1857,9 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
- if (testmgr_alloc_buf(xbuf))
- return err;
-
req = akcipher_request_alloc(tfm, GFP_KERNEL);
if (!req)
- goto free_xbuf;
+ return err;
init_completion(&result.completion);
@@ -1881,14 +1877,9 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
if (!outbuf_enc)
goto free_req;
- if (WARN_ON(vecs->m_size > PAGE_SIZE))
- goto free_all;
-
- memcpy(xbuf[0], vecs->m, vecs->m_size);
-
sg_init_table(src_tab, 2);
- sg_set_buf(&src_tab[0], xbuf[0], 8);
- sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
+ sg_set_buf(&src_tab[0], vecs->m, 8);
+ sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
sg_init_one(&dst, outbuf_enc, out_len_max);
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
@@ -1907,7 +1898,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
goto free_all;
}
/* verify that encrypted message is equal to expected */
- if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
+ if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
pr_err("alg: rsa: encrypt test failed. Invalid output\n");
err = -EINVAL;
goto free_all;
@@ -1922,13 +1913,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
err = -ENOMEM;
goto free_all;
}
-
- if (WARN_ON(vecs->c_size > PAGE_SIZE))
- goto free_all;
-
- memcpy(xbuf[0], vecs->c, vecs->c_size);
-
- sg_init_one(&src, xbuf[0], vecs->c_size);
+ sg_init_one(&src, vecs->c, vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
init_completion(&result.completion);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
@@ -1955,8 +1940,6 @@ free_all:
kfree(outbuf_enc);
free_req:
akcipher_request_free(req);
-free_xbuf:
- testmgr_free_buf(xbuf);
return err;
}
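
The removed testmgr code bounced each RSA test vector through a page-backed xbuf (guarded by WARN_ON(size > PAGE_SIZE)) before building scatterlists, rather than pointing sg entries at the vectors directly as the restored code does. A hedged userspace sketch of that bounce pattern, with malloc() standing in for the kernel's testmgr_alloc_buf():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Copy 'src' into a freshly allocated page-sized buffer; refuse inputs
     * larger than one page, like the removed WARN_ON size check. */
    static void *bounce(const void *src, size_t len)
    {
        void *buf;

        if (len > PAGE_SIZE)
            return NULL;
        buf = malloc(PAGE_SIZE);       /* stands in for testmgr_alloc_buf() */
        if (buf)
            memcpy(buf, src, len);
        return buf;
    }

    int main(void)
    {
        const char msg[] = "rsa test vector";
        void *buf = bounce(msg, sizeof(msg));

        if (buf) {
            printf("bounced %zu bytes\n", sizeof(msg));
            free(buf);
        }
        return 0;
    }
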
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 9f77943653fb..6979186dbd4b 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,58 +491,6 @@ static void acpi_processor_remove(struct acpi_device *device)
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-#ifdef CONFIG_X86
-static bool acpi_hwp_native_thermal_lvt_set;
-static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
- u32 lvl,
- void *context,
- void **rv)
-{
- u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
- u32 capbuf[2];
- struct acpi_osc_context osc_context = {
- .uuid_str = sb_uuid_str,
- .rev = 1,
- .cap.length = 8,
- .cap.pointer = capbuf,
- };
-
- if (acpi_hwp_native_thermal_lvt_set)
- return AE_CTRL_TERMINATE;
-
- capbuf[0] = 0x0000;
- capbuf[1] = 0x1000; /* set bit 12 */
-
- if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
- if (osc_context.ret.pointer && osc_context.ret.length > 1) {
- u32 *capbuf_ret = osc_context.ret.pointer;
-
- if (capbuf_ret[1] & 0x1000) {
- acpi_handle_info(handle,
- "_OSC native thermal LVT Acked\n");
- acpi_hwp_native_thermal_lvt_set = true;
- }
- }
- kfree(osc_context.ret.pointer);
- }
-
- return AE_OK;
-}
-
-void __init acpi_early_processor_osc(void)
-{
- if (boot_cpu_has(X86_FEATURE_HWP)) {
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_hwp_native_thermal_lvt_osc,
- NULL, NULL, NULL);
- acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
- acpi_hwp_native_thermal_lvt_osc,
- NULL, NULL);
- }
-}
-#endif
-
/*
* The following ACPI IDs are known to be suitable for representing as
* processor devices.
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 28c50c6b5f45..bc32f3194afe 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -417,9 +417,6 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
obj_desc->method.mutex->mutex.
original_sync_level =
obj_desc->method.mutex->mutex.sync_level;
-
- obj_desc->method.mutex->mutex.thread_id =
- acpi_os_get_thread_id();
}
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ca4f28432d87..a212cefae524 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1004,9 +1004,6 @@ static int __init acpi_bus_init(void)
goto error1;
}
- /* Set capability bits for _OSC under processor scope */
- acpi_early_processor_osc();
-
/*
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 0f3f41c13b38..11d87bf67e73 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -130,12 +130,6 @@ void acpi_early_processor_set_pdc(void);
static inline void acpi_early_processor_set_pdc(void) {}
#endif
-#ifdef CONFIG_X86
-void acpi_early_processor_osc(void);
-#else
-static inline void acpi_early_processor_osc(void) {}
-#endif
-
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 639adb1f8abd..04975b851c23 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -51,9 +51,6 @@ static int ahci_probe(struct platform_device *pdev)
if (rc)
return rc;
- of_property_read_u32(dev->of_node,
- "ports-implemented", &hpriv->force_port_map);
-
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e916bff6cee8..e2c6d9e0c5ac 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -739,9 +739,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
__func__);
version = XGENE_AHCI_V1;
- } else if (info->valid & ACPI_VALID_CID) {
- version = XGENE_AHCI_V2;
}
+ if (info->valid & ACPI_VALID_CID)
+ version = XGENE_AHCI_V2;
}
}
#endif
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 9628fa131757..998c6a85ad89 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -467,7 +467,6 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, hpriv->force_port_map);
port_map = hpriv->force_port_map;
- hpriv->saved_port_map = port_map;
}
if (hpriv->mask_port_map) {
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index a48824deabc5..65f50eccd49b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1381,7 +1381,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock(&genpd->lock);
- if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
subdomain->name);
ret = -EBUSY;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 80cf8add46ff..423f4ca7d712 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,12 +488,6 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
- /*
- * This bio may be started from the middle of the 'bvec'
- * because of bio splitting, so offset from the bvec must
- * be passed to iov iterator
- */
- iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 8f1ce6d57a08..93b3f99b6865 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q)
req, req->cmd_type);
if (unlikely(!nbd->sock)) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
+ dev_err(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
req->errors++;
nbd_end_request(nbd, req);
spin_lock_irq(q->queue_lock);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 78a39f736c64..562b5a4ca7b7 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -126,7 +126,7 @@
*/
#include <linux/types.h>
-static int verbose = 0;
+static bool verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);
-module_param(verbose, int, 0);
+module_param(verbose, bool, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 216a94fed5b4..1740d75e8a32 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -117,7 +117,7 @@
*/
-static int verbose = 0;
+static bool verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
#include <asm/uaccess.h>
-module_param(verbose, int, 0);
+module_param(verbose, bool, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 1827fc4d15c1..e98d15eaa799 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
- for_each_available_child_of_node(pdev->dev.of_node, child) {
+ for_each_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index bbf206e3da0d..3ace102a2a0a 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -422,12 +422,6 @@ const struct clk_ops clk_divider_ops = {
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
-const struct clk_ops clk_divider_ro_ops = {
- .recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
-};
-EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
-
static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -451,10 +445,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
- init.ops = &clk_divider_ro_ops;
- else
- init.ops = &clk_divider_ops;
+ init.ops = &clk_divider_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
index d920d410b51d..c83ae1367abc 100644
--- a/drivers/clk/meson/clkc.c
+++ b/drivers/clk/meson/clkc.c
@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
}
void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
- unsigned int nr_confs,
+ size_t nr_confs,
void __iomem *clk_base)
{
unsigned int i;
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 558da89555af..13aabbb3acbe 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
div->width = 1;
div_hw = &div->hw;
- div_ops = &clk_divider_ro_ops;
+ div_ops = &clk_divider_ops;
}
branch->gate.reg = branch->offset + reg_base;
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 8b9c388bc43c..6c899fb5b856 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -2722,7 +2722,7 @@ static struct clk_rcg ce3_src = {
},
.freq_tbl = clk_tbl_ce3,
.clkr = {
- .enable_reg = 0x36c0,
+ .enable_reg = 0x2c08,
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "ce3_src",
@@ -2738,7 +2738,7 @@ static struct clk_branch ce3_core_clk = {
.halt_reg = 0x2fdc,
.halt_bit = 5,
.clkr = {
- .enable_reg = 0x36cc,
+ .enable_reg = 0x36c4,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_core_clk",
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 9b6c8188efac..be6c7fd8315d 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (gate_offset >= 0) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
- goto err_gate;
+ return ERR_PTR(-ENOMEM);
gate->flags = gate_flags;
gate->reg = base + gate_offset;
@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (div_width > 0) {
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
- goto err_div;
+ return ERR_PTR(-ENOMEM);
div->flags = div_flags;
div->reg = base + muxdiv_offset;
@@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
div->width = div_width;
div->lock = lock;
div->table = div_table;
- div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
- ? &clk_divider_ro_ops
- : &clk_divider_ops;
+ div_ops = &clk_divider_ops;
}
clk = clk_register_composite(NULL, name, parent_names, num_parents,
@@ -102,11 +100,6 @@ static struct clk *rockchip_clk_register_branch(const char *name,
flags);
return clk;
-err_div:
- kfree(gate);
-err_gate:
- kfree(mux);
- return ERR_PTR(-ENOMEM);
}
static struct clk *rockchip_clk_register_frac_branch(const char *name,
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index 897c36c1754a..a1cdef6b0f90 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -92,7 +92,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
int num = ARRAY_SIZE(parent_names);
char name[12];
struct clk_init_data init;
- static int instance;
int i;
bool deprecated;
@@ -119,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
- snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
+ snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
sp810->timerclken[i].sp810 = sp810;
sp810->timerclken[i].channel = i;
@@ -140,6 +139,5 @@ static void __init clk_sp810_of_setup(struct device_node *node)
}
of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
- instance++;
}
CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
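
The removed sp810 hunk generated names as "sp810_<instance>_<i>" from a static counter so that two SP810 blocks cannot register identically named clocks; the restored "timerclken%d" scheme collides when more than one node is present. A runnable sketch of the counter-based naming:

    #include <stdio.h>

    /* One call per SP810 block; the static counter keeps names unique
     * across blocks, mirroring the removed snprintf() pattern. */
    static void name_channels(void)
    {
        static int instance;
        char name[12];
        int i;

        for (i = 0; i < 4; i++) {
            snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
            printf("%s\n", name);
        }
        instance++;
    }

    int main(void)
    {
        name_channels();    /* sp810_0_0 .. sp810_0_3 */
        name_channels();    /* sp810_1_0 .. sp810_1_3 */
        return 0;
    }
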
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f53b02a6bc05..98fb8821382d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -667,11 +667,6 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
- /* For level 1 and 2, bits[23:16] contain the ratio */
- if (tdp_ctrl)
- tdp_ratio >>= 16;
-
- tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index e342565e8715..545069d5fdfb 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
* call the CPU ops suspend protocol with idle index as a
* parameter.
*/
- ret = arm_cpuidle_suspend(idx);
+ arm_cpuidle_suspend(idx);
cpu_pm_exit();
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 60fc0fa26fd3..3d9acc53d247 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,9 +225,6 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
- /* Don't let anything leak to 'out' */
- memset(&state, 0, sizeof(state));
-
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index ab9945f2cb7a..8ef06fad8b14 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,9 +212,6 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
- /* Don't let anything leak to 'out' */
- memset(&state, 0, sizeof(state));
-
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index b9178d0a3093..3f76bd495bcb 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -145,8 +145,6 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
-int adf_init_pf_wq(void);
-void adf_exit_pf_wq(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e7480f373532..473d36d91644 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -469,17 +469,12 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_init_aer())
goto err_aer;
- if (adf_init_pf_wq())
- goto err_pf_wq;
-
if (qat_crypto_register())
goto err_crypto_register;
return 0;
err_crypto_register:
- adf_exit_pf_wq();
-err_pf_wq:
adf_exit_aer();
err_aer:
adf_chr_drv_destroy();
@@ -492,7 +487,6 @@ static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
adf_exit_aer();
- adf_exit_pf_wq();
qat_crypto_unregister();
adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 38a0415e767d..1117a8b58280 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -119,6 +119,11 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
int i;
u32 reg;
+ /* Workqueue for PF2VF responses */
+ pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+ if (!pf2vf_resp_wq)
+ return -ENOMEM;
+
for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
i++, vf_info++) {
/* This ptr will be populated when VFs will be created */
@@ -211,6 +216,11 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
kfree(accel_dev->pf.vf_info);
accel_dev->pf.vf_info = NULL;
+
+ if (pf2vf_resp_wq) {
+ destroy_workqueue(pf2vf_resp_wq);
+ pf2vf_resp_wq = NULL;
+ }
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
@@ -294,19 +304,3 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
-
-int __init adf_init_pf_wq(void)
-{
- /* Workqueue for PF2VF responses */
- pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-
- return !pf2vf_resp_wq ? -ENOMEM : 0;
-}
-
-void adf_exit_pf_wq(void)
-{
- if (pf2vf_resp_wq) {
- destroy_workqueue(pf2vf_resp_wq);
- pf2vf_resp_wq = NULL;
- }
-}
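
The deleted adf_init_pf_wq()/adf_exit_pf_wq() pair created the PF2VF response workqueue once at module init and tore it down NULL-safely at exit; the hunks above move creation back into adf_enable_sriov(). A generic sketch of that create-once, idempotent-teardown pattern, with malloc()/free() standing in for create_workqueue()/destroy_workqueue():

    #include <stdlib.h>

    static void *pf2vf_resp_wq;      /* stand-in for the workqueue pointer */

    /* Create the resource once at init; non-zero return signals -ENOMEM. */
    static int init_pf_wq(void)
    {
        pf2vf_resp_wq = malloc(64);  /* stands in for create_workqueue() */
        return pf2vf_resp_wq ? 0 : -1;
    }

    /* Idempotent teardown: safe even if init failed or this runs twice. */
    static void exit_pf_wq(void)
    {
        if (pf2vf_resp_wq) {
            free(pf2vf_resp_wq);     /* stands in for destroy_workqueue() */
            pf2vf_resp_wq = NULL;
        }
    }

    int main(void)
    {
        if (init_pf_wq())
            return 1;
        exit_pf_wq();
        exit_pf_wq();                /* second call is a harmless no-op */
        return 0;
    }
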
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a04fea4d0063..b6f9f42e2985 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,14 +63,6 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
ptr->eptr = upper_32_bits(dma_addr);
}
-static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
- struct talitos_ptr *src_ptr, bool is_sec1)
-{
- dst_ptr->ptr = src_ptr->ptr;
- if (!is_sec1)
- dst_ptr->eptr = src_ptr->eptr;
-}
-
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
bool is_sec1)
{
@@ -1091,20 +1083,21 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
+
/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
if (sg_count > 1 &&
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
areq->assoclen,
&edesc->link_tbl[tbl_off])) > 1) {
+ tbl_off += ret;
+
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
-
- tbl_off += ret;
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
@@ -1133,13 +1126,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
- areq->assoclen, 0);
- } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
- areq->assoclen, sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) >
- 1) {
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+ sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) > 1) {
+ tbl_off += ret;
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
tbl_off *
@@ -1147,10 +1138,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
- tbl_off += ret;
- } else {
- copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
- }
+ } else
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1162,13 +1151,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->icv_ool = false;
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
- areq->assoclen, 0);
- } else if ((sg_count =
- sg_to_link_tbl_offset(areq->dst, sg_count,
+ if (sg_count > 1 &&
+ (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) > 1) {
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1191,9 +1178,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dma_len, DMA_BIDIRECTIONAL);
edesc->icv_ool = true;
- } else {
- copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
- }
+ } else
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2533,11 +2519,21 @@ struct talitos_crypto_alg {
struct talitos_alg_template algt;
};
-static int talitos_init_common(struct talitos_ctx *ctx,
- struct talitos_crypto_alg *talitos_alg)
+static int talitos_cra_init(struct crypto_tfm *tfm)
{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
struct talitos_private *priv;
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
+
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
@@ -2555,33 +2551,10 @@ static int talitos_init_common(struct talitos_ctx *ctx,
return 0;
}
-static int talitos_cra_init(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
-
- return talitos_init_common(ctx, talitos_alg);
-}
-
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- struct aead_alg *alg = crypto_aead_alg(tfm);
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
-
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.aead);
-
- return talitos_init_common(ctx, talitos_alg);
+ talitos_cra_init(crypto_aead_tfm(tfm));
+ return 0;
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
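
Both the removed and restored talitos_cra_init() variants recover the driver-private struct from an embedded member via container_of(); the revert merely inlines the AHASH-vs-cipher dispatch back into talitos_cra_init(). A self-contained sketch of the container_of() idiom, with the macro defined locally since kernel headers are unavailable in userspace:

    #include <stdio.h>
    #include <stddef.h>

    /* Userspace stand-in for the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct alg {
        const char *name;
    };

    struct driver_alg {
        int priority;
        struct alg alg;     /* embedded member handed to the crypto core */
    };

    int main(void)
    {
        struct driver_alg drv = { .priority = 3000, .alg = { "sha256" } };
        struct alg *a = &drv.alg;   /* what a callback gets back */
        struct driver_alg *d = container_of(a, struct driver_alg, alg);

        printf("%s prio=%d\n", d->alg.name, d->priority);
        return 0;
    }
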
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index c66133b5e852..4f099ea29f83 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,14 +130,26 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
if (dwc->initialized == true)
return;
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+ if (dws) {
+ /*
+ * We need controller-specific data to set up slave
+ * transfers.
+ */
+ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+ cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+ } else {
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+ }
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -924,7 +936,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;
- if (dws->dma_dev != chan->device->dev)
+ if (!dws || dws->dma_dev != chan->device->dev)
return false;
/* We have to copy data since dws can be temporary storage */
@@ -1148,14 +1160,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
- /*
- * We need controller-specific data to set up slave transfers.
- */
- if (chan->private && !dw_dma_filter(chan, chan->private)) {
- dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
- return -EINVAL;
- }
-
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1217,14 +1221,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
-
- /* Clear custom channel configuration */
- dwc->src_id = 0;
- dwc->dst_id = 0;
-
- dwc->src_master = 0;
- dwc->dst_master = 0;
-
dwc->initialized = false;
/* Disable interrupts */
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index efc02b98e6ba..823ad728aecf 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ return sr;
}
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 26da2865b025..f06579c6d548 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,9 +41,6 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
-#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
-#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
-#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 55f5d33f6dc7..a59061e4221a 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -122,7 +122,6 @@ struct pxad_chan {
struct pxad_device {
struct dma_device slave;
int nr_chans;
- int nr_requestors;
void __iomem *base;
struct pxad_phy *phys;
spinlock_t phy_lock; /* Phy association */
@@ -474,7 +473,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;
/* clear the channel mapping in DRCMR */
- if (chan->drcmr <= pdev->nr_requestors) {
+ if (chan->drcmr <= DRCMR_CHLNUM) {
reg = pxad_drcmr(chan->drcmr);
writel_relaxed(0, chan->phy->base + reg);
}
@@ -510,7 +509,6 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
- struct pxad_device *pdev;
u32 reg, dalgn;
if (!phy->vchan)
@@ -520,8 +518,7 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);
- pdev = to_pxad_dev(phy->vchan->vc.chan.device);
- if (phy->vchan->drcmr <= pdev->nr_requestors) {
+ if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
reg = pxad_drcmr(phy->vchan->drcmr);
writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
}
@@ -917,7 +914,6 @@ static void pxad_get_config(struct pxad_chan *chan,
{
u32 maxburst = 0, dev_addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
- struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
*dcmd = 0;
if (dir == DMA_DEV_TO_MEM) {
@@ -926,7 +922,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
*dcmd |= PXA_DCMD_INCTRGADDR;
- if (chan->drcmr <= pdev->nr_requestors)
+ if (chan->drcmr <= DRCMR_CHLNUM)
*dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -935,7 +931,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
*dcmd |= PXA_DCMD_INCSRCADDR;
- if (chan->drcmr <= pdev->nr_requestors)
+ if (chan->drcmr <= DRCMR_CHLNUM)
*dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
@@ -1345,15 +1341,13 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
static int pxad_init_dmadev(struct platform_device *op,
struct pxad_device *pdev,
- unsigned int nr_phy_chans,
- unsigned int nr_requestors)
+ unsigned int nr_phy_chans)
{
int ret;
unsigned int i;
struct pxad_chan *c;
pdev->nr_chans = nr_phy_chans;
- pdev->nr_requestors = nr_requestors;
INIT_LIST_HEAD(&pdev->slave.channels);
pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
@@ -1388,7 +1382,7 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0, nb_requestors = 0;
+ int ret, dma_channels = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1405,23 +1399,13 @@ static int pxad_probe(struct platform_device *op)
return PTR_ERR(pdev->base);
of_id = of_match_device(pxad_dt_ids, &op->dev);
- if (of_id) {
+ if (of_id)
of_property_read_u32(op->dev.of_node, "#dma-channels",
&dma_channels);
- ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
- &nb_requestors);
- if (ret) {
- dev_warn(pdev->slave.dev,
- "#dma-requests set to default 32 as missing in OF: %d",
- ret);
- nb_requestors = 32;
- };
- } else if (pdata && pdata->dma_channels) {
+ else if (pdata && pdata->dma_channels)
dma_channels = pdata->dma_channels;
- nb_requestors = pdata->nb_requestors;
- } else {
+ else
dma_channels = 32; /* default 32 channel */
- }
dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
@@ -1438,7 +1422,7 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
pdev->slave.dev = &op->dev;
- ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
+ ret = pxad_init_dmadev(op, pdev, dma_channels);
if (ret) {
dev_err(pdev->slave.dev, "unable to register\n");
return ret;
@@ -1457,8 +1441,7 @@ static int pxad_probe(struct platform_device *op)
platform_set_drvdata(op, pdev);
pxad_init_debugfs(pdev);
- dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
- dma_channels, nb_requestors);
+ dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
return 0;
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 792bdae2b91d..01087a38da22 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
- return NOTIFY_DONE;
+ return NOTIFY_BAD;
mci = i7_dev->mci;
pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 37649221f81c..cbee3179ec08 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = TAD_SOCK(reg);
+ sck_way = 1 << TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
@@ -1435,7 +1435,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
switch(ch_way) {
case 2:
case 4:
- sck_xch = (1 << sck_way) * (ch_way >> 1);
+ sck_xch = 1 << sck_way * (ch_way >> 1);
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -1471,7 +1471,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
ch_addr = addr - offset;
ch_addr >>= (6 + shiftup);
- ch_addr /= sck_xch;
+ ch_addr /= ch_way * sck_way;
ch_addr <<= (6 + shiftup);
ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
@@ -2254,7 +2254,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
- return NOTIFY_DONE;
+ return NOTIFY_BAD;
pvt = mci->pvt_info;
/*
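
Note the precedence trap in the restored sb_edac line: in '1 << sck_way * (ch_way >> 1)', multiplication binds tighter than the shift, so it computes 1 << (sck_way * (ch_way >> 1)), not the (1 << sck_way) * (ch_way >> 1) of the removed, parenthesized form. A runnable comparison with sample values:

    #include <stdio.h>

    int main(void)
    {
        int sck_way = 2, ch_way = 4;

        /* '*' binds tighter than '<<': 1 << (sck_way * (ch_way >> 1)) */
        int implicit = 1 << sck_way * (ch_way >> 1);

        /* the explicitly grouped form from the removed line */
        int grouped = (1 << sck_way) * (ch_way >> 1);

        printf("implicit=%d grouped=%d\n", implicit, grouped);  /* 16 vs 8 */
        return 0;
    }
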
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index b6cb30d207be..9f9ea334399c 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
/* Clear IRQ bits before request IRQs */
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_INT1, info->status,
- MAX77843_MUIC_STATUS_NUM);
+ MAX77843_MUIC_IRQ_NUM);
if (ret) {
dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
goto err_muic_irq;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3b52677f459a..027ca212179f 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -180,7 +180,6 @@ static int generic_ops_register(void)
{
generic_ops.get_variable = efi.get_variable;
generic_ops.set_variable = efi.set_variable;
- generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d775e2bfc017..b2a172d93a08 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -8,7 +8,7 @@ cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
- -mno-mmx -mno-sse
+ -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
@@ -16,7 +16,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
-KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+KBUILD_CFLAGS := $(cflags-y) \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector)
@@ -35,8 +35,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
-lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64-stub.o random.o
+lib-$(CONFIG_ARM64) += arm64-stub.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index d5aa1d16154f..950c87f5d279 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -18,8 +18,6 @@
#include "efistub.h"
-bool __nokaslr;
-
static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
{
static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
@@ -209,6 +207,14 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Failed to find DRAM base\n");
goto fail;
}
+ status = handle_kernel_image(sys_table, image_addr, &image_size,
+ &reserve_addr,
+ &reserve_size,
+ dram_base, image);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ goto fail;
+ }
/*
* Get the command line from EFI, using the LOADED_IMAGE
@@ -218,28 +224,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
if (!cmdline_ptr) {
pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
- goto fail;
- }
-
- /* check whether 'nokaslr' was passed on the command line */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- static const u8 default_cmdline[] = CONFIG_CMDLINE;
- const u8 *str, *cmdline = cmdline_ptr;
-
- if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
- cmdline = default_cmdline;
- str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
- __nokaslr = true;
- }
-
- status = handle_kernel_image(sys_table, image_addr, &image_size,
- &reserve_addr,
- &reserve_size,
- dram_base, image);
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
- goto fail_free_cmdline;
+ goto fail_free_image;
}
status = efi_parse_options(cmdline_ptr);
@@ -259,7 +244,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to load device tree!\n");
- goto fail_free_image;
+ goto fail_free_cmdline;
}
}
@@ -301,11 +286,12 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_free(sys_table, initrd_size, initrd_addr);
efi_free(sys_table, fdt_size, fdt_addr);
+fail_free_cmdline:
+ efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
+
fail_free_image:
efi_free(sys_table, image_size, *image_addr);
efi_free(sys_table, reserve_size, reserve_addr);
-fail_free_cmdline:
- efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index e0e6b74fef8f..78dfbd34b6bf 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -13,10 +13,6 @@
#include <asm/efi.h>
#include <asm/sections.h>
-#include "efistub.h"
-
-extern bool __nokaslr;
-
efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
unsigned long *image_addr,
unsigned long *image_size,
@@ -27,52 +23,26 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
+ unsigned long nr_pages;
void *old_image_addr = (void *)*image_addr;
unsigned long preferred_offset;
- u64 phys_seed = 0;
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- if (!__nokaslr) {
- status = efi_get_random_bytes(sys_table_arg,
- sizeof(phys_seed),
- (u8 *)&phys_seed);
- if (status == EFI_NOT_FOUND) {
- pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
- } else if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
- return status;
- }
- } else {
- pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
- }
- }
/*
* The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
* a 2 MB aligned base, which itself may be lower than dram_base, as
* long as the resulting offset equals or exceeds it.
*/
- preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
+ preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
if (preferred_offset < dram_base)
- preferred_offset += MIN_KIMG_ALIGN;
+ preferred_offset += SZ_2M;
+ /* Relocate the image, if required. */
kernel_size = _edata - _text;
- kernel_memsize = kernel_size + (_end - _edata);
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
- /*
- * If KASLR is enabled, and we have some randomness available,
- * locate the kernel at a randomized offset in physical memory.
- */
- *reserve_size = kernel_memsize + TEXT_OFFSET;
- status = efi_random_alloc(sys_table_arg, *reserve_size,
- MIN_KIMG_ALIGN, reserve_addr,
- phys_seed);
+ if (*image_addr != preferred_offset) {
+ kernel_memsize = kernel_size + (_end - _edata);
- *image_addr = *reserve_addr + TEXT_OFFSET;
- } else {
/*
- * Else, try a straight allocation at the preferred offset.
+ * First, try a straight allocation at the preferred offset.
* This will work around the issue where, if dram_base == 0x0,
* efi_low_alloc() refuses to allocate at 0x0 (to prevent the
* address of the allocation to be mistaken for a FAIL return
@@ -82,31 +52,27 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
* Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'.
*/
- if (*image_addr == preferred_offset)
- return EFI_SUCCESS;
-
*image_addr = *reserve_addr = preferred_offset;
- *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
-
+ nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
+ EFI_PAGE_SIZE;
status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- *reserve_size / EFI_PAGE_SIZE,
+ EFI_LOADER_DATA, nr_pages,
(efi_physical_addr_t *)reserve_addr);
- }
-
- if (status != EFI_SUCCESS) {
- *reserve_size = kernel_memsize + TEXT_OFFSET;
- status = efi_low_alloc(sys_table_arg, *reserve_size,
- MIN_KIMG_ALIGN, reserve_addr);
-
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
- *reserve_size = 0;
- return status;
+ kernel_memsize += TEXT_OFFSET;
+ status = efi_low_alloc(sys_table_arg, kernel_memsize,
+ SZ_2M, reserve_addr);
+
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ return status;
+ }
+ *image_addr = *reserve_addr + TEXT_OFFSET;
}
- *image_addr = *reserve_addr + TEXT_OFFSET;
+ memcpy((void *)*image_addr, old_image_addr, kernel_size);
+ *reserve_size = kernel_memsize;
}
- memcpy((void *)*image_addr, old_image_addr, kernel_size);
+
return EFI_SUCCESS;
}
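
The restored arm64 stub places the Image at TEXT_OFFSET bytes above a 2 MiB aligned base at or above dram_base, per the comment in the hunk. A sketch of that round-down-then-adjust computation; SZ_2M matches the kernel constant, while TEXT_OFFSET here is a sample value (the real one is set per build):

    #include <stdio.h>
    #include <stdint.h>

    #define SZ_2M       0x200000ULL
    #define TEXT_OFFSET 0x80000ULL    /* sample; per-build in reality */

    static uint64_t round_down(uint64_t x, uint64_t align)
    {
        return x & ~(align - 1);
    }

    int main(void)
    {
        uint64_t dram_base = 0x801ff000ULL;
        uint64_t preferred = round_down(dram_base, SZ_2M) + TEXT_OFFSET;

        /* rounding down may land below dram_base; step up by 2 MiB */
        if (preferred < dram_base)
            preferred += SZ_2M;

        printf("dram_base=%#llx preferred=%#llx\n",
               (unsigned long long)dram_base,
               (unsigned long long)preferred);
        return 0;
    }
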
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 29ed2f9b218c..f07d4a67fa76 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -649,10 +649,6 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
return dst;
}
-#ifndef MAX_CMDLINE_ADDRESS
-#define MAX_CMDLINE_ADDRESS ULONG_MAX
-#endif
-
/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
@@ -688,8 +684,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
options_bytes++; /* NUL termination */
- status = efi_high_alloc(sys_table_arg, options_bytes, 0,
- &cmdline_addr, MAX_CMDLINE_ADDRESS);
+ status = efi_low_alloc(sys_table_arg, options_bytes, 0, &cmdline_addr);
if (status != EFI_SUCCESS)
return NULL;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 5ed3d3f38166..6b6548fda089 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -43,11 +43,4 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
- unsigned long size, u8 *out);
-
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long random_seed);
-
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index b1c22cf18f7d..b62e2f5dcab3 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -147,20 +147,6 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (status)
goto fdt_set_fail;
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- efi_status_t efi_status;
-
- efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
- (u8 *)&fdt_val64);
- if (efi_status == EFI_SUCCESS) {
- status = fdt_setprop(fdt, node, "kaslr-seed",
- &fdt_val64, sizeof(fdt_val64));
- if (status)
- goto fdt_set_fail;
- } else if (efi_status != EFI_NOT_FOUND) {
- return efi_status;
- }
- }
return EFI_SUCCESS;
fdt_set_fail:
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
deleted file mode 100644
index 53f6d3fe6d86..000000000000
--- a/drivers/firmware/efi/libstub/random.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/efi.h>
-#include <asm/efi.h>
-
-#include "efistub.h"
-
-struct efi_rng_protocol {
- efi_status_t (*get_info)(struct efi_rng_protocol *,
- unsigned long *, efi_guid_t *);
- efi_status_t (*get_rng)(struct efi_rng_protocol *,
- efi_guid_t *, unsigned long, u8 *out);
-};
-
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
- unsigned long size, u8 *out)
-{
- efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
- efi_status_t status;
- struct efi_rng_protocol *rng;
-
- status = efi_call_early(locate_protocol, &rng_proto, NULL,
- (void **)&rng);
- if (status != EFI_SUCCESS)
- return status;
-
- return rng->get_rng(rng, NULL, size, out);
-}
-
-/*
- * Return the number of slots covered by this entry, i.e., the number of
- * addresses it covers that are suitably aligned and supply enough room
- * for the allocation.
- */
-static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
- unsigned long size,
- unsigned long align)
-{
- u64 start, end;
-
- if (md->type != EFI_CONVENTIONAL_MEMORY)
- return 0;
-
- start = round_up(md->phys_addr, align);
- end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
- align);
-
- if (start > end)
- return 0;
-
- return (end - start + 1) / align;
-}
-
-/*
- * The UEFI memory descriptors have a virtual address field that is only used
- * when installing the virtual mapping using SetVirtualAddressMap(). Since it
- * is unused here, we can reuse it to keep track of each descriptor's slot
- * count.
- */
-#define MD_NUM_SLOTS(md) ((md)->virt_addr)
-
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size,
- unsigned long align,
- unsigned long *addr,
- unsigned long random_seed)
-{
- unsigned long map_size, desc_size, total_slots = 0, target_slot;
- efi_status_t status;
- efi_memory_desc_t *memory_map;
- int map_offset;
-
- status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
- &desc_size, NULL, NULL);
- if (status != EFI_SUCCESS)
- return status;
-
- if (align < EFI_ALLOC_ALIGN)
- align = EFI_ALLOC_ALIGN;
-
- /* count the suitable slots in each memory map entry */
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
- unsigned long slots;
-
- slots = get_entry_num_slots(md, size, align);
- MD_NUM_SLOTS(md) = slots;
- total_slots += slots;
- }
-
- /* find a random number between 0 and total_slots */
- target_slot = (total_slots * (u16)random_seed) >> 16;
-
- /*
- * target_slot is now a value in the range [0, total_slots), and so
- * it corresponds with exactly one of the suitable slots we recorded
- * when iterating over the memory map the first time around.
- *
- * So iterate over the memory map again, subtracting the number of
- * slots of each entry at each iteration, until we have found the entry
- * that covers our chosen slot. Use the residual value of target_slot
- * to calculate the randomly chosen address, and allocate it directly
- * using EFI_ALLOCATE_ADDRESS.
- */
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
- efi_physical_addr_t target;
- unsigned long pages;
-
- if (target_slot >= MD_NUM_SLOTS(md)) {
- target_slot -= MD_NUM_SLOTS(md);
- continue;
- }
-
- target = round_up(md->phys_addr, align) + target_slot * align;
- pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
-
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, pages, &target);
- if (status == EFI_SUCCESS)
- *addr = target;
- break;
- }
-
- efi_call_early(free_pool, memory_map);
-
- return status;
-}
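
The deleted random.c maps a 16-bit seed uniformly onto the total slot count with fixed-point scaling — target_slot = (total_slots * (u16)seed) >> 16 — then walks the memory map, subtracting per-entry slot counts until the covering entry is found. A runnable sketch of that selection, with hard-coded per-region counts standing in for get_entry_num_slots() results:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* stand-ins for get_entry_num_slots() over four map entries */
        uint64_t slots[] = { 40, 0, 25, 100 };
        uint64_t total = 0, target;
        uint16_t seed = 0xC0DE;     /* sample 16-bit random seed */
        size_t i;

        for (i = 0; i < 4; i++)
            total += slots[i];

        /* fixed-point scaling: seed in [0,65536) -> target in [0,total) */
        target = (total * seed) >> 16;

        for (i = 0; i < 4; i++) {
            if (target < slots[i]) {
                printf("entry %zu, slot %llu\n",
                       i, (unsigned long long)target);
                break;
            }
            target -= slots[i];     /* skip past this entry's slots */
        }
        return 0;
    }
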
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 6f182fd91a6d..7f2ea21c730d 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,44 +202,29 @@ static const struct variable_validate variable_validate[] = {
{ NULL_GUID, "", NULL },
};
-/*
- * Check if @var_name matches the pattern given in @match_name.
- *
- * @var_name: an array of @len non-NUL characters.
- * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
- * final "*" character matches any trailing characters @var_name,
- * including the case when there are none left in @var_name.
- * @match: on output, the number of non-wildcard characters in @match_name
- * that @var_name matches, regardless of the return value.
- * @return: whether @var_name fully matches @match_name.
- */
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
+ char u = var_name[*match];
- switch (c) {
- case '*':
- /* Wildcard in @match_name means we've matched. */
+ /* Wildcard in the matching name means we've matched */
+ if (c == '*')
return true;
- case '\0':
- /* @match_name has ended. Has @var_name too? */
- return (*match == len);
+ /* Case sensitive match */
+ if (!c && *match == len)
+ return true;
- default:
- /*
- * We've reached a non-wildcard char in @match_name.
- * Continue only if there's an identical character in
- * @var_name.
- */
- if (*match < len && c == var_name[*match])
- continue;
+ if (c != u)
return false;
- }
+
+ if (!c)
+ return true;
}
+ return true;
}
bool
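
The comment block removed above documents variable_matches() precisely: match_name is NUL-terminated and may end in '*', var_name is len non-NUL characters, and a full match requires both to end together. A standalone sketch of those documented semantics, simplified to drop the *match out-parameter:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Match 'var' (len chars, not NUL-terminated) against 'pat'; a trailing
     * '*' in 'pat' matches any remainder, per the removed kernel comment. */
    static bool variable_matches(const char *var, size_t len, const char *pat)
    {
        size_t i;

        for (i = 0; ; i++) {
            char c = pat[i];

            if (c == '*')
                return true;        /* wildcard: matched */
            if (c == '\0')
                return i == len;    /* pattern ended; did the name? */
            if (i >= len || c != var[i])
                return false;       /* name too short, or mismatch */
        }
    }

    int main(void)
    {
        printf("%d\n", variable_matches("Boot0001", 8, "Boot*"));  /* 1 */
        printf("%d\n", variable_matches("Timeout", 7, "Boot*"));   /* 0 */
        return 0;
    }
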
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 053fc2f465df..bb1099c549df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1673,7 +1673,6 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
- unsigned fw_version;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 5a8fbadbd27b..8ac49812a716 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c961fe093e12..9d88023df836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -61,6 +61,12 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1469,7 +1475,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev))
+ if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 4488e82f87b0..e23843f4d877 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
fw_info.feature = adev->vce.fb_version;
break;
case AMDGPU_INFO_FW_UVD:
- fw_info.ver = adev->uvd.fw_version;
+ fw_info.ver = 0;
fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_GMC:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 89df7871653d..064ebb347074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -52,7 +52,7 @@ struct amdgpu_hpd;
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 9
+#define AMDGPU_MAX_AFMT_BLOCKS 7
enum amdgpu_rmx_type {
RMX_OFF,
@@ -308,8 +308,8 @@ struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
bool mode_config_initialized;
- struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
- struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
+ struct amdgpu_crtc *crtcs[6];
+ struct amdgpu_afmt *afmt[7];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 73628c7599e7..b8fbbd7699e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -540,7 +540,6 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (!metadata_size) {
if (bo->metadata_size) {
kfree(bo->metadata);
- bo->metadata = NULL;
bo->metadata_size = 0;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3b35ad83867c..53f987aeeacf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,9 +156,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
- adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
- (family_id << 8));
-
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -276,8 +273,6 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
(adev->uvd.fw->size) - offset);
- cancel_delayed_work_sync(&adev->uvd.idle_work);
-
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index bb0da76051a1..a745eeeb5d82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -220,7 +220,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
- cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1cd6de575305..1e0bba29e167 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -298,10 +298,6 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- /* vertical FP must be at least 1 */
- if (mode->crtc_vsync_start == mode->crtc_vdisplay)
- adjusted_mode->crtc_vsync_start++;
-
/* get the native mode for scaling */
if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 946300764609..aa491540ba85 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
- uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index d5e19b5fbbfb..c34c393e9aea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
union SQ_CMD_BITS *in_reg_sq_cmd,
union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
{
- int status = 0;
+ int status;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d268bf18a662..39d7e2e15c11 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1665,19 +1665,13 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb;
int len, ret, port_num;
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
- return -EINVAL;
-
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
- if (!mstb) {
- drm_dp_put_port(port);
+ if (!mstb)
return -EINVAL;
- }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1703,7 +1697,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
- drm_dp_put_port(port);
return ret;
}
@@ -1786,11 +1779,6 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port) {
- mutex_unlock(&mgr->payload_lock);
- return -EINVAL;
- }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
} else {
port = NULL;
@@ -1816,9 +1804,6 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
-
- if (port)
- drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -2124,8 +2109,6 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
- u8 guid[16];
-
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2140,16 +2123,6 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
-
- /* Some hubs forget their guids after they resume */
- sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (sret != 16) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
-
ret = 0;
} else
ret = -1;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7e461dca564c..bc7b8faba84d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2838,14 +2838,7 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP 0x138170
-/*
- * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
- * 8300) freezing up around GPU hangs. Looks as if even
- * scheduling/timer interrupts start misbehaving if the RPS
- * EI/thresholds are "bad", leading to a very sluggish or even
- * frozen machine.
- */
-#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
+#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
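
The deleted comment pairs with simple fixed-point arithmetic; a small userspace sketch comparing the two macro variants (the 12500 us input is hypothetical):

    #include <stdio.h>

    #define ROUNDUP(x, y)     ((((x) + (y) - 1) / (y)) * (y))

    /* Convert microseconds to 1.28 us GT timer units. */
    #define INTERVAL_OLD(us)  ROUNDUP(((us) * 100) >> 7, 25)  /* multiple of 25 */
    #define INTERVAL_NEW(us)  (((us) * 100) >> 7)             /* plain divide */

    int main(void)
    {
            unsigned int us = 12500;  /* hypothetical EI interval */

            /* 12500 * 100 / 128 = 9765 units; rounded up to 9775 (391 * 25). */
            printf("old=%u new=%u\n", INTERVAL_OLD(us), INTERVAL_NEW(us));
            return 0;
    }
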
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 97d1ed20418b..6a2c76e367a5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -248,14 +248,8 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev)) {
- if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
- DRM_DEBUG_KMS("LPT only supports 24bpp\n");
- return false;
- }
-
+ if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
- }
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index fc28c512ece3..9e530a739354 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -180,8 +180,7 @@ struct stepping_info {
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'},
- {'J', '0'}, {'K', '0'}
+ {'G', '0'}, {'H', '0'}, {'I', '0'}
};
static struct stepping_info bxt_stepping_info[] = {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 3c6b07683bd9..7e6158b889da 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -464,17 +464,9 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
-
- if (dev_priv->edp_low_vswing) {
- ddi_translations_edp = bdw_ddi_translations_edp;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
- } else {
- ddi_translations_edp = bdw_ddi_translations_dp;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- }
-
+ ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
@@ -3196,6 +3188,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3214,8 +3212,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .reset = intel_dp_encoder_reset,
- .destroy = intel_dp_encoder_destroy,
+ .destroy = intel_ddi_destroy,
};
static struct intel_connector *
@@ -3287,7 +3284,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index afa81691163d..f859a5b87ed4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4447,7 +4447,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+ &state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index e55a82a99e7f..78b8ec84d576 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -5035,7 +5035,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -5077,7 +5077,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
-void intel_dp_encoder_reset(struct drm_encoder *encoder)
+static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 06bd9257acdc..0639275fc471 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -477,8 +477,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
- intel_connector->unregister(intel_connector);
-
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -492,7 +490,11 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
+ drm_modeset_unlock_all(dev);
+ intel_connector->unregister(intel_connector);
+
+ drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f34a219ec5c4..0d00f07b7163 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1204,8 +1204,6 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-void intel_dp_encoder_reset(struct drm_encoder *encoder);
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4b8ed9f2dabc..e6c035b0fc1c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1388,16 +1388,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}
- if (!live_status) {
- DRM_DEBUG_KMS("HDMI live status down\n");
- /*
- * Live status register is not reliable on all intel platforms.
- * So consider live_status only for certain platforms, for
- * others, read EDID to determine presence of sink.
- */
- if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
- live_status = true;
- }
+ if (!live_status)
+ DRM_DEBUG_KMS("Live status not up!");
intel_hdmi_unset_edid(connector);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7058f75c7b42..d69547a65dbb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
+ * falls off the end. So we only need to wait for the
+ * reserved size after flushing out the remainder.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
+ need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
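
This wait/wrap computation (repeated below for the legacy ring in intel_ringbuffer.c) is a three-way decision on space; a hedged standalone sketch, with field names shortened from the surrounding code and the restored need_wrap assignment marked:

    #include <stdbool.h>

    struct ring_state {
            int space;              /* bytes free right now */
            int remain_actual;      /* bytes from write pointer to end of ring */
            int reserved_size;      /* space held back for a reserved request */
    };

    static void plan_write(const struct ring_state *r, int bytes,
                           int *wait_bytes, bool *need_wrap)
    {
            int remain_usable = r->remain_actual - r->reserved_size;
            int total = bytes + r->reserved_size;

            *need_wrap = false;
            if (bytes > remain_usable) {
                    /* The request itself cannot fit before the end: wrap now. */
                    *wait_bytes = r->remain_actual + total;
                    *need_wrap = true;
            } else if (total > remain_usable) {
                    /* Request fits, but the reserved space falls off the end. */
                    *wait_bytes = r->remain_actual + r->reserved_size;
                    *need_wrap = true;      /* the line this revert restores */
            } else if (total > r->space) {
                    *wait_bytes = total;    /* no wrap needed, just wait */
            } else {
                    *wait_bytes = 0;        /* enough room already */
            }
    }
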
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0a68d2ec89dc..f091ad12d694 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6620,12 +6620,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
- /*
- * Wait at least 100 clocks before re-enabling clock gating. See
- * the definition of L3SQCREG1 in BSpec.
- */
- POSTING_READ(GEN8_L3SQCREG1);
- udelay(1);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9d48443bca2e..f6b2a814e629 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1922,17 +1922,6 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
return 0;
}
-static void cleanup_phys_status_page(struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
-
- if (!dev_priv->status_page_dmah)
- return;
-
- drm_pci_free(ring->dev, dev_priv->status_page_dmah);
- ring->status_page.page_addr = NULL;
-}
-
static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@@ -1949,9 +1938,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
static int init_status_page(struct intel_engine_cs *ring)
{
- struct drm_i915_gem_object *obj = ring->status_page.obj;
+ struct drm_i915_gem_object *obj;
- if (obj == NULL) {
+ if ((obj = ring->status_page.obj) == NULL) {
unsigned flags;
int ret;
@@ -2145,7 +2134,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
} else {
- WARN_ON(ring->id != RCS);
+ BUG_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
@@ -2190,12 +2179,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
if (ring->cleanup)
ring->cleanup(ring);
- if (I915_NEED_GFX_HWS(ring->dev)) {
- cleanup_status_page(ring);
- } else {
- WARN_ON(ring->id != RCS);
- cleanup_phys_status_page(ring);
- }
+ cleanup_status_page(ring);
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
@@ -2357,11 +2341,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
+ * falls off the end. So we only need to wait for the
+ * reserved size after flushing out the remainder.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
+ need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index cc91ae832ffb..43cba129a0c0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1132,11 +1132,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- if (IS_HASWELL(dev))
- dev_priv->uncore.funcs.force_wake_put =
- fw_domains_put_with_fifo;
- else
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 89da47234016..3216e157a8a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
struct nvkm_ramht *ramht = *pramht;
if (ramht) {
nvkm_gpuobj_del(&ramht->gpuobj);
- vfree(*pramht);
+ kfree(*pramht);
*pramht = NULL;
}
}
@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
struct nvkm_ramht *ramht;
int ret, i;
- if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
- (size >> 3) * sizeof(*ramht->data))))
+ if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+ sizeof(*ramht->data), GFP_KERNEL)))
return -ENOMEM;
ramht->device = device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 36655a74c538..9f5dfc85147a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1717,8 +1717,6 @@ gf100_gr_init(struct gf100_gr *gr)
gf100_gr_mmio(gr, gr->func->mmio);
- nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
-
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5edebf495c07..183aea1abebc 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,15 +375,10 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
- qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
- qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
- qcrtc->hot_spot_x = hot_x;
- qcrtc->hot_spot_y = hot_y;
-
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
- cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ cmd->u.set.position.x = qcrtc->cur_x;
+ cmd->u.set.position.y = qcrtc->cur_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -446,8 +441,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
- cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ cmd->u.position.x = qcrtc->cur_x;
+ cmd->u.position.y = qcrtc->cur_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3ab90179e9ab..01a86948eb8c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,8 +135,6 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
- int hot_spot_x;
- int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 79bab6fd76bb..dac78ad24b31 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1739,7 +1739,6 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
@@ -1749,10 +1748,6 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
- /* PPLL2 is exclusive to UNIPHYA on DCE61 */
- if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
- test_radeon_crtc->pll_id == ATOM_PPLL2)
- continue;
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
@@ -1774,7 +1769,6 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
@@ -1790,10 +1784,6 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
- /* PPLL2 is exclusive to UNIPHYA on DCE61 */
- if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
- test_radeon_crtc->pll_id == ATOM_PPLL2)
- continue;
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 0b04b9282f56..adf74f4366bb 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -310,10 +310,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- /* vertical FP must be at least 1 */
- if (mode->crtc_vsync_start == mode->crtc_vdisplay)
- adjusted_mode->crtc_vsync_start++;
-
/* get the native mode for scaling */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 32491355a1d4..2ad462896896 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,152 +2608,10 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
-static const unsigned ni_dig_offsets[] =
-{
- NI_DIG0_REGISTER_OFFSET,
- NI_DIG1_REGISTER_OFFSET,
- NI_DIG2_REGISTER_OFFSET,
- NI_DIG3_REGISTER_OFFSET,
- NI_DIG4_REGISTER_OFFSET,
- NI_DIG5_REGISTER_OFFSET
-};
-
-static const unsigned ni_tx_offsets[] =
-{
- NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
-};
-
-static const unsigned evergreen_dp_offsets[] =
-{
- EVERGREEN_DP0_REGISTER_OFFSET,
- EVERGREEN_DP1_REGISTER_OFFSET,
- EVERGREEN_DP2_REGISTER_OFFSET,
- EVERGREEN_DP3_REGISTER_OFFSET,
- EVERGREEN_DP4_REGISTER_OFFSET,
- EVERGREEN_DP5_REGISTER_OFFSET
-};
-
-
-/*
- * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
- * We go from crtc to connector, which is not reliable since it
- * should be the opposite direction. If the crtc is enabled, then
- * find the dig_fe which selects this crtc and ensure that it is enabled.
- * If such a dig_fe is found, then find the dig_be which selects that dig_fe and
- * ensure that it is enabled and in DP_SST mode.
- * If UNIPHY_PLL_CONTROL1.enable is set, then we should disconnect the timing
- * from the dp symbol clocks.
- */
-static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
- unsigned crtc_id, unsigned *ret_dig_fe)
-{
- unsigned i;
- unsigned dig_fe;
- unsigned dig_be;
- unsigned dig_en_be;
- unsigned uniphy_pll;
- unsigned digs_fe_selected;
- unsigned dig_be_mode;
- unsigned dig_fe_mask;
- bool is_enabled = false;
- bool found_crtc = false;
-
- /* loop through all running dig_fe to find selected crtc */
- for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
- dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
- if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
- crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
- /* found running pipe */
- found_crtc = true;
- dig_fe_mask = 1 << i;
- dig_fe = i;
- break;
- }
- }
-
- if (found_crtc) {
- /* loop through all running dig_be to find selected dig_fe */
- for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
- dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
- /* is this dig_fe selected by dig_be? */
- digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
- dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
- if (dig_fe_mask & digs_fe_selected &&
- /* is dig_be in SST mode? */
- dig_be_mode == NI_DIG_BE_DPSST) {
- dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
- ni_dig_offsets[i]);
- uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
- ni_tx_offsets[i]);
- /* dig_be enabled and its tx running */
- if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
- dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
- uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
- is_enabled = true;
- *ret_dig_fe = dig_fe;
- break;
- }
- }
- }
- }
-
- return is_enabled;
-}
-
-/*
- * Blank the dig when in dp sst mode;
- * the dig ignores crtc timing.
- */
-static void evergreen_blank_dp_output(struct radeon_device *rdev,
- unsigned dig_fe)
-{
- unsigned stream_ctrl;
- unsigned fifo_ctrl;
- unsigned counter = 0;
-
- if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
- DRM_ERROR("invalid dig_fe %d\n", dig_fe);
- return;
- }
-
- stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe]);
- if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
- DRM_ERROR("dig %d , should be enable\n", dig_fe);
- return;
- }
-
- stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
- WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe], stream_ctrl);
-
- stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe]);
- while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
- msleep(1);
- counter++;
- stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe]);
- }
- if (counter >= 32)
- DRM_ERROR("counter exceeds %d\n", counter);
-
- fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
- fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
- WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
-
-}
-
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
- unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2793,17 +2651,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
- /*
-  * We should disable the dig if it drives dp sst, but we are in
-  * radeon_device_init and the topology is unknown; it only becomes
-  * available after radeon_modeset_init. radeon_atom_encoder_dpms_dig
-  * would do the job if we initialized it properly, so for now we do
-  * it manually.
-  */
- if (ASIC_IS_DCE5(rdev) &&
- evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
- evergreen_blank_dp_output(rdev, dig_fe);
- /* we could remove the 6 lines below */
+
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index b436badf9efa..aa939dfed3a3 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,43 +250,8 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
-/*DIG block*/
-#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
-#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
-#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
-#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
-#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
-#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
-
-
-#define NI_DIG_FE_CNTL 0x7000
-# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
-# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
-
-
-#define NI_DIG_BE_CNTL 0x7140
-# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
-# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
-
-#define NI_DIG_BE_EN_CNTL 0x7144
-# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
-# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
-# define NI_DIG_BE_DPSST 0
/* Display Port block */
-#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
-#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
-#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
-#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
-#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
-#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
-
-
-#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
-# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
-# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
-#define EVERGREEN_DP_STEER_FIFO 0x7310
-# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -301,15 +266,4 @@
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
-/*DCIO_UNIPHY block*/
-#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
-#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
-#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
-#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
-#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
-#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
-
-#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
-# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
-
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a283..9bc408c9f9f6 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9cfc1c3e1965..340f3f549f29 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1996,12 +1996,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
- if (radeon_audio != 0) {
+ if (radeon_audio != 0)
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
- }
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.output_csc_property,
@@ -2126,7 +2124,6 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -2182,7 +2179,6 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
@@ -2235,7 +2231,6 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c566993a2ec3..f78f111e68de 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if (rdev->flags & RADEON_IS_PX)
+ if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index db64e0062689..3b0c229d7dcd 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f342aad79cc6..e06ac546a90f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,8 +235,6 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
- if (radeon_ttm_tt_has_userptr(bo->ttm))
- return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index caa73de584a5..7285adb27099 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2931,7 +2931,6 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 0585fd2031dd..a0e28f3a278d 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1068,6 +1068,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
goto err_register;
}
+ pdev->dev.of_node = of_node;
pdev->dev.parent = dev;
ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1078,12 +1079,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
platform_device_put(pdev);
goto err_register;
}
-
- /*
- * Set of_node only after calling platform_device_add. Otherwise
- * the platform:imx-ipuv3-crtc modalias won't be used.
- */
- pdev->dev.of_node = of_node;
}
return 0;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 909ab0176ef2..8b78a7f1f779 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -255,7 +255,6 @@
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
#define USB_VENDOR_ID_CREATIVELABS 0x041e
-#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
#define USB_VENDOR_ID_CVTOUCH 0x1ff7
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dc8e6adf95a4..7dd0953cd70f 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,7 +70,6 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 5c02d7bbc7f2..3c0f47ac8e53 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3449,10 +3449,6 @@ static const struct wacom_features wacom_features_0x33E =
{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
-static const struct wacom_features wacom_features_0x343 =
- { "Wacom DTK1651", 34616, 19559, 1023, 0,
- DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
- WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC };
@@ -3618,7 +3614,6 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x33C) },
{ USB_DEVICE_WACOM(0x33D) },
{ USB_DEVICE_WACOM(0x33E) },
- { USB_DEVICE_WACOM(0x343) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index e7a348807f0c..83e9f591a54b 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -1,7 +1,6 @@
config STM
tristate "System Trace Module devices"
select CONFIGFS_FS
- select SRCU
help
A System Trace Module (STM) is a device exporting data in System
Trace Protocol (STP) format as defined by MIPI STP standards.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b167ab25310a..714bdc837769 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
cbd_t __iomem *rbase;
u_char *txbuf[CPM_MAXBD];
u_char *rxbuf[CPM_MAXBD];
- dma_addr_t txdma[CPM_MAXBD];
- dma_addr_t rxdma[CPM_MAXBD];
+ u32 txdma[CPM_MAXBD];
+ u32 rxdma[CPM_MAXBD];
};
static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index f54ece8fce78..b29c7500461a 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,9 +671,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
return -EIO;
}
- ret = clk_enable(i2c->clk);
- if (ret)
- return ret;
+ clk_prepare_enable(i2c->clk);
for (i = 0; i < num; i++, msgs++) {
stop = (i == num - 1);
@@ -697,7 +695,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
out:
- clk_disable(i2c->clk);
+ clk_disable_unprepare(i2c->clk);
return ret;
}
@@ -749,9 +747,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = clk_prepare_enable(i2c->clk);
- if (ret)
- return ret;
+ clk_prepare_enable(i2c->clk);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -803,10 +799,6 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
- clk_disable(i2c->clk);
-
- return 0;
-
err_clk:
clk_disable_unprepare(i2c->clk);
return ret;
@@ -818,8 +810,6 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
- clk_unprepare(i2c->clk);
-
return 0;
}
@@ -831,8 +821,6 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c->suspended = 1;
- clk_unprepare(i2c->clk);
-
return 0;
}
@@ -842,9 +830,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
int ret = 0;
- ret = clk_prepare_enable(i2c->clk);
- if (ret)
- return ret;
+ clk_prepare_enable(i2c->clk);
ret = exynos5_hsi2c_clock_setup(i2c);
if (ret) {
@@ -853,7 +839,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
}
exynos5_i2c_init(i2c);
- clk_disable(i2c->clk);
+ clk_disable_unprepare(i2c->clk);
i2c->suspended = 0;
return 0;
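
All of the exynos5 hunks swap between the two levels of the common clk API; a minimal in-kernel-style sketch of the pairing convention they rely on (start_clock() is a hypothetical wrapper):

    #include <linux/clk.h>

    /*
     * clk_prepare() may sleep, clk_enable() must not; the combined helpers
     * pair as clk_prepare_enable() <-> clk_disable_unprepare(), while split
     * usage pairs clk_prepare()/clk_unprepare() with clk_enable()/clk_disable().
     */
    static int start_clock(struct clk *clk)
    {
            int ret = clk_prepare_enable(clk);  /* the call this revert restores */

            if (ret)
                    return ret;
            /* ... talk to the peripheral ... */
            clk_disable_unprepare(clk);
            return 0;
    }
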
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index f2a7f72f7aa6..b13936dacc78 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -462,8 +462,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
int rc;
int irq;
- init_waitqueue_head(&data->data_ready_queue);
- clear_bit(0, &data->flags);
if (client->irq)
irq = client->irq;
else
@@ -479,6 +477,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
return rc;
}
+ init_waitqueue_head(&data->data_ready_queue);
+ clear_bit(0, &data->flags);
data->eoc_irq = irq;
return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
int eoc_gpio;
int err;
const char *name = NULL;
- enum asahi_compass_chipset chipset = AK_MAX_TYPE;
+ enum asahi_compass_chipset chipset;
/* Grab and set up the supplied GPIO. */
if (client->dev.platform_data)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 564adf3116e8..6b4e8a008bc0 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,7 +48,6 @@
#include <asm/uaccess.h>
-#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1104,9 +1103,6 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
- return -EACCES;
-
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 886f61ea6cc7..8b5a934e1133 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,9 +1574,6 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
- return -EACCES;
-
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 24f3ca2c4ad7..e3ef28861be6 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,8 +48,6 @@
#include <asm/uaccess.h>
-#include <rdma/ib.h>
-
#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
@@ -684,9 +682,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
- return -EACCES;
-
if (count < sizeof hdr)
return -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index bc147582bed9..de9cd6901752 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_pa) {
+ if (user && !cq->bar2_va) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 53aa7515f542..aa515afee724 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,10 +185,6 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
-
- if (is_t4(rdev->lldi.adapter_type))
- return NULL;
-
return rdev->bar2_kva + bar2_qoffset;
}
@@ -274,7 +270,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
+ if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fd17443aeacd..c4e091528390 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -273,7 +273,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = MLX5_MAX_SGE_RD;
+ props->max_sge_rd = props->max_sge;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -405,8 +405,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
- u16 max_mtu;
- u16 oper_mtu;
+ int max_mtu;
+ int oper_mtu;
int err;
u8 ib_link_width_oper;
u8 vl_hw_cap;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 24f4a782e0f4..e449e394963f 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,8 +45,6 @@
#include <linux/export.h>
#include <linux/uio.h>
-#include <rdma/ib.h>
-
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
@@ -2069,9 +2067,6 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
- if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
- return -EACCES;
-
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 8d6326d7e7be..a806ba3818f7 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -255,14 +255,12 @@ static int max8997_haptic_probe(struct platform_device *pdev)
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
const struct max8997_platform_data *pdata =
dev_get_platdata(iodev->dev);
- const struct max8997_haptic_platform_data *haptic_pdata = NULL;
+ const struct max8997_haptic_platform_data *haptic_pdata =
+ pdata->haptic_pdata;
struct max8997_haptic *chip;
struct input_dev *input_dev;
int error;
- if (pdata)
- haptic_pdata = pdata->haptic_pdata;
-
if (!haptic_pdata) {
dev_err(&pdev->dev, "no haptic platform data\n");
return -EINVAL;
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 67aab86048ad..3f02e0e03d12 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,8 +353,7 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
kpd_delay = 15625;
- /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
- if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
+ if (kpd_delay > 62500 || kpd_delay == 0) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@@ -386,8 +385,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- delay = (kpd_delay << 6) / USEC_PER_SEC;
- delay = ilog2(delay);
+ delay = (kpd_delay << 10) / USEC_PER_SEC;
+ delay = 1 + ilog2(delay);
err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {
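
The delay computation swapped here packs a microsecond value into a small log2-encoded register field; a userspace sketch of the removed ("-") encoding, using the lower bound of the documented 1/64 s .. 2 s range as input:

    #include <stdio.h>

    /* Integer log2 of a positive value, as the kernel's ilog2() yields. */
    static unsigned int ilog2_u(unsigned long v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned long kpd_delay = 15625;   /* 1/64 s, in microseconds */

            /* field = log2(us * 64 / 1e6); 15625 us -> 0, 2000000 us -> 7 */
            unsigned long field = ilog2_u((kpd_delay << 6) / 1000000);

            printf("kpd_delay=%luus -> PON_CNTL_1 field=%lu\n", kpd_delay, field);
            return 0;
    }
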
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 7c18249d6c8e..3a7f3a4a4396 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,14 +858,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
goto err_free_buf;
}
- /* Sanity check that a device has an endpoint */
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
- dev_err(&usbinterface->dev,
- "Invalid number of endpoints\n");
- error = -EINVAL;
- goto err_free_urb;
- }
-
/*
* The endpoint is always altsetting 0, we know this since we know
* this device only has one interrupt endpoint
@@ -887,7 +879,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
* HID report descriptor
*/
if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
- HID_DEVICE_TYPE, &hid_desc) != 0) {
+ HID_DEVICE_TYPE, &hid_desc) != 0){
dev_err(&usbinterface->dev,
"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
error = -EIO;
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 7b3845aa5983..9bbadaaf6bc3 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.coord_x = point.coord_y = 0;
}
- point.state = payload[9 * i + 5] & 0x0f;
- point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+ point.state = payload[9 * i + 5] & 0x03;
+ point.id = (payload[9 * i + 5] & 0xfc) >> 2;
/* determine touch major, minor and orientation */
point.area_major = max(payload[9 * i + 6],
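
The touch-state decode changed above is a one-byte bitfield split; a tiny sketch contrasting the two layouts (the 0x47 byte is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t b = 0x47;  /* stands in for payload[9 * i + 5] */

            /* Layout removed by the revert: low 4 bits state, high 4 bits id. */
            unsigned state4 = b & 0x0f, id4 = (b & 0xf0) >> 4;

            /* Layout the revert restores: low 2 bits state, high 6 bits id. */
            unsigned state2 = b & 0x03, id6 = (b & 0xfc) >> 2;

            printf("4/4 split: state=%u id=%u\n", state4, id4);
            printf("2/6 split: state=%u id=%u\n", state2, id6);
            return 0;
    }
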
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b9319b76a8a1..fc836f523afa 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -91,7 +91,6 @@ struct iommu_dev_data {
struct list_head dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
- u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
@@ -126,13 +125,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static inline u16 get_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -170,68 +162,6 @@ out_unlock:
return dev_data;
}
-static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- *(u16 *)data = alias;
- return 0;
-}
-
-static u16 get_alias(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 devid, ivrs_alias, pci_alias;
-
- devid = get_device_id(dev);
- ivrs_alias = amd_iommu_alias_table[devid];
- pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
-
- if (ivrs_alias == pci_alias)
- return ivrs_alias;
-
- /*
- * DMA alias showdown
- *
- * The IVRS is fairly reliable in telling us about aliases, but it
- * can't know about every screwy device. If we don't have an IVRS
- * reported alias, use the PCI reported alias. In that case we may
- * still need to initialize the rlookup and dev_table entries if the
- * alias is to a non-existent device.
- */
- if (ivrs_alias == devid) {
- if (!amd_iommu_rlookup_table[pci_alias]) {
- amd_iommu_rlookup_table[pci_alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[pci_alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[pci_alias].data));
- }
-
- return pci_alias;
- }
-
- pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
- "for device %s[%04x:%04x], kernel reported alias "
- "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
- PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
- PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
- PCI_FUNC(pci_alias));
-
- /*
- * If we don't have a PCI DMA alias and the IVRS alias is on the same
- * bus, then the IVRS table may know about a quirk that we don't.
- */
- if (pci_alias == devid &&
- PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- pdev->dma_alias_devfn = ivrs_alias & 0xff;
- pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
- PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
- dev_name(dev));
- }
-
- return ivrs_alias;
-}
-
static struct iommu_dev_data *find_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -244,6 +174,13 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
return dev_data;
}
+static inline u16 get_device_id(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
static struct iommu_dev_data *get_dev_data(struct device *dev)
{
return dev->archdata.iommu;
@@ -371,8 +308,6 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
- dev_data->alias = get_alias(dev);
-
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
@@ -393,7 +328,7 @@ static void iommu_ignore_device(struct device *dev)
u16 devid, alias;
devid = get_device_id(dev);
- alias = get_alias(dev);
+ alias = amd_iommu_alias_table[devid];
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1082,7 +1017,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
+ alias = amd_iommu_alias_table[dev_data->devid];
ret = iommu_flush_dte(iommu, dev_data->devid);
if (!ret && alias != dev_data->devid)
@@ -1956,7 +1891,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
+ alias = amd_iommu_alias_table[dev_data->devid];
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1990,7 +1925,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
return;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
+ alias = amd_iommu_alias_table[dev_data->devid];
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
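
The hunks above drop the cached dev_data->alias and the get_alias() heuristics, restoring direct amd_iommu_alias_table[] lookups at each use. The table is indexed by the 16-bit PCI requester ID; a minimal sketch of that ID's layout, assuming the standard PCI_DEVID() packing of bus and devfn:

    #include <stdint.h>
    #include <stdio.h>

    /* Requester ID as built by the kernel's PCI_DEVID(): bus in the
     * high byte, devfn (slot << 3 | fn) in the low byte. */
    static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
    {
        return (uint16_t)((bus << 8) | devfn);
    }

    int main(void)
    {
        uint8_t devfn = (0 << 3) | 1;  /* device 0, function 1 */

        printf("03:00.1 -> 0x%04x\n", pci_devid(0x03, devfn));
        return 0;
    }
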
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 58f2fe687a24..72d6182666cb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int s_length = sg_dma_len(s);
unsigned int s_dma_len = s->length;
- s->offset += s_offset;
+ s->offset = s_offset;
s->length = s_length;
sg_dma_address(s) = dma_addr + s_offset;
dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
for_each_sg(sg, s, nents, i) {
if (sg_dma_address(s) != DMA_ERROR_CODE)
- s->offset += sg_dma_address(s);
+ s->offset = sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 17304705f2cf..efe50845939d 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
void __iomem *icoll_base;
icoll_base = of_io_request_and_map(np, 0, np->name);
- if (IS_ERR(icoll_base))
+ if (!icoll_base)
panic("%s: unable to map resource", np->full_name);
return icoll_base;
}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 1254e98f6b57..4ef178078e5b 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -154,9 +154,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc = irq_get_domain_generic_chip(domain, 0);
gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(gc->reg_base)) {
+ if (!gc->reg_base) {
pr_err("unable to map resource\n");
- ret = PTR_ERR(gc->reg_base);
+ ret = -ENOMEM;
goto fail_irqd_remove;
}
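
Both this hunk and the irq-mxs one above revert error checks from IS_ERR() back to NULL tests, but of_io_request_and_map() reports failure through an ERR_PTR()-encoded pointer, never NULL, so the NULL tests cannot fire. A self-contained sketch of the convention, mirroring the definitions in include/linux/err.h:

    #include <stdio.h>

    /* Errors travel inside the pointer value itself. */
    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr) ((long)(ptr))

    int main(void)
    {
        void *base = ERR_PTR(-12);  /* -ENOMEM */

        printf("NULL test sees failure: %d\n", base == NULL);
        printf("IS_ERR sees failure:    %d, err=%ld\n",
               IS_ERR(base), PTR_ERR(base));
        return 0;
    }
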
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 3970cda10080..27f2ef300f8b 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,55 +867,39 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-static bool cmd_write_lock(struct dm_cache_metadata *cmd)
-{
- down_write(&cmd->root_lock);
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
- up_write(&cmd->root_lock);
- return false;
+#define WRITE_LOCK(cmd) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
+ return -EINVAL; \
}
- return true;
-}
-
-#define WRITE_LOCK(cmd) \
- do { \
- if (!cmd_write_lock((cmd))) \
- return -EINVAL; \
- } while(0)
-#define WRITE_LOCK_VOID(cmd) \
- do { \
- if (!cmd_write_lock((cmd))) \
- return; \
- } while(0)
+#define WRITE_LOCK_VOID(cmd) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
+ return; \
+ }
#define WRITE_UNLOCK(cmd) \
- up_write(&(cmd)->root_lock)
+ up_write(&cmd->root_lock)
-static bool cmd_read_lock(struct dm_cache_metadata *cmd)
-{
- down_read(&cmd->root_lock);
- if (cmd->fail_io) {
- up_read(&cmd->root_lock);
- return false;
+#define READ_LOCK(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return -EINVAL; \
}
- return true;
-}
-
-#define READ_LOCK(cmd) \
- do { \
- if (!cmd_read_lock((cmd))) \
- return -EINVAL; \
- } while(0)
-#define READ_LOCK_VOID(cmd) \
- do { \
- if (!cmd_read_lock((cmd))) \
- return; \
- } while(0)
+#define READ_LOCK_VOID(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return; \
+ }
#define READ_UNLOCK(cmd) \
- up_read(&(cmd)->root_lock)
+ up_read(&cmd->root_lock)
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
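
The restored macros expand to bare if statements that return from the enclosing function, which is exactly the hazard the removed helper-plus-do-while(0) form was written to contain. A small sketch of the bare-if pattern (names and stub lock are hypothetical):

    #include <stdio.h>

    static int locked;
    static int try_lock(void) { return locked ? 0 : (locked = 1); }

    /* Like the restored WRITE_LOCK(): returns from the *caller* on
     * failure. Without a do { } while (0) wrapper, a construct such
     * as "if (cond) LOCK_OR_FAIL(); else ..." no longer parses the
     * way it reads. */
    #define LOCK_OR_FAIL() \
        if (!try_lock()) { \
            return -1; \
        }

    static int do_op(void)
    {
        LOCK_OR_FAIL();  /* may return -1 behind our back */
        puts("critical section");
        locked = 0;
        return 0;
    }

    int main(void) { return do_op(); }
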
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c57fdf847b47..b1e1f6b95782 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -293,8 +293,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
* go away inside make_request
*/
sectors = bio_sectors(bio);
- /* bio could be mergeable after passing to underlayer */
- bio->bi_rw &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 11f39791ec33..33bdd81065e8 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- void *pb, int nonblocking)
+ int nonblocking)
{
unsigned long flags;
int ret;
@@ -1523,10 +1523,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
+ * Verifying planes is NOT necessary since it has already been
+ * checked before the buffer is queued/prepared. So it can never fail.
*/
- ret = call_bufop(q, verify_planes_array, *vb, pb);
- if (!ret)
- list_del(&(*vb)->done_entry);
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
@@ -1604,7 +1604,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
struct vb2_buffer *vb = NULL;
int ret;
- ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, nonblocking);
if (ret < 0)
return ret;
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1c..dbec5923fcf0 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ ret = get_vaddr_frames(start, nr, write, 1, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 68aa31ae553a..6255513f54c7 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -445,7 +445,6 @@ int intel_lpss_probe(struct device *dev,
err_remove_ltr:
intel_lpss_debugfs_remove(lpss);
intel_lpss_ltr_hide(lpss);
- intel_lpss_unregister_clock(lpss);
err_clk_register:
ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9f0d9b7b7e17..8a08ca61062a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -439,7 +439,7 @@ config ARM_CHARLCD
still useful.
config BMP085
- tristate
+ bool
depends on SYSFS
config BMP085_I2C
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index f1a0b99f5a9a..15e88078ba1e 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
*/
value = swab16(value);
- if (dpot->uid == DPOT_UID(AD5274_ID))
+ if (dpot->uid == DPOT_UID(AD5271_ID))
value = value >> 2;
return value;
default:
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index efbb6945eb18..09a406058c46 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -288,6 +288,7 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
+ irq_dispose_mapping(virq);
}
static int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 6a451bd65bf3..8310b4dbff06 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (len > LONG_MAX - offset)))
+ (offset + (off_t)len < offset)))
return -EINVAL;
might_sleep();
@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (len > LONG_MAX - offset)))
+ (offset + (off_t)len < offset)))
return -EINVAL;
/* Unsupported protection requested */
@@ -1732,8 +1732,7 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
/* Offset is not page aligned or offset+len wraps around */
if ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset < 0) ||
- (len > LONG_MAX - offset))
+ (offset + (off_t)len < offset))
return -EINVAL;
err = scif_verify_epd(ep);
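
Both range checks above try to reject offset/len combinations that wrap, but the restored "offset + (off_t)len < offset" relies on signed overflow, which is undefined behavior in C, whereas "len > LONG_MAX - offset" never overflows. A standalone sketch of the safe form, assuming offset >= 0 has already been validated as in the surrounding code:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Rearranged so no intermediate value can overflow. */
    static bool range_wraps(long offset, unsigned long len)
    {
        return len > (unsigned long)(LONG_MAX - offset);
    }

    int main(void)
    {
        printf("%d\n", range_wraps(LONG_MAX - 10, 11)); /* 1: wraps */
        printf("%d\n", range_wraps(4096, 4096));        /* 0: fits  */
        return 0;
    }
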
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 4a07ba1195b5..12c6190c6e33 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -309,36 +309,6 @@ static const u16 brcmnand_regs_v60[] = {
[BRCMNAND_FC_BASE] = 0x400,
};
-/* BRCMNAND v7.1 */
-static const u16 brcmnand_regs_v71[] = {
- [BRCMNAND_CMD_START] = 0x04,
- [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
- [BRCMNAND_CMD_ADDRESS] = 0x0c,
- [BRCMNAND_INTFC_STATUS] = 0x14,
- [BRCMNAND_CS_SELECT] = 0x18,
- [BRCMNAND_CS_XOR] = 0x1c,
- [BRCMNAND_LL_OP] = 0x20,
- [BRCMNAND_CS0_BASE] = 0x50,
- [BRCMNAND_CS1_BASE] = 0,
- [BRCMNAND_CORR_THRESHOLD] = 0xdc,
- [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
- [BRCMNAND_UNCORR_COUNT] = 0xfc,
- [BRCMNAND_CORR_COUNT] = 0x100,
- [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
- [BRCMNAND_CORR_ADDR] = 0x110,
- [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
- [BRCMNAND_UNCORR_ADDR] = 0x118,
- [BRCMNAND_SEMAPHORE] = 0x150,
- [BRCMNAND_ID] = 0x194,
- [BRCMNAND_ID_EXT] = 0x198,
- [BRCMNAND_LL_RDATA] = 0x19c,
- [BRCMNAND_OOB_READ_BASE] = 0x200,
- [BRCMNAND_OOB_READ_10_BASE] = 0,
- [BRCMNAND_OOB_WRITE_BASE] = 0x280,
- [BRCMNAND_OOB_WRITE_10_BASE] = 0,
- [BRCMNAND_FC_BASE] = 0x400,
-};
-
enum brcmnand_cs_reg {
BRCMNAND_CS_CFG_EXT = 0,
BRCMNAND_CS_CFG,
@@ -434,9 +404,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Register offsets */
- if (ctrl->nand_version >= 0x0701)
- ctrl->reg_offsets = brcmnand_regs_v71;
- else if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
else if (ctrl->nand_version >= 0x0500)
ctrl->reg_offsets = brcmnand_regs_v50;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ce7b2cab5762..3ff583f165cd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3979,6 +3979,7 @@ static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
+ * The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
@@ -4402,12 +4403,19 @@ EXPORT_SYMBOL(nand_scan_tail);
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values.
+ * appropriate values. The mtd->owner field must be set to the module of the
+ * caller.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
+ /* Many callers got this wrong, so check for it for a while... */
+ if (!mtd->owner && caller_is_module()) {
+ pr_crit("%s called with NULL mtd->owner!\n", __func__);
+ BUG();
+ }
+
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 37e4135ab213..32477c4eb421 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1067,6 +1067,45 @@ static int spansion_quad_enable(struct spi_nor *nor)
return 0;
}
+static int micron_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading EVCR\n", ret);
+ return ret;
+ }
+
+ write_enable(nor);
+
+ /* set EVCR, enable quad I/O */
+ nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
+ ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error while writing EVCR register\n");
+ return ret;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* read EVCR and check it */
+ ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading EVCR\n", ret);
+ return ret;
+ }
+ if (val & EVCR_QUAD_EN_MICRON) {
+ dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
@@ -1080,7 +1119,12 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
}
return status;
case SNOR_MFR_MICRON:
- return 0;
+ status = micron_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Micron quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
default:
status = spansion_quad_enable(nor);
if (status) {
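
Note that EVCR_QUAD_EN_MICRON is an active-low control: micron_quad_enable() turns quad I/O on by writing the value with the bit masked off, then errors out if the bit reads back set. A toy check of that polarity (the bit position is an assumption, matching the BIT(7) definition in the driver headers):

    #include <stdint.h>
    #include <stdio.h>

    #define EVCR_QUAD_EN_MICRON 0x80  /* assumed BIT(7) */

    /* Active-low: quad I/O is enabled when the bit reads as 0. */
    static int quad_enabled(uint8_t evcr)
    {
        return !(evcr & EVCR_QUAD_EN_MICRON);
    }

    int main(void)
    {
        uint8_t evcr = 0xff;

        evcr &= (uint8_t)~EVCR_QUAD_EN_MICRON;  /* what the write does */
        printf("quad enabled: %d\n", quad_enabled(evcr));
        return 0;
    }
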
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 2ff465848b65..8f76f4558a88 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
- netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d1c217eaf417..206b6a71a545 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -550,7 +550,6 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_config_vlan_stripping(nic, nic->netdev->features);
/* Enable Receive queue */
- memset(&rq_cfg, 0, sizeof(struct rq_cfg));
rq_cfg.ena = 1;
rq_cfg.tcp_ena = 0;
nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
@@ -583,7 +582,6 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
qidx, (u64)(cq->dmem.phys_base));
/* Enable Completion queue */
- memset(&cq_cfg, 0, sizeof(struct cq_cfg));
cq_cfg.ena = 1;
cq_cfg.reset = 0;
cq_cfg.caching = 0;
@@ -632,7 +630,6 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
qidx, (u64)(sq->dmem.phys_base));
/* Enable send queue & set queue size */
- memset(&sq_cfg, 0, sizeof(struct sq_cfg));
sq_cfg.ena = 1;
sq_cfg.reset = 0;
sq_cfg.ldwb = 0;
@@ -669,7 +666,6 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
/* Enable RBDR & set queue size */
/* Buffer size should be in multiples of 128 bytes */
- memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
rbdr_cfg.ena = 1;
rbdr_cfg.reset = 0;
rbdr_cfg.ldwb = 0;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f6147ffc7fbc..b2a32209ffbf 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1557,15 +1557,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
struct fec_enet_private *fep = netdev_priv(ndev);
for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
- int ret;
-
- ret = fec_enet_rx_queue(ndev,
+ clear_bit(queue_id, &fep->work_rx);
+ pkt_received += fec_enet_rx_queue(ndev,
budget - pkt_received, queue_id);
-
- if (ret < budget - pkt_received)
- clear_bit(queue_id, &fep->work_rx);
-
- pkt_received += ret;
}
return pkt_received;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1257b18e6b90..973dade2d07f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -270,17 +270,11 @@ jme_reset_mac_processor(struct jme_adapter *jme)
}
static inline void
-jme_clear_pm_enable_wol(struct jme_adapter *jme)
+jme_clear_pm(struct jme_adapter *jme)
{
jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}
-static inline void
-jme_clear_pm_disable_wol(struct jme_adapter *jme)
-{
- jwrite32(jme, JME_PMCS, PMCS_STMASK);
-}
-
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
@@ -1859,7 +1853,7 @@ jme_open(struct net_device *netdev)
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
- jme_clear_pm_disable_wol(jme);
+ jme_clear_pm(jme);
JME_NAPI_ENABLE(jme);
tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1931,11 +1925,11 @@ jme_wait_link(struct jme_adapter *jme)
static void
jme_powersave_phy(struct jme_adapter *jme)
{
- if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
+ if (jme->reg_pmcs) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
- jme_clear_pm_enable_wol(jme);
+ jme_clear_pm(jme);
} else {
jme_phy_off(jme);
}
@@ -2652,6 +2646,9 @@ jme_set_wol(struct net_device *netdev,
if (wol->wolopts & WAKE_MAGIC)
jme->reg_pmcs |= PMCS_MFEN;
+ jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
+
return 0;
}
@@ -3175,8 +3172,8 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_read = jme_mdio_read;
jme->mii_if.mdio_write = jme_mdio_write;
- jme_clear_pm_disable_wol(jme);
- device_init_wakeup(&pdev->dev, true);
+ jme_clear_pm(jme);
+ device_set_wakeup_enable(&pdev->dev, true);
jme_set_phyfifo_5level(jme);
jme->pcirev = pdev->revision;
@@ -3307,7 +3304,7 @@ jme_resume(struct device *dev)
if (!netif_running(netdev))
return 0;
- jme_clear_pm_disable_wol(jme);
+ jme_clear_pm(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index bbff8ec6713e..e7a5000aa12c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
return -1;
- hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+ hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
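
The two lines above fold the 8-bit nexthdr into the 16-bit ones'-complement sum. The pseudo-header carries nexthdr in the low byte of a big-endian 16-bit word; htons() reproduces that placement on any host, while the restored "nexthdr << 8" happens to match it only on little-endian machines. A simplified sketch, with csum16_add() standing in for the kernel's csum_add() on __wsum values:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 16-bit ones'-complement accumulate. */
    static uint16_t csum16_add(uint32_t sum, uint16_t v)
    {
        sum += v;
        return (uint16_t)((sum & 0xffff) + (sum >> 16));
    }

    int main(void)
    {
        uint8_t nexthdr = 6;  /* TCP */

        /* These two agree on little-endian hosts and differ on
         * big-endian ones. */
        printf("%#06x vs %#06x\n",
               csum16_add(0, htons(nexthdr)),
               csum16_add(0, (uint16_t)(nexthdr << 8)));
        return 0;
    }
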
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e4019a803a9c..4421bf5463f6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -400,6 +400,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 packets = 0;
u32 bytes = 0;
int factor = priv->cqe_factor;
+ u64 timestamp = 0;
int done = 0;
int budget = priv->tx_work_limit;
u32 last_nr_txbb;
@@ -439,12 +440,9 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
do {
- u64 timestamp = 0;
-
txbbs_skipped += last_nr_txbb;
ring_index = (ring_index + last_nr_txbb) & size_mask;
-
- if (unlikely(ring->tx_info[ring_index].ts_requested))
+ if (ring->tx_info[ring_index].ts_requested)
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cbd17e25beeb..1203d892e842 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1372,7 +1372,7 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u16 hw_mtu;
+ int hw_mtu;
int err;
err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
@@ -1891,27 +1891,22 @@ static int mlx5e_set_features(struct net_device *netdev,
return err;
}
-#define MXL5_HW_MIN_MTU 64
-#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
-
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
- u16 max_mtu;
- u16 min_mtu;
+ int max_mtu;
int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
max_mtu = MLX5E_HW2SW_MTU(max_mtu);
- min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
- if (new_mtu > max_mtu || new_mtu < min_mtu) {
+ if (new_mtu > max_mtu) {
netdev_err(netdev,
- "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
- __func__, new_mtu, min_mtu, max_mtu);
+ "%s: Bad MTU (%d) > (%d) Max\n",
+ __func__, new_mtu, max_mtu);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 53a793bc2e3d..a87e773e93f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
- u16 *max_mtu, u16 *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+ int *max_mtu, int *oper_mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
}
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 96a5028621c8..bdd83d95ec0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,13 +617,8 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
-
- /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
- * (12d1:157d), are known to fail unless the NDP is placed
- * after the IP packets. Applying the quirk to all Huawei
- * devices is broader than necessary, but harmless.
- */
- { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ /* Huawei E3372 fails unless NDP comes after the IP packets */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
},
/* default entry */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 1b271b99c49e..8f8793004b9f 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -274,9 +274,6 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@@ -427,9 +424,14 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int tmp, new;
int i;
+ int8_t mask_m[123];
+ int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 53d7445a5d12..db6624527d99 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -178,9 +178,14 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
int i;
struct chan_centers centers;
+ int8_t mask_m[123];
+ int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f96ab2f4b90e..e88afac51c5d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1557,8 +1557,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
- iwl_free_fw_paging(mvm);
-
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 13c97f665ba8..c3adf2bcdc85 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -645,6 +645,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ iwl_free_fw_paging(mvm);
+
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 00e0332e2544..8c7204738aa3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
*/
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
if (val & (BIT(1) | BIT(17))) {
- IWL_DEBUG_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
+ IWL_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d6c4f0f60839..a6c8a4f7bfe9 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -313,7 +313,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
mwifiex_dbg(adapter, ERROR,
"Attempt to reconnect on csa closed chan(%d)\n",
bss_desc->channel);
- ret = -1;
goto done;
}
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 2bb3c5799ac4..8ba19bba3156 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
if (ret)
goto close_banks;
- while (val_size >= reg_size) {
+ while (val_size) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
*buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
}
buf++;
- val_size -= reg_size;
+ val_size--;
offset += reg_size;
}
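
The two loops above disagree on the unit of val_size: the removed form counts bytes and steps by reg_size, while the restored form counts words. A toy loop showing the byte-count stepping, assuming reg_size is 4 as for 32-bit OCOTP words:

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg_size = 4;   /* 32-bit OCOTP words */
        unsigned int val_size = 16;  /* requested length in bytes */
        unsigned int words = 0;

        /* Counting the same val_size down by 1 per word instead
         * would iterate reg_size times too often. */
        while (val_size >= reg_size) {
            words++;
            val_size -= reg_size;
        }
        printf("%u words copied\n", words);  /* 4 */
        return 0;
    }
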
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0438512f4d69..0acebc87ec20 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -760,16 +760,6 @@ const void * __init of_flat_dt_match_machine(const void *default_match,
}
#ifdef CONFIG_BLK_DEV_INITRD
-#ifndef __early_init_dt_declare_initrd
-static void __early_init_dt_declare_initrd(unsigned long start,
- unsigned long end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
@@ -792,7 +782,9 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
return;
end = of_read_number(prop, len/4);
- __early_init_dt_declare_initrd(start, end);
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
(unsigned long long)start, (unsigned long long)end);
@@ -1014,16 +1006,13 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
}
#ifdef CONFIG_HAVE_MEMBLOCK
-#ifndef MIN_MEMBLOCK_ADDR
-#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
-#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
#endif
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
- const u64 phys_offset = MIN_MEMBLOCK_ADDR;
+ const u64 phys_offset = __pa(PAGE_OFFSET);
if (!PAGE_ALIGNED(base)) {
if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 3d8019eb3d84..5c717275a7fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -939,8 +939,7 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
int eint_num, virq, eint_offset;
unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
- static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
- 128000, 256000};
+ static const unsigned int dbnc_arr[] = {0, 1, 16, 32, 64, 128, 256};
const struct mtk_desc_pin *pin;
struct irq_data *d;
@@ -958,9 +957,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
if (!mtk_eint_can_en_debounce(pctl, eint_num))
return -ENOSYS;
- dbnc = ARRAY_SIZE(debounce_time);
- for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
- if (debounce <= debounce_time[i]) {
+ dbnc = ARRAY_SIZE(dbnc_arr);
+ for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
+ if (debounce <= dbnc_arr[i]) {
dbnc = i;
break;
}
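
The removed debounce_time[] table expresses the supported periods in microseconds and the loop picks the smallest entry that still covers the request. A standalone sketch of that selection, reusing the same table values:

    #include <stdio.h>

    static const unsigned int debounce_time_us[] = {
        500, 1000, 16000, 32000, 64000, 128000, 256000
    };
    #define N (sizeof(debounce_time_us) / sizeof(debounce_time_us[0]))

    /* Smallest supported period covering the request; -1 when the
     * request exceeds the hardware maximum. */
    static int pick_debounce(unsigned int req_us)
    {
        for (unsigned int i = 0; i < N; i++)
            if (req_us <= debounce_time_us[i])
                return (int)i;
        return -1;
    }

    int main(void)
    {
        printf("%d\n", pick_debounce(20000));   /* 3 -> 32000us */
        printf("%d\n", pick_debounce(500000));  /* -1: too long */
        return 0;
    }
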
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index b3235fd2950c..33edd07d9149 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -717,11 +717,9 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_BIAS_PULL_UP:
conf |= ATMEL_PIO_PUEN_MASK;
- conf &= (~ATMEL_PIO_PDEN_MASK);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
conf |= ATMEL_PIO_PDEN_MASK;
- conf &= (~ATMEL_PIO_PUEN_MASK);
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
if (arg == 0)
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 23b6b8c29a99..ef04b962c3d5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
/* Parse pins in each row from LSB */
while (mask) {
- bit_pos = __ffs(mask);
+ bit_pos = ffs(mask);
pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
- mask_pos = ((pcs->fmask) << bit_pos);
+ mask_pos = ((pcs->fmask) << (bit_pos - 1));
val_pos = val & mask_pos;
submask = mask & mask_pos;
@@ -1847,7 +1847,7 @@ static int pcs_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "pinctrl-single,function-mask",
&pcs->fmask);
if (!ret) {
- pcs->fshift = __ffs(pcs->fmask);
+ pcs->fshift = ffs(pcs->fmask) - 1;
pcs->fmax = pcs->fmask >> pcs->fshift;
} else {
/* If mask property doesn't exist, function mux is invalid. */
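
The __ffs()/ffs() swap above is an off-by-one trap: ffs() returns a 1-based bit index (0 when no bit is set), while the kernel's __ffs() is 0-based, hence the compensating "- 1" and "(bit_pos - 1)" adjustments in the restored code. A small demonstration, assuming GCC/Clang for the __builtin_ctz() stand-in:

    #include <strings.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 0x00f0;

        /* POSIX ffs(): 1-based, so the lowest set bit of 0xf0 is "5".
         * __builtin_ctz() matches the kernel's 0-based __ffs(). */
        printf("ffs   -> %d\n", ffs((int)mask));       /* 5 */
        printf("__ffs -> %d\n", __builtin_ctz(mask));  /* 4 */
        return 0;
    }
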
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index f774cb576ffa..b0f62141ea4d 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -131,7 +131,7 @@ MODULE_LICENSE("GPL");
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
-#define HCI_HOTKEY_ENABLE 0x09
+#define HCI_HOTKEY_ENABLE 0x01
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 5d5adee16886..423ce087cd9c 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
p->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(p->base)) {
- ret = PTR_ERR(p->base);
+ if (!p->base) {
+ ret = -ENOMEM;
goto out_clk;
}
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 5cf4a97e0304..f2e1a39ce0f3 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
- AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
+ AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
- AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
+ AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 27a5deb1213e..d49d8606da15 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -150,7 +150,7 @@ static void regulator_lock_supply(struct regulator_dev *rdev)
{
int i;
- for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
+ for (i = 0; rdev->supply; rdev = rdev_get_supply(rdev), i++)
mutex_lock_nested(&rdev->mutex, i);
}
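
The two loop guards differ in whether the bottom regulator itself gets locked: with "rdev->supply" as the condition, a regulator that has no supply is never locked at all. A toy walk of the chain with per-level indices, mirroring the subclass argument of mutex_lock_nested():

    #include <stdio.h>

    struct regulator_dev {
        const char *name;
        struct regulator_dev *supply;
    };

    /* Locks every regulator in the chain, including the first one;
     * the index keeps each nesting level distinct for lockdep. */
    static void lock_supply_chain(struct regulator_dev *rdev)
    {
        int i;

        for (i = 0; rdev; rdev = rdev->supply, i++)
            printf("lock %s (subclass %d)\n", rdev->name, i);
    }

    int main(void)
    {
        struct regulator_dev vsys  = { "vsys", NULL };
        struct regulator_dev vcore = { "vcore", &vsys };

        lock_supply_chain(&vcore);
        return 0;
    }
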
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index b6d831b84e1d..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -305,7 +305,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_s2mps11_buck67810(num, min, step) { \
+#define regulator_desc_s2mps11_buck6_10(num, min, step) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
@@ -321,22 +321,6 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_s2mps11_buck9 { \
- .name = "BUCK9", \
- .id = S2MPS11_BUCK9, \
- .ops = &s2mps11_buck_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = MIN_3000_MV, \
- .uV_step = STEP_25_MV, \
- .n_voltages = S2MPS11_BUCK9_N_VOLTAGES, \
- .ramp_delay = S2MPS11_RAMP_DELAY, \
- .vsel_reg = S2MPS11_REG_B9CTRL2, \
- .vsel_mask = S2MPS11_BUCK9_VSEL_MASK, \
- .enable_reg = S2MPS11_REG_B9CTRL1, \
- .enable_mask = S2MPS11_ENABLE_MASK \
-}
-
static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_ldo(1, STEP_25_MV),
regulator_desc_s2mps11_ldo(2, STEP_50_MV),
@@ -381,11 +365,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(3),
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
- regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck9,
- regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
+ regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
};
static struct regulator_ops s2mps14_reg_ops;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 27343e1c43ef..58f5d3b8e981 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -202,10 +202,9 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
}
}
- if (i >= s5m8767->num_regulators)
- return -EINVAL;
-
- *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+ if (i < s5m8767->num_regulators)
+ *enable_ctrl =
+ s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
return 0;
}
@@ -938,12 +937,8 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
else
regulators[id].vsel_mask = 0xff;
- ret = s5m8767_get_register(s5m8767, id, &enable_reg,
+ s5m8767_get_register(s5m8767, id, &enable_reg,
&enable_val);
- if (ret) {
- dev_err(s5m8767->dev, "error reading registers\n");
- return ret;
- }
regulators[id].enable_reg = enable_reg;
regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
regulators[id].enable_val = enable_val;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index d5c1b057a739..05a51ef52703 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
* Only use this where you are certain another lock will not be held.
*/
static inline void
-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
+ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
{
- spin_lock_irqsave(&rtc->lock, *flags);
+ spin_lock_irqsave(&rtc->lock, flags);
ds1685_rtc_switch_to_bank1(rtc);
}
@@ -1304,7 +1304,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 reg = 0, bit = 0, tmp;
- unsigned long flags;
+ unsigned long flags = 0;
long int val = 0;
const struct ds1685_rtc_ctrl_regs *reg_info =
ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
@@ -1325,7 +1325,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
bit = reg_info->bit;
/* Safe to spinlock during a write. */
- ds1685_rtc_begin_ctrl_access(rtc, &flags);
+ ds1685_rtc_begin_ctrl_access(rtc, flags);
tmp = rtc->read(rtc, reg);
rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
ds1685_rtc_end_ctrl_access(rtc, flags);
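
spin_lock_irqsave() must store the saved interrupt state somewhere the caller can later hand to the matching restore; once flags is passed by value, only a local copy is updated and the caller restores stale state. A plain-C sketch of the difference, with stub functions standing in for the real spinlock API:

    #include <stdio.h>

    /* Stubs for spin_lock_irqsave()/spin_unlock_irqrestore(): the
     * saved state must land in the caller's variable. */
    static void lock_irqsave(unsigned long *flags) { *flags = 0xabc; }
    static void unlock_irqrestore(unsigned long flags)
    {
        printf("restoring %#lx\n", flags);
    }

    static void begin_by_pointer(unsigned long *flags) { lock_irqsave(flags); }
    static void begin_by_value(unsigned long flags)    { lock_irqsave(&flags); }

    int main(void)
    {
        unsigned long a = 0, b = 0;

        begin_by_pointer(&a);
        unlock_irqrestore(a);  /* restores 0xabc */

        begin_by_value(b);     /* only a local copy was updated */
        unlock_irqrestore(b);  /* restores 0: saved state lost */
        return 0;
    }
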
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index b1b4746a0eab..097325d96db5 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
* it does not seem to carry it over a subsequent write/read.
 * So we'll limit ourselves to 100 years, starting at 2000 for now.
*/
- buf[6] = bin2bcd(tm->tm_year - 100);
+ buf[6] = tm->tm_year - 100;
/*
* CTL1 only contains TEST-mode bits apart from stop,
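
The year register here is BCD-encoded, so dropping bin2bcd() stores a raw binary value: for tm_year 121 (2021) the chip would receive 0x15 rather than 0x21. A one-liner equivalent of the conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the kernel's bin2bcd(). */
    static uint8_t bin2bcd(uint8_t val)
    {
        return (uint8_t)(((val / 10) << 4) | (val % 10));
    }

    int main(void)
    {
        int tm_year = 121;  /* years since 1900, i.e. 2021 */

        printf("raw=%#04x bcd=%#04x\n",
               tm_year - 100, bin2bcd((uint8_t)(tm_year - 100)));
        return 0;
    }
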
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 725dccae24e7..7184a0eda793 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
MAX77686_RTCIRQ_RTCA1);
- if (info->virq <= 0) {
+ if (!info->virq) {
ret = -ENXIO;
goto err_rtc;
}
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 17341feadad1..bd911bafb809 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -65,6 +65,7 @@
static const struct i2c_device_id rx8025_id[] = {
{ "rx8025", 0 },
+ { "rv8803", 1 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8025_id);
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index e1b86bb01062..f64c282275b3 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -272,13 +272,12 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
}
static const struct rtc_class_ops vr41xx_rtc_ops = {
- .release = vr41xx_rtc_release,
- .ioctl = vr41xx_rtc_ioctl,
- .read_time = vr41xx_rtc_read_time,
- .set_time = vr41xx_rtc_set_time,
- .read_alarm = vr41xx_rtc_read_alarm,
- .set_alarm = vr41xx_rtc_set_alarm,
- .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
+ .release = vr41xx_rtc_release,
+ .ioctl = vr41xx_rtc_ioctl,
+ .read_time = vr41xx_rtc_read_time,
+ .set_time = vr41xx_rtc_set_time,
+ .read_alarm = vr41xx_rtc_read_alarm,
+ .set_alarm = vr41xx_rtc_set_alarm,
};
static int rtc_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 0b331c9c0a8f..e5647d59224f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -13,13 +13,13 @@ menuconfig SCSI_DH
config SCSI_DH_RDAC
tristate "LSI RDAC Device Handler"
- depends on SCSI_DH && SCSI
+ depends on SCSI_DH
help
If you have a LSI RDAC select y. Otherwise, say N.
config SCSI_DH_HP_SW
tristate "HP/COMPAQ MSA Device Handler"
- depends on SCSI_DH && SCSI
+ depends on SCSI_DH
help
If you have a HP/COMPAQ MSA device that requires START_STOP to
be sent to start it and cannot upgrade the firmware then select y.
@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
config SCSI_DH_EMC
tristate "EMC CLARiiON Device Handler"
- depends on SCSI_DH && SCSI
+ depends on SCSI_DH
help
If you have a EMC CLARiiON select y. Otherwise, say N.
config SCSI_DH_ALUA
tristate "SPC-3 ALUA Device Handler"
- depends on SCSI_DH && SCSI
+ depends on SCSI_DH
help
SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
Access (ALUA).
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b0d92b84bcdc..db9446c612da 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2855,7 +2855,7 @@ lpfc_online(struct lpfc_hba *phba)
}
vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL) {
+ if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
@@ -2872,8 +2872,7 @@ lpfc_online(struct lpfc_hba *phba)
}
spin_unlock_irq(shost->host_lock);
}
- }
- lpfc_destroy_vport_work_array(phba, vports);
+ lpfc_destroy_vport_work_array(phba, vports);
lpfc_unblock_mgmt_io(phba);
return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 00ce3e269a43..97a1c1c33b05 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6282,13 +6282,12 @@ out:
}
for (i = 0; i < ioc->sge_count; i++) {
- if (kbuff_arr[i]) {
+ if (kbuff_arr[i])
dma_free_coherent(&instance->pdev->dev,
le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i],
le32_to_cpu(kern_sge32[i].phys_addr));
kbuff_arr[i] = NULL;
- }
}
megasas_return_cmd(instance, cmd);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 634254a52301..5d0ec42a9317 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
.eh_bus_reset_handler = qla1280_eh_bus_reset,
.eh_host_reset_handler = qla1280_eh_adapter_reset,
.bios_param = qla1280_biosparam,
- .can_queue = MAX_OUTSTANDING_COMMANDS,
+ .can_queue = 0xfffff,
.this_id = -1,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 4a65c5bda146..534c58937a56 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -419,7 +419,6 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
if (error) {
dev_err(dev, "failed to handle node %s: %d\n",
node->name, error);
- of_node_put(node);
goto err_out;
}
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 73c8ea0b1360..b25dc71b0ea9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
.reg_general = -1,
.reg_ssp = 0x20,
.reg_cs_ctrl = 0x24,
- .reg_capabilities = -1,
+ .reg_capabilities = 0xfc,
.rx_threshold = 1,
.tx_threshold_lo = 32,
.tx_threshold_hi = 56,
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 035767c02072..79a8bc4f6cec 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -265,10 +265,7 @@ static inline u32 rx_max(struct rockchip_spi *rs)
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 ser;
- struct spi_master *master = spi->master;
- struct rockchip_spi *rs = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(rs->dev);
+ struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
@@ -293,8 +290,6 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
ser &= ~(1 << spi->chip_select);
writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
-
- pm_runtime_put_sync(rs->dev);
}
static int rockchip_spi_prepare_message(struct spi_master *master,
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 5044c6198332..64318fcfacf2 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -94,7 +94,6 @@ struct ti_qspi {
#define QSPI_FLEN(n) ((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS 128
#define QSPI_WLEN_MAX_BYTES 16
-#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
/* STATUS REGISTER */
#define BUSY 0x01
@@ -225,16 +224,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
return -ETIMEDOUT;
}
-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
- int count)
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
- int wlen, xfer_len;
+ int wlen, count, xfer_len;
unsigned int cmd;
const u8 *txbuf;
u32 data;
txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL;
+ count = t->len;
wlen = t->bits_per_word >> 3; /* in bytes */
xfer_len = wlen;
@@ -294,10 +293,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
return 0;
}
-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
- int count)
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
- int wlen;
+ int wlen, count;
unsigned int cmd;
u8 *rxbuf;
@@ -314,6 +312,7 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
cmd |= QSPI_RD_SNGL;
break;
}
+ count = t->len;
wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
@@ -344,13 +343,12 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
return 0;
}
-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
- int count)
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
int ret;
if (t->tx_buf) {
- ret = qspi_write_msg(qspi, t, count);
+ ret = qspi_write_msg(qspi, t);
if (ret) {
dev_dbg(qspi->dev, "Error while writing\n");
return ret;
@@ -358,7 +356,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
}
if (t->rx_buf) {
- ret = qspi_read_msg(qspi, t, count);
+ ret = qspi_read_msg(qspi, t);
if (ret) {
dev_dbg(qspi->dev, "Error while reading\n");
return ret;
@@ -375,8 +373,7 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
struct spi_device *spi = m->spi;
struct spi_transfer *t;
int status = 0, ret;
- unsigned int frame_len_words, transfer_len_words;
- int wlen;
+ int frame_length;
/* setup device control reg */
qspi->dc = 0;
@@ -388,38 +385,30 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
if (spi->mode & SPI_CS_HIGH)
qspi->dc |= QSPI_CSPOL(spi->chip_select);
- frame_len_words = 0;
- list_for_each_entry(t, &m->transfers, transfer_list)
- frame_len_words += t->len / (t->bits_per_word >> 3);
- frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
+ frame_length = (m->frame_length << 3) / spi->bits_per_word;
+
+ frame_length = clamp(frame_length, 0, QSPI_FRAME);
/* setup command reg */
qspi->cmd = 0;
qspi->cmd |= QSPI_EN_CS(spi->chip_select);
- qspi->cmd |= QSPI_FLEN(frame_len_words);
+ qspi->cmd |= QSPI_FLEN(frame_length);
ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
mutex_lock(&qspi->list_lock);
list_for_each_entry(t, &m->transfers, transfer_list) {
- qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
- QSPI_WLEN(t->bits_per_word));
-
- wlen = t->bits_per_word >> 3;
- transfer_len_words = min(t->len / wlen, frame_len_words);
+ qspi->cmd |= QSPI_WLEN(t->bits_per_word);
- ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
+ ret = qspi_transfer_msg(qspi, t);
if (ret) {
dev_dbg(qspi->dev, "transfer message failed\n");
mutex_unlock(&qspi->list_lock);
return -EINVAL;
}
- m->actual_length += transfer_len_words * wlen;
- frame_len_words -= transfer_len_words;
- if (frame_len_words == 0)
- break;
+ m->actual_length += t->len;
}
mutex_unlock(&qspi->list_lock);
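
The removed code sums the frame length in words across all transfers, using each transfer's own bits_per_word, rather than deriving it from the message-wide frame_length. A standalone sketch of that summation (struct and values are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct xfer { size_t len; unsigned int bits_per_word; };

    /* Sum per transfer, since bits_per_word may differ between
     * transfers in one message (e.g. a 1-byte command followed by
     * a 32-bit-word data phase). */
    static unsigned int frame_len_words(const struct xfer *t, size_t n)
    {
        unsigned int words = 0;

        for (size_t i = 0; i < n; i++)
            words += (unsigned int)(t[i].len / (t[i].bits_per_word >> 3));
        return words;
    }

    int main(void)
    {
        struct xfer msg[] = { { 1, 8 }, { 256, 32 } };

        printf("%u words\n", frame_len_words(msg, 2));  /* 1 + 64 = 65 */
        return 0;
    }
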
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 4c6f1d7d2eaf..05de0dad8762 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
- Remove unneeded file entries in sysfs
- Remove software processing of IB protocol and place in library for use
by qib, ipath (if still present), hfi1, and eventually soft-roce
-- Replace incorrect uAPI
+
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index c851e51b1dc3..aae9826ec62b 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -62,8 +62,6 @@
#include <linux/cred.h>
#include <linux/uio.h>
-#include <rdma/ib.h>
-
#include "hfi.h"
#include "pio.h"
#include "device.h"
@@ -216,10 +214,6 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
int uctxt_required = 1;
int must_be_root = 0;
- /* FIXME: This interface cannot continue out of staging */
- if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
- return -EACCES;
-
if (count < sizeof(cmd)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 7106288efae3..e845841ab036 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -545,14 +545,15 @@ static int rockchip_configure_from_dt(struct device *dev,
thermal->chip->tshut_temp);
thermal->tshut_temp = thermal->chip->tshut_temp;
} else {
- if (shut_temp > INT_MAX) {
- dev_err(dev, "Invalid tshut temperature specified: %d\n",
- shut_temp);
- return -ERANGE;
- }
thermal->tshut_temp = shut_temp;
}
+ if (thermal->tshut_temp > INT_MAX) {
+ dev_err(dev, "Invalid tshut temperature specified: %d\n",
+ thermal->tshut_temp);
+ return -ERANGE;
+ }
+
if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
dev_warn(dev,
"Missing tshut mode property, using default (%s)\n",
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 63a06ab6ba03..51c7507b0444 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -38,6 +38,7 @@
#include <linux/major.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -115,6 +116,8 @@ struct sci_port {
struct timer_list rx_timer;
unsigned int rx_timeout;
#endif
+
+ struct notifier_block freq_transition;
};
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -1603,6 +1606,29 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
return ret;
}
+/*
+ * Here we define a transition notifier so that we can update all of our
+ * ports' baud rate when the peripheral clock changes.
+ */
+static int sci_notifier(struct notifier_block *self,
+ unsigned long phase, void *p)
+{
+ struct sci_port *sci_port;
+ unsigned long flags;
+
+ sci_port = container_of(self, struct sci_port, freq_transition);
+
+ if (phase == CPUFREQ_POSTCHANGE) {
+ struct uart_port *port = &sci_port->port;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->uartclk = clk_get_rate(sci_port->iclk);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return NOTIFY_OK;
+}
+
static const struct sci_irq_desc {
const char *desc;
irq_handler_t handler;
@@ -2533,6 +2559,9 @@ static int sci_remove(struct platform_device *dev)
{
struct sci_port *port = platform_get_drvdata(dev);
+ cpufreq_unregister_notifier(&port->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
uart_remove_one_port(&sci_uart_driver, &port->port);
sci_cleanup_single(port);
@@ -2685,6 +2714,16 @@ static int sci_probe(struct platform_device *dev)
if (ret)
return ret;
+ sp->freq_transition.notifier_call = sci_notifier;
+
+ ret = cpufreq_register_notifier(&sp->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (unlikely(ret < 0)) {
+ uart_remove_one_port(&sci_uart_driver, &sp->port);
+ sci_cleanup_single(sp);
+ return ret;
+ }
+
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
#endif
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b8b580e5ae6e..9eb1cff28bd4 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -74,15 +74,6 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
-
- /*
- * Companion device should be either UHCI,OHCI or EHCI host
- * controller, otherwise skip.
- */
- if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
- companion->class != CL_EHCI)
- continue;
-
companion_hcd = pci_get_drvdata(companion);
if (!companion_hcd || !companion_hcd->self.root_hub)
continue;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 278d251d8235..c5fd3ce3ed9a 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -710,7 +710,6 @@ static void ffs_user_copy_worker(struct work_struct *work)
work);
int ret = io_data->req->status ? io_data->req->status :
io_data->req->actual;
- bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
ffs_log("enter: ret %d", ret);
@@ -724,11 +723,13 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
- if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
+ if (io_data->ffs->ffs_eventfd &&
+ !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
usb_ep_free_request(io_data->ep, io_data->req);
+ io_data->kiocb->private = NULL;
if (io_data->read)
kfree(io_data->to_free);
kfree(io_data->buf);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ac298e632d73..2ac142e3cce9 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1923,12 +1923,6 @@ no_bw:
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
- xhci->usb2_ports = NULL;
- xhci->usb3_ports = NULL;
- xhci->port_array = NULL;
- xhci->rh_bw = NULL;
- xhci->ext_caps = NULL;
-
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ea4fb4b0cd44..6c47c26b5df7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,7 +48,6 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
-#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
static const char hcd_name[] = "xhci_hcd";
@@ -157,8 +156,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f20f0b0d26ef..dd7669331d00 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1113,8 +1113,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Resume root hubs only when there are pending events. */
status = readl(&xhci->op_regs->status);
if (status & STS_EINT) {
- usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
}
}
@@ -1129,10 +1129,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
+ set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ usb_hcd_poll_rh_status(xhci->shared_hcd);
return retval;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index a2b43a6e7fa7..bdc0f2f24f19 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -108,7 +108,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
- { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,7 +117,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
- { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -142,8 +140,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
- { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
- { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index e40da7759a0e..facaaf003f19 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,17 +741,6 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
- if (size > urb->transfer_buffer_length) {
- /* should not happen, probably malicious packet */
- if (ud->side == USBIP_STUB) {
- usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
- return 0;
- } else {
- usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
- return -EPIPE;
- }
- }
-
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 9868d8a5c1ed..42ea4028cfe1 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2249,6 +2249,7 @@ config XEN_FBDEV_FRONTEND
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
+ select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
select XEN_XENBUS_FRONTEND
default y
help
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index d00510029c93..0081725c6b5b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -209,7 +209,8 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 2,
.hsync_len = 0,
.vsync_len = 0,
- .sync = FB_SYNC_CLK_INVERT,
+ .sync = FB_SYNC_CLK_INVERT |
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
/* Sharp LK043T1DG01 */
[1] = {
@@ -223,7 +224,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 2,
.hsync_len = 41,
.vsync_len = 10,
- .sync = 0,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.flag = 0,
},
[2] = {
@@ -238,7 +239,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 10,
- .sync = 0,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.flag = 0,
},
[3] = {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 364bc44610c1..12eab503efd1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -152,8 +152,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-static void release_memory_resource(struct resource *resource);
-
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
@@ -270,20 +268,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL;
}
-#ifdef CONFIG_SPARSEMEM
- {
- unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
- unsigned long pfn = res->start >> PAGE_SHIFT;
-
- if (pfn > limit) {
- pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
- pfn, limit);
- release_memory_resource(res);
- return NULL;
- }
- }
-#endif
-
return res;
}
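
To put a number on the SPARSEMEM check removed above, here is the arithmetic under assumed x86-64 values of that era (MAX_PHYSMEM_BITS = 46, PAGE_SHIFT = 12); other architectures shift the figures:

/* limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT)
 *       = 1UL << (46 - 12) = 2^34 page frames
 * 2^34 frames * 4 KiB/frame = 64 TiB of addressable RAM
 * A hotplugged resource whose start pfn fell above 2^34 was rejected so
 * the new region stayed within the sparsemem-addressable range.        */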
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index f4edd6df3df2..38272ad24551 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -316,6 +316,7 @@ static int evtchn_resize_ring(struct per_user_data *u)
{
unsigned int new_size;
evtchn_port_t *new_ring, *old_ring;
+ unsigned int p, c;
/*
* Ensure the ring is large enough to capture all possible
@@ -345,17 +346,20 @@ static int evtchn_resize_ring(struct per_user_data *u)
/*
* Copy the old ring contents to the new ring.
*
- * To take care of wrapping, a full ring, and the new index
- * pointing into the second half, simply copy the old contents
- * twice.
+ * If the ring contents cross the end of the current ring,
+ * it needs to be copied in two chunks.
*
 * +---------+    +------------------+
- * |34567  12| -> |34567  1234567  12|
- * +-----p-c-+    +-------c------p---+
+ * |34567  12| -> |  1234567         |
+ * +-----p-c-+    +------------------+
*/
- memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
- memcpy(new_ring + u->ring_size, old_ring,
- u->ring_size * sizeof(*u->ring));
+ p = evtchn_ring_offset(u, u->ring_prod);
+ c = evtchn_ring_offset(u, u->ring_cons);
+ if (p < c) {
+ memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
+ memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
+ } else
+ memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
u->ring = new_ring;
u->ring_size = new_size;
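
The two-chunk copy restored above generalizes to any power-of-two ring driven by free-running producer/consumer counters. A standalone sketch with hypothetical names (copy_ring and its parameters); dst is the new ring, already allocated at twice the old size:

#include <string.h>

static void copy_ring(unsigned int *dst, const unsigned int *src,
		      unsigned int size,   /* old ring size, power of two */
		      unsigned int prod, unsigned int cons)
{
	unsigned int p = prod & (size - 1);  /* masked producer offset */
	unsigned int c = cons & (size - 1);  /* masked consumer offset */

	if (p < c) {
		/* Contents wrap: copy the tail in place, then the head
		 * just past the old size so existing indices still land
		 * on their entries in the doubled ring. */
		memcpy(dst + c, src + c, (size - c) * sizeof(*src));
		memcpy(dst + size, src, p * sizeof(*src));
	} else {
		/* Contents are contiguous: a single copy suffices. */
		memcpy(dst + c, src + c, (p - c) * sizeof(*src));
	}
}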
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 0f5d05bf2131..b7fcc0de0b2f 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
if (unlikely(!inode))
return failed_creating(dentry);
- make_empty_dir_inode(inode);
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
dentry->d_fsdata = (void *)f;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index daad932eeb38..786cb51cab56 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -933,15 +933,6 @@ struct ext4_inode_info {
* by other means, so we have i_data_sem.
*/
struct rw_semaphore i_data_sem;
- /*
- * i_mmap_sem is for serializing page faults with truncate / punch hole
- * operations. We have to make sure that new page cannot be faulted in
- * a section of the inode that is being punched. We cannot easily use
- * i_data_sem for this since we need protection for the whole punch
- * operation and i_data_sem ranks below transaction start so we have
- * to occasionally drop it.
- */
- struct rw_semaphore i_mmap_sem;
struct inode vfs_inode;
struct jbd2_inode *jinode;
@@ -2517,7 +2508,6 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t lend);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
@@ -2882,9 +2872,6 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
return changed;
}
-int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
- loff_t len);
-
struct ext4_group_info {
unsigned long bb_state;
struct rb_root bb_free_root;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 3578b25fccfd..551353b1b17a 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4685,6 +4685,10 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
if (len <= EXT_UNWRITTEN_MAX_LEN)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
+ /* Wait all existing dio workers, newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
/*
* credits to insert 1 extent into extent tree
*/
@@ -4748,6 +4752,8 @@ retry:
goto retry;
}
+ ext4_inode_resume_unlocked_dio(inode);
+
return ret > 0 ? ret2 : ret;
}
@@ -4764,6 +4770,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
int partial_begin, partial_end;
loff_t start, end;
ext4_lblk_t lblk;
+ struct address_space *mapping = inode->i_mapping;
unsigned int blkbits = inode->i_blkbits;
trace_ext4_zero_range(inode, offset, len, mode);
@@ -4779,6 +4786,17 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
/*
+ * Write out all dirty pages to avoid race conditions
+ * Then release them.
+ */
+ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ ret = filemap_write_and_wait_range(mapping, offset,
+ offset + len - 1);
+ if (ret)
+ return ret;
+ }
+
+ /*
 * Round up offset. This is not fallocate, we need to zero out
* blocks, so convert interior block aligned part of the range to
* unwritten and possibly manually zero out unaligned parts of the
@@ -4821,10 +4839,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
-
/* Preallocate the range including the unaligned edges */
if (partial_begin || partial_end) {
ret = ext4_alloc_file_blocks(file,
@@ -4833,7 +4847,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
round_down(offset, 1 << blkbits)) >> blkbits,
new_size, flags, mode);
if (ret)
- goto out_dio;
+ goto out_mutex;
}
@@ -4842,23 +4856,16 @@ static long ext4_zero_range(struct file *file, loff_t offset,
flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE);
- /*
- * Prevent page faults from reinstantiating pages we have
- * released from page cache.
- */
- down_write(&EXT4_I(inode)->i_mmap_sem);
- ret = ext4_update_disksize_before_punch(inode, offset, len);
- if (ret) {
- up_write(&EXT4_I(inode)->i_mmap_sem);
- goto out_dio;
- }
- /* Now release the pages and zero block aligned part of pages */
+ /* Now release the pages and zero block aligned part of pages*/
truncate_pagecache_range(inode, start, end - 1);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ /* Wait all existing dio workers, newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
- up_write(&EXT4_I(inode)->i_mmap_sem);
if (ret)
goto out_dio;
}
@@ -4991,13 +4998,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
goto out;
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
-
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
- ext4_inode_resume_unlocked_dio(inode);
if (ret)
goto out;
@@ -5492,7 +5494,21 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
+ /*
+ * Need to round down offset to be aligned with page size boundary
+ * for page size > block size.
+ */
+ ioffset = round_down(offset, PAGE_SIZE);
+
+ /* Write out all dirty pages */
+ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+ LLONG_MAX);
+ if (ret)
+ return ret;
+
+ /* Take mutex lock */
mutex_lock(&inode->i_mutex);
+
/*
* There is no need to overlap collapse range with EOF, in which case
* it is effectively a truncate operation
@@ -5508,43 +5524,17 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
goto out_mutex;
}
+ truncate_pagecache(inode, ioffset);
+
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- down_write(&EXT4_I(inode)->i_mmap_sem);
- /*
- * Need to round down offset to be aligned with page size boundary
- * for page size > block size.
- */
- ioffset = round_down(offset, PAGE_SIZE);
- /*
- * Write tail of the last page before removed range since it will get
- * removed from the page cache below.
- */
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
- if (ret)
- goto out_mmap;
- /*
- * Write data that will be shifted to preserve them when discarding
- * page cache below. We are also protected from pages becoming dirty
- * by i_mmap_sem.
- */
- ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
- LLONG_MAX);
- if (ret)
- goto out_mmap;
- truncate_pagecache(inode, ioffset);
-
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- goto out_mmap;
+ goto out_dio;
}
down_write(&EXT4_I(inode)->i_data_sem);
@@ -5583,8 +5573,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
-out_mmap:
- up_write(&EXT4_I(inode)->i_mmap_sem);
+out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
@@ -5638,7 +5627,21 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
+ /*
+ * Need to round down to align start offset to page size boundary
+ * for page size > block size.
+ */
+ ioffset = round_down(offset, PAGE_SIZE);
+
+ /* Write out all dirty pages */
+ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+ LLONG_MAX);
+ if (ret)
+ return ret;
+
+ /* Take mutex lock */
mutex_lock(&inode->i_mutex);
+
/* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ret = -EOPNOTSUPP;
@@ -5657,32 +5660,17 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
goto out_mutex;
}
+ truncate_pagecache(inode, ioffset);
+
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- down_write(&EXT4_I(inode)->i_mmap_sem);
- /*
- * Need to round down to align start offset to page size boundary
- * for page size > block size.
- */
- ioffset = round_down(offset, PAGE_SIZE);
- /* Write out all dirty pages */
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
- LLONG_MAX);
- if (ret)
- goto out_mmap;
- truncate_pagecache(inode, ioffset);
-
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- goto out_mmap;
+ goto out_dio;
}
/* Expand file to avoid data loss if there is error while shifting */
@@ -5753,8 +5741,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
-out_mmap:
- up_write(&EXT4_I(inode)->i_mmap_sem);
+out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 0d24ebcd7c9e..113837e7ba98 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -209,18 +209,15 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int result;
handle_t *handle = NULL;
- struct inode *inode = file_inode(vma->vm_file);
- struct super_block *sb = inode->i_sb;
+ struct super_block *sb = file_inode(vma->vm_file)->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE;
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
EXT4_DATA_TRANS_BLOCKS(sb));
- } else
- down_read(&EXT4_I(inode)->i_mmap_sem);
+ }
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
@@ -231,10 +228,8 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
- up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(sb);
- } else
- up_read(&EXT4_I(inode)->i_mmap_sem);
+ }
return result;
}
@@ -251,12 +246,10 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
ext4_chunk_trans_blocks(inode,
PMD_SIZE / PAGE_SIZE));
- } else
- down_read(&EXT4_I(inode)->i_mmap_sem);
+ }
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
@@ -267,71 +260,30 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
- up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(sb);
- } else
- up_read(&EXT4_I(inode)->i_mmap_sem);
+ }
return result;
}
static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- int err;
- struct inode *inode = file_inode(vma->vm_file);
-
- sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
- err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
- ext4_end_io_unwritten);
- up_read(&EXT4_I(inode)->i_mmap_sem);
- sb_end_pagefault(inode->i_sb);
-
- return err;
-}
-
-/*
- * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
- * handler we check for races against truncate. Note that since we cycle through
- * i_mmap_sem, we are sure that also any hole punching that began before we
- * were called is finished by now and so if it included part of the file we
- * are working on, our pte will get unmapped and the check for pte_same() in
- * wp_pfn_shared() fails. Thus fault gets retried and things work out as
- * desired.
- */
-static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct super_block *sb = inode->i_sb;
- int ret = VM_FAULT_NOPAGE;
- loff_t size;
-
- sb_start_pagefault(sb);
- file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (vmf->pgoff >= size)
- ret = VM_FAULT_SIGBUS;
- up_read(&EXT4_I(inode)->i_mmap_sem);
- sb_end_pagefault(sb);
-
- return ret;
+ return dax_mkwrite(vma, vmf, ext4_get_block_dax,
+ ext4_end_io_unwritten);
}
static const struct vm_operations_struct ext4_dax_vm_ops = {
.fault = ext4_dax_fault,
.pmd_fault = ext4_dax_pmd_fault,
.page_mkwrite = ext4_dax_mkwrite,
- .pfn_mkwrite = ext4_dax_pfn_mkwrite,
+ .pfn_mkwrite = dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif
static const struct vm_operations_struct ext4_file_vm_ops = {
- .fault = ext4_filemap_fault,
+ .fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = ext4_page_mkwrite,
};
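
For reference, the non-DAX side this hunk restores follows the usual wiring: the file's mmap handler points vma->vm_ops at a table whose .fault is the generic filemap_fault, with only write faults handled by the filesystem. A sketch against 4.4-era signatures; my_mmap, my_page_mkwrite and my_file_vm_ops are hypothetical names:

#include <linux/fs.h>
#include <linux/mm.h>

static int my_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct vm_operations_struct my_file_vm_ops = {
	.fault		= filemap_fault,	/* generic page-cache fault  */
	.map_pages	= filemap_map_pages,	/* batched read-ahead faults */
	.page_mkwrite	= my_page_mkwrite,	/* fs-specific write fault   */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &my_file_vm_ops;
	return 0;
}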
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e31d762eedce..06bda0361e7c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3587,35 +3587,6 @@ int ext4_can_truncate(struct inode *inode)
}
/*
- * We have to make sure i_disksize gets properly updated before we truncate
- * page cache due to hole punching or zero range. Otherwise i_disksize update
- * can get lost as it may have been postponed to submission of writeback but
- * that will never happen after we truncate page cache.
- */
-int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
- loff_t len)
-{
- handle_t *handle;
- loff_t size = i_size_read(inode);
-
- WARN_ON(!mutex_is_locked(&inode->i_mutex));
- if (offset > size || offset + len < size)
- return 0;
-
- if (EXT4_I(inode)->i_disksize >= size)
- return 0;
-
- handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ext4_update_i_disksize(inode, size);
- ext4_mark_inode_dirty(handle, inode);
- ext4_journal_stop(handle);
-
- return 0;
-}
-
-/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
*
@@ -3680,26 +3651,17 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
-
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- down_write(&EXT4_I(inode)->i_mmap_sem);
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
/* Now release the pages and zero block aligned part of pages*/
- if (last_block_offset > first_block_offset) {
- ret = ext4_update_disksize_before_punch(inode, offset, length);
- if (ret)
- goto out_dio;
+ if (last_block_offset > first_block_offset)
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
- }
+
+ /* Wait all existing dio workers, newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
@@ -3746,12 +3708,16 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
+ /* Now release the pages again to reduce the race window */
+ if (last_block_offset > first_block_offset)
+ truncate_pagecache_range(inode, first_block_offset,
+ last_block_offset);
+
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
- up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
@@ -4885,7 +4851,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
} else
ext4_wait_for_tail_page_commit(inode);
}
- down_write(&EXT4_I(inode)->i_mmap_sem);
/*
* Truncate pagecache after we've waited for commit
* in data=journal mode to make pages freeable.
@@ -4893,7 +4858,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
truncate_pagecache(inode, inode->i_size);
if (shrink)
ext4_truncate(inode);
- up_write(&EXT4_I(inode)->i_mmap_sem);
}
if (!rc) {
@@ -5145,8 +5109,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
might_sleep();
trace_ext4_mark_inode_dirty(inode, _RET_IP_);
err = ext4_reserve_inode_write(handle, inode, &iloc);
- if (err)
- return err;
if (ext4_handle_valid(handle) &&
EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
!ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
@@ -5177,7 +5139,9 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
}
}
}
- return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ if (!err)
+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+ return err;
}
/*
@@ -5342,8 +5306,6 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
-
- down_read(&EXT4_I(inode)->i_mmap_sem);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
@@ -5413,19 +5375,6 @@ retry_alloc:
out_ret:
ret = block_page_mkwrite_return(ret);
out:
- up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
}
-
-int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct inode *inode = file_inode(vma->vm_file);
- int err;
-
- down_read(&EXT4_I(inode)->i_mmap_sem);
- err = filemap_fault(vma, vmf);
- up_read(&EXT4_I(inode)->i_mmap_sem);
-
- return err;
-}
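
The alignment math in the punch-hole hunk above is easiest to follow with concrete numbers. A worked example, assuming a 4096-byte block size and a punch of offset = 5000, length = 10000:

/* first_block_offset = round_up(5000, 4096)            = 8192
 * last_block_offset  = round_down(5000 + 10000, 4096) - 1
 *                    = round_down(15000, 4096) - 1     = 12287
 * Whole blocks spanning [8192, 12287] are released outright; the partial
 * edges (5000..8191 and 12288..14999) are zeroed in place instead.      */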
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 852c26806af2..ba1cf0bf2f81 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -958,7 +958,6 @@ static void init_once(void *foo)
INIT_LIST_HEAD(&ei->i_orphan);
init_rwsem(&ei->xattr_sem);
init_rwsem(&ei->i_data_sem);
- init_rwsem(&ei->i_mmap_sem);
inode_init_once(&ei->vfs_inode);
}
diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
index c70d06a383e2..011ba6670d99 100644
--- a/fs/ext4/truncate.h
+++ b/fs/ext4/truncate.h
@@ -10,10 +10,8 @@
*/
static inline void ext4_truncate_failed_write(struct inode *inode)
{
- down_write(&EXT4_I(inode)->i_mmap_sem);
truncate_inode_pages(inode->i_mapping, inode->i_size);
ext4_truncate(inode);
- up_write(&EXT4_I(inode)->i_mmap_sem);
}
/*
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 204659a5f6db..735d7522a3a9 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -203,8 +203,6 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
int retnamlen = 0;
int truncate = 0;
int ret = 0;
- char *p;
- int len;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
@@ -269,17 +267,12 @@ repeat:
rr->u.NM.flags);
break;
}
- len = rr->len - 5;
- if (retnamlen + len >= 254) {
+ if ((strlen(retname) + rr->len - 5) >= 254) {
truncate = 1;
break;
}
- p = memchr(rr->u.NM.name, '\0', len);
- if (unlikely(p))
- len = p - rr->u.NM.name;
- memcpy(retname + retnamlen, rr->u.NM.name, len);
- retnamlen += len;
- retname[retnamlen] = '\0';
+ strncat(retname, rr->u.NM.name, rr->len - 5);
+ retnamlen += rr->len - 5;
break;
case SIG('R', 'E'):
kfree(rs.buffer);
diff --git a/fs/namei.c b/fs/namei.c
index 441033da002b..558ea922a515 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2912,10 +2912,22 @@ no_open:
dentry = lookup_real(dir, dentry, nd->flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- }
- if (create_error && !dentry->d_inode) {
- error = create_error;
- goto out;
+
+ if (create_error) {
+ int open_flag = op->open_flag;
+
+ error = create_error;
+ if ((open_flag & O_EXCL)) {
+ if (!dentry->d_inode)
+ goto out;
+ } else if (!dentry->d_inode) {
+ goto out;
+ } else if ((open_flag & O_TRUNC) &&
+ d_is_reg(dentry)) {
+ goto out;
+ }
+ /* will fail later, go on to get the right error */
+ }
}
looked_up:
path->dentry = dentry;
@@ -4197,11 +4209,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
- /*
- * Check source == target.
- * On overlayfs need to look at underlying inodes.
- */
- if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
+ if (source == target)
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 2162434728c0..0cdf497c91ef 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -322,90 +322,3 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
brelse(di_bh);
return acl;
}
-
-int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
-{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct posix_acl *acl;
- int ret;
-
- if (S_ISLNK(inode->i_mode))
- return -EOPNOTSUPP;
-
- if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
- return 0;
-
- acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
- if (IS_ERR(acl) || !acl)
- return PTR_ERR(acl);
- ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
- if (ret)
- return ret;
- ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
- acl, NULL, NULL);
- posix_acl_release(acl);
- return ret;
-}
-
-/*
- * Initialize the ACLs of a new inode. If parent directory has default ACL,
- * then clone to new inode. Called from ocfs2_mknod.
- */
-int ocfs2_init_acl(handle_t *handle,
- struct inode *inode,
- struct inode *dir,
- struct buffer_head *di_bh,
- struct buffer_head *dir_bh,
- struct ocfs2_alloc_context *meta_ac,
- struct ocfs2_alloc_context *data_ac)
-{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct posix_acl *acl = NULL;
- int ret = 0, ret2;
- umode_t mode;
-
- if (!S_ISLNK(inode->i_mode)) {
- if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
- acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
- dir_bh);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- }
- if (!acl) {
- mode = inode->i_mode & ~current_umask();
- ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
- if (ret) {
- mlog_errno(ret);
- goto cleanup;
- }
- }
- }
- if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
- if (S_ISDIR(inode->i_mode)) {
- ret = ocfs2_set_acl(handle, inode, di_bh,
- ACL_TYPE_DEFAULT, acl,
- meta_ac, data_ac);
- if (ret)
- goto cleanup;
- }
- mode = inode->i_mode;
- ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
- if (ret < 0)
- return ret;
-
- ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
- if (ret2) {
- mlog_errno(ret2);
- ret = ret2;
- goto cleanup;
- }
- if (ret > 0) {
- ret = ocfs2_set_acl(handle, inode,
- di_bh, ACL_TYPE_ACCESS,
- acl, meta_ac, data_ac);
- }
- }
-cleanup:
- posix_acl_release(acl);
- return ret;
-}
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
index 2783a75b3999..3fce68d08625 100644
--- a/fs/ocfs2/acl.h
+++ b/fs/ocfs2/acl.h
@@ -35,10 +35,5 @@ int ocfs2_set_acl(handle_t *handle,
struct posix_acl *acl,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac);
-extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
-extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
- struct buffer_head *, struct buffer_head *,
- struct ocfs2_alloc_context *,
- struct ocfs2_alloc_context *);
#endif /* OCFS2_ACL_H */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 77d30cbd944d..0e5b4515f92e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1268,20 +1268,20 @@ bail_unlock_rw:
if (size_change)
ocfs2_rw_unlock(inode, 1);
bail:
+ brelse(bh);
/* Release quota pointers in case we acquired them */
for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
dqput(transfer_to[qtype]);
if (!status && attr->ia_valid & ATTR_MODE) {
- status = ocfs2_acl_chmod(inode, bh);
+ status = posix_acl_chmod(inode, inode->i_mode);
if (status < 0)
mlog_errno(status);
}
if (inode_locked)
ocfs2_inode_unlock(inode, 1);
- brelse(bh);
return status;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 62af9554541d..3123408da935 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -259,6 +259,7 @@ static int ocfs2_mknod(struct inode *dir,
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
int did_block_signals = 0;
+ struct posix_acl *default_acl = NULL, *acl = NULL;
struct ocfs2_dentry_lock *dl = NULL;
trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
@@ -366,6 +367,12 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
+ status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
+ if (status) {
+ mlog_errno(status);
+ goto leave;
+ }
+
handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
S_ISDIR(mode),
xattr_credits));
@@ -414,8 +421,16 @@ static int ocfs2_mknod(struct inode *dir,
inc_nlink(dir);
}
- status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
- meta_ac, data_ac);
+ if (default_acl) {
+ status = ocfs2_set_acl(handle, inode, new_fe_bh,
+ ACL_TYPE_DEFAULT, default_acl,
+ meta_ac, data_ac);
+ }
+ if (!status && acl) {
+ status = ocfs2_set_acl(handle, inode, new_fe_bh,
+ ACL_TYPE_ACCESS, acl,
+ meta_ac, data_ac);
+ }
if (status < 0) {
mlog_errno(status);
@@ -457,6 +472,10 @@ static int ocfs2_mknod(struct inode *dir,
d_instantiate(dentry, inode);
status = 0;
leave:
+ if (default_acl)
+ posix_acl_release(default_acl);
+ if (acl)
+ posix_acl_release(acl);
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
if (handle)
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 6a0c55d7dff0..252119860e6c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4248,12 +4248,20 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
struct inode *inode = d_inode(old_dentry);
struct buffer_head *old_bh = NULL;
struct inode *new_orphan_inode = NULL;
+ struct posix_acl *default_acl, *acl;
+ umode_t mode;
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
return -EOPNOTSUPP;
+ mode = inode->i_mode;
+ error = posix_acl_create(dir, &mode, &default_acl, &acl);
+ if (error) {
+ mlog_errno(error);
+ return error;
+ }
- error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
+ error = ocfs2_create_inode_in_orphan(dir, mode,
&new_orphan_inode);
if (error) {
mlog_errno(error);
@@ -4292,11 +4300,16 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
/* If the security isn't preserved, we need to re-initialize them. */
if (!preserve) {
error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
- &new_dentry->d_name);
+ &new_dentry->d_name,
+ default_acl, acl);
if (error)
mlog_errno(error);
}
out:
+ if (default_acl)
+ posix_acl_release(default_acl);
+ if (acl)
+ posix_acl_release(acl);
if (!error) {
error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
new_dentry);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 877830b05e12..e9164f09841b 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7197,10 +7197,12 @@ out:
*/
int ocfs2_init_security_and_acl(struct inode *dir,
struct inode *inode,
- const struct qstr *qstr)
+ const struct qstr *qstr,
+ struct posix_acl *default_acl,
+ struct posix_acl *acl)
{
- int ret = 0;
struct buffer_head *dir_bh = NULL;
+ int ret = 0;
ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
if (ret) {
@@ -7213,9 +7215,11 @@ int ocfs2_init_security_and_acl(struct inode *dir,
mlog_errno(ret);
goto leave;
}
- ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
- if (ret)
- mlog_errno(ret);
+
+ if (!ret && default_acl)
+ ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ if (!ret && acl)
+ ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
ocfs2_inode_unlock(dir, 0);
brelse(dir_bh);
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index 1633cc15ea1f..f10d5b93c366 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -94,5 +94,7 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
bool preserve_security);
int ocfs2_init_security_and_acl(struct inode *dir,
struct inode *inode,
- const struct qstr *qstr);
+ const struct qstr *qstr,
+ struct posix_acl *default_acl,
+ struct posix_acl *acl);
#endif /* OCFS2_XATTR_H */
diff --git a/fs/open.c b/fs/open.c
index 157b9940dd73..6a24f988d253 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -840,12 +840,16 @@ EXPORT_SYMBOL(file_path);
int vfs_open(const struct path *path, struct file *file,
const struct cred *cred)
{
- struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
-
- if (IS_ERR(inode))
- return PTR_ERR(inode);
+ struct dentry *dentry = path->dentry;
+ struct inode *inode = dentry->d_inode;
file->f_path = *path;
+ if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
+ inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ }
+
return do_dentry_open(file, inode, NULL, cred);
}
diff --git a/fs/pnode.c b/fs/pnode.c
index 99899705b105..6367e1e435c6 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -198,15 +198,10 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
-static struct mount *last_dest, *first_source, *last_source, *dest_master;
+static struct mount *last_dest, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;
-static inline bool peers(struct mount *m1, struct mount *m2)
-{
- return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
-}
-
static int propagate_one(struct mount *m)
{
struct mount *child;
@@ -217,26 +212,24 @@ static int propagate_one(struct mount *m)
/* skip if mountpoint isn't covered by it */
if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
return 0;
- if (peers(m, last_dest)) {
+ if (m->mnt_group_id == last_dest->mnt_group_id) {
type = CL_MAKE_SHARED;
} else {
struct mount *n, *p;
- bool done;
for (n = m; ; n = p) {
p = n->mnt_master;
- if (p == dest_master || IS_MNT_MARKED(p))
+ if (p == dest_master || IS_MNT_MARKED(p)) {
+ while (last_dest->mnt_master != p) {
+ last_source = last_source->mnt_master;
+ last_dest = last_source->mnt_parent;
+ }
+ if (n->mnt_group_id != last_dest->mnt_group_id) {
+ last_source = last_source->mnt_master;
+ last_dest = last_source->mnt_parent;
+ }
break;
+ }
}
- do {
- struct mount *parent = last_source->mnt_parent;
- if (last_source == first_source)
- break;
- done = parent->mnt_master == p;
- if (done && peers(n, parent))
- break;
- last_source = last_source->mnt_master;
- } while (!done);
-
type = CL_SLAVE;
/* beginning of peer group among the slaves? */
if (IS_MNT_SHARED(m))
@@ -288,7 +281,6 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
*/
user_ns = current->nsproxy->mnt_ns->user_ns;
last_dest = dest_mnt;
- first_source = source_mnt;
last_source = source_mnt;
mp = dest_mp;
list = tree_list;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5120d772d9d6..f5d4bc730c0d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -954,8 +954,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
int ret = 0;
struct mm_struct *mm = file->private_data;
- /* Ensure the process spawned far enough to have an environment. */
- if (!mm || !mm->env_end)
+ if (!mm)
return 0;
page = (char *)__get_free_page(GFP_TEMPORARY);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index be3003ef2b4e..ee79fadfc6e7 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1731,32 +1731,6 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
return page;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- struct page *page;
- int nid;
-
- if (!pmd_present(pmd))
- return NULL;
-
- page = vm_normal_page_pmd(vma, addr, pmd);
- if (!page)
- return NULL;
-
- if (PageReserved(page))
- return NULL;
-
- nid = page_to_nid(page);
- if (!node_isset(nid, node_states[N_MEMORY]))
- return NULL;
-
- return page;
-}
-#endif
-
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
@@ -1766,13 +1740,13 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
pte_t *orig_pte;
pte_t *pte;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+ pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
- page = can_gather_numa_stats_pmd(*pmd, vma, addr);
+ page = can_gather_numa_stats(huge_pte, vma, addr);
if (page)
- gather_stats(page, md, pmd_dirty(*pmd),
+ gather_stats(page, md, pte_dirty(huge_pte),
HPAGE_PMD_SIZE/PAGE_SIZE);
spin_unlock(ptl);
return 0;
@@ -1780,7 +1754,6 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd))
return 0;
-#endif
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, vma, addr);
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 827e4d3bbc7a..1cbb8338edf3 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#endif
/* Return a pointer with offset calculated */
-#define __set_fixmap_offset(idx, phys, flags) \
-({ \
- unsigned long ________addr; \
- __set_fixmap(idx, phys, flags); \
- ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
- ________addr; \
+#define __set_fixmap_offset(idx, phys, flags) \
+({ \
+ unsigned long addr; \
+ __set_fixmap(idx, phys, flags); \
+ addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+ addr; \
})
#define set_fixmap_offset(idx, phys) \
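
A note on the ________addr spelling dropped by this hunk: the underscores exist purely to dodge variable capture. With a macro-local named plain addr, a caller whose phys argument itself mentions addr would silently read the macro's own, just-declared local. A sketch of the hazard, where FIX_FOO is a hypothetical fixmap slot:

unsigned long addr = 0x80001234;   /* caller's physical address */
void *va = (void *)__set_fixmap_offset(FIX_FOO, addr, FIXMAP_PAGE_NORMAL);
/* With the local named "addr", (phys) expands to the macro's own
 * uninitialized local rather than the caller's variable; the ugly
 * ________addr name makes such a collision vanishingly unlikely. */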
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c9d804..e56272c919b5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,15 +108,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 val;
preempt_disable();
- if (unlikely(get_user(val, uaddr) != 0)) {
- preempt_enable();
+ if (unlikely(get_user(val, uaddr) != 0))
return -EFAULT;
- }
- if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
- preempt_enable();
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
return -EFAULT;
- }
*uval = val;
preempt_enable();
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index cebecff536a3..461a0558bca4 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -39,8 +39,6 @@ static inline bool drm_arch_can_wc_memory(void)
{
#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
return false;
-#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
- return false;
#else
return true;
#endif
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 67bc2da5d233..83d1926c61e4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -165,13 +165,12 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
-struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 788c7c49a673..e63d3a513e67 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -435,7 +435,6 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
- void (*post_attach)(void);
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 8d5915f78935..23026ba6ff25 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -387,7 +387,6 @@ struct clk_divider {
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
-extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868ccb493..fea160ee5803 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
+extern void cpuset_post_attach_flush(void);
+
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d81746d3b2da..f513dd855cb2 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -592,16 +592,4 @@ static inline struct dentry *d_real(struct dentry *dentry)
return dentry;
}
-static inline struct inode *vfs_select_inode(struct dentry *dentry,
- unsigned open_flags)
-{
- struct inode *inode = d_inode(dentry);
-
- if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
- inode = dentry->d_op->d_select_inode(dentry, open_flags);
-
- return inode;
-}
-
-
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 333d0ca6940f..47be3ad7d3e5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -299,7 +299,7 @@ typedef struct {
void *open_protocol_information;
void *protocols_per_handle;
void *locate_handle_buffer;
- efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
+ void *locate_protocol;
void *install_multiple_protocol_interfaces;
void *uninstall_multiple_protocol_interfaces;
void *calculate_crc32;
@@ -599,10 +599,6 @@ void efi_native_runtime_setup(void);
#define EFI_PROPERTIES_TABLE_GUID \
EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
-#define EFI_RNG_PROTOCOL_GUID \
- EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
- 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
-
typedef struct {
efi_guid_t guid;
u64 table;
diff --git a/include/linux/hash.h b/include/linux/hash.h
index a75b1009d3f7..d0494c399392 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -33,28 +33,12 @@
#error Wordsize not 32 or 64
#endif
-/*
- * The above primes are actively bad for hashing, since they are
- * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
- * real problems. Besides, the "prime" part is pointless for the
- * multiplicative hash.
- *
- * Although a random odd number will do, it turns out that the golden
- * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
- * properties.
- *
- * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
- * (See Knuth vol 3, section 6.4, exercise 9.)
- */
-#define GOLDEN_RATIO_32 0x61C88647
-#define GOLDEN_RATIO_64 0x61C8864680B583EBull
-
static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
-#if BITS_PER_LONG == 64
- hash = hash * GOLDEN_RATIO_64;
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_PRIME_64;
#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
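
The comment removed above describes plain multiplicative hashing: multiply by an odd constant derived from the golden ratio, then keep the top bits of the product, which are the most thoroughly mixed. A freestanding sketch of the 64-bit case (hash_64_sketch is a hypothetical name):

#include <stdint.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull  /* ~2^64 * (3 - sqrt(5))/2 */

static inline uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
{
	/* The multiply diffuses every input bit upward; the shift keeps
	 * the top "bits" bits, which received the most mixing. */
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}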
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b0eb06423d5e..685c262e0be8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -96,7 +96,9 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct address_space *mapping,
pgoff_t idx, unsigned long address);
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+#endif
extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 2c14eeca46f0..b288965e8101 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -173,12 +173,10 @@ enum s2mps11_regulators {
#define S2MPS11_LDO_VSEL_MASK 0x3F
#define S2MPS11_BUCK_VSEL_MASK 0xFF
-#define S2MPS11_BUCK9_VSEL_MASK 0x1F
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
#define S2MPS11_ENABLE_SHIFT 0x06
#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
-#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a91b67b18a73..0b473cbfa7ef 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -334,17 +334,6 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
-enum {
- /*
- * Max wqe size for rdma read is 512 bytes, so this
- * limits our max_sge_rd as the wqe needs to fit:
- * - ctrl segment (16 bytes)
- * - rdma segment (16 bytes)
- * - scatter elements (16 bytes each)
- */
- MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
-};
-
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 412aa988c6ad..af3efd9157f0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -792,9 +792,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 57a44fa9ab89..cbe9b794c714 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1097,8 +1097,6 @@ struct zap_details {
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
-struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t pmd);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
diff --git a/include/linux/net.h b/include/linux/net.h
index 25ef630f1bd6..0b4ac7da583a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -245,15 +245,7 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define net_dbg_ratelimited(fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
- net_ratelimit()) \
- __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
-} while (0)
-#elif defined(DEBUG)
+#if defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 7d250f14d032..4984d372b04b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -363,7 +363,6 @@ struct vb2_ops {
};
struct vb2_buf_ops {
- int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
struct vb2_plane *planes);
diff --git a/include/net/codel.h b/include/net/codel.h
index d168aca115cc..267e70210061 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -162,14 +162,12 @@ struct codel_vars {
* struct codel_stats - contains codel shared variables and stats
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
- * @drop_len: bytes of dropped packets in dequeue()
 * @ecn_mark: number of packets we ECN marked instead of dropping
 * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
- u32 drop_len;
u32 ecn_mark;
u32 ce_mark;
};
@@ -310,7 +308,6 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
goto end;
}
- stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
@@ -333,7 +330,6 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
- stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a6cc576fd467..0816c872b689 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1588,23 +1588,6 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
}
#endif /* CONFIG_IP_VS_NFCT */
-/* Really using conntrack? */
-static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
- struct sk_buff *skb)
-{
-#ifdef CONFIG_IP_VS_NFCT
- enum ip_conntrack_info ctinfo;
- struct nf_conn *ct;
-
- if (!(cp->flags & IP_VS_CONN_F_NFCT))
- return false;
- ct = nf_ct_get(skb, &ctinfo);
- if (ct && !nf_ct_is_untracked(ct))
- return true;
-#endif
- return false;
-}
-
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 86df0835f6b5..b2a8e6338576 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -396,8 +396,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
- unsigned int len);
+void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -699,23 +698,6 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
sch->qstats.backlog = 0;
}
-static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
- struct Qdisc **pold)
-{
- struct Qdisc *old;
-
- sch_tree_lock(sch);
- old = *pold;
- *pold = new;
- if (old != NULL) {
- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
- qdisc_reset(old);
- }
- sch_tree_unlock(sch);
-
- return old;
-}
-
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
struct sk_buff_head *list)
{
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index a6b93706b0fc..cf8f9e700e48 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,7 +34,6 @@
#define _RDMA_IB_H
#include <linux/types.h>
-#include <linux/sched.h>
struct ib_addr {
union {
@@ -87,19 +86,4 @@ struct sockaddr_ib {
__u64 sib_scope_id;
};
-/*
- * The IB interfaces that use write() as bi-directional ioctl() are
- * fundamentally unsafe, since there are lots of ways to trigger "write()"
- * calls from various contexts with elevated privileges. That includes the
- * traditional suid executable error message writes, but also various kernel
- * interfaces that can write to file descriptors.
- *
- * This function provides protection for the legacy API by restricting the
- * calling context.
- */
-static inline bool ib_safe_file_access(struct file *filp)
-{
- return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
-}
-
#endif /* _RDMA_IB_H */
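
The helper removed above guarded the legacy write()-as-ioctl entry points: a caller rejected the request when it arrived with borrowed credentials or with a kernel address limit in force. A sketch of the intended call site, with my_legacy_write as a hypothetical handler:

static ssize_t my_legacy_write(struct file *filp, const char __user *buf,
			       size_t len, loff_t *pos)
{
	/* Refuse writes forwarded from another security context, e.g.
	 * suid error-message writes or set_fs(KERNEL_DS) regions. */
	if (!ib_safe_file_access(filp))
		return -EACCES;

	/* ... parse and execute the command encoded in buf ... */
	return len;
}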
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index 752f5dc040a5..9cf2394f0bcf 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -19,20 +19,14 @@
#ifndef _LINUX_IF_H
#define _LINUX_IF_H
-#include <linux/libc-compat.h> /* for compatibility with glibc */
#include <linux/types.h> /* for "__kernel_caddr_t" et al */
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/compiler.h> /* for "__user" et al */
-#if __UAPI_DEF_IF_IFNAMSIZ
#define IFNAMSIZ 16
-#endif /* __UAPI_DEF_IF_IFNAMSIZ */
#define IFALIASZ 256
#include <linux/hdlc/ioctl.h>
-/* For glibc compatibility. An empty enum does not compile. */
-#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
- __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
/**
* enum net_device_flags - &struct net_device flags
*
@@ -74,8 +68,6 @@
* @IFF_ECHO: echo sent packets. Volatile.
*/
enum net_device_flags {
-/* for compatibility with glibc net/if.h */
-#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
IFF_UP = 1<<0, /* sysfs */
IFF_BROADCAST = 1<<1, /* volatile */
IFF_DEBUG = 1<<2, /* sysfs */
@@ -92,17 +84,11 @@ enum net_device_flags {
IFF_PORTSEL = 1<<13, /* sysfs */
IFF_AUTOMEDIA = 1<<14, /* sysfs */
IFF_DYNAMIC = 1<<15, /* sysfs */
-#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
-#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
IFF_LOWER_UP = 1<<16, /* volatile */
IFF_DORMANT = 1<<17, /* volatile */
IFF_ECHO = 1<<18, /* volatile */
-#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
};
-#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
-/* for compatibility with glibc net/if.h */
-#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
#define IFF_UP IFF_UP
#define IFF_BROADCAST IFF_BROADCAST
#define IFF_DEBUG IFF_DEBUG
@@ -119,13 +105,9 @@ enum net_device_flags {
#define IFF_PORTSEL IFF_PORTSEL
#define IFF_AUTOMEDIA IFF_AUTOMEDIA
#define IFF_DYNAMIC IFF_DYNAMIC
-#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
-
-#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define IFF_LOWER_UP IFF_LOWER_UP
#define IFF_DORMANT IFF_DORMANT
#define IFF_ECHO IFF_ECHO
-#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -184,8 +166,6 @@ enum {
* being very small might be worth keeping for clean configuration.
*/
-/* for compatibility with glibc net/if.h */
-#if __UAPI_DEF_IF_IFMAP
struct ifmap {
unsigned long mem_start;
unsigned long mem_end;
@@ -195,7 +175,6 @@ struct ifmap {
unsigned char port;
/* 3 bytes spare */
};
-#endif /* __UAPI_DEF_IF_IFMAP */
struct if_settings {
unsigned int type; /* Type of physical device or protocol */
@@ -221,8 +200,6 @@ struct if_settings {
* remainder may be interface specific.
*/
-/* for compatibility with glibc net/if.h */
-#if __UAPI_DEF_IF_IFREQ
struct ifreq {
#define IFHWADDRLEN 6
union
@@ -246,7 +223,6 @@ struct ifreq {
struct if_settings ifru_settings;
} ifr_ifru;
};
-#endif /* __UAPI_DEF_IF_IFREQ */
#define ifr_name ifr_ifrn.ifrn_name /* interface name */
#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
@@ -273,8 +249,6 @@ struct ifreq {
* must know all networks accessible).
*/
-/* for compatibility with glibc net/if.h */
-#if __UAPI_DEF_IF_IFCONF
struct ifconf {
int ifc_len; /* size of buffer */
union {
@@ -282,8 +256,6 @@ struct ifconf {
struct ifreq __user *ifcu_req;
} ifc_ifcu;
};
-#endif /* __UAPI_DEF_IF_IFCONF */
-
#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index d5e38c73377c..7d024ceb075d 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -51,40 +51,6 @@
/* We have included glibc headers... */
#if defined(__GLIBC__)
-/* Coordinate with glibc net/if.h header. */
-#if defined(_NET_IF_H)
-
-/* GLIBC headers included first so don't define anything
- * that would already be defined. */
-
-#define __UAPI_DEF_IF_IFCONF 0
-#define __UAPI_DEF_IF_IFMAP 0
-#define __UAPI_DEF_IF_IFNAMSIZ 0
-#define __UAPI_DEF_IF_IFREQ 0
-/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
-#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
-/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
-#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
-#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
-#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
-
-#else /* _NET_IF_H */
-
-/* Linux headers included first, and we must define everything
- * we need. The expectation is that glibc will check the
- * __UAPI_DEF_* defines and adjust appropriately. */
-
-#define __UAPI_DEF_IF_IFCONF 1
-#define __UAPI_DEF_IF_IFMAP 1
-#define __UAPI_DEF_IF_IFNAMSIZ 1
-#define __UAPI_DEF_IF_IFREQ 1
-/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
-#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
-/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
-#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
-
-#endif /* _NET_IF_H */
-
/* Coordinate with glibc netinet/in.h header. */
#if defined(_NETINET_IN_H)
@@ -151,16 +117,6 @@
* that we need. */
#else /* !defined(__GLIBC__) */
-/* Definitions for if.h */
-#define __UAPI_DEF_IF_IFCONF 1
-#define __UAPI_DEF_IF_IFMAP 1
-#define __UAPI_DEF_IF_IFNAMSIZ 1
-#define __UAPI_DEF_IF_IFREQ 1
-/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
-#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
-/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
-#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
-
/* Definitions for in.h */
#define __UAPI_DEF_IN_ADDR 1
#define __UAPI_DEF_IN_IPPROTO 1
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 086168e18ca8..c039f1d68a09 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,8 +183,7 @@
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -192,16 +191,14 @@
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -209,16 +206,14 @@
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -226,8 +221,7 @@
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -235,16 +229,14 @@
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -252,16 +244,14 @@
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/include/xen/page.h b/include/xen/page.h
index 9dc46cb8a0fd..96294ac93755 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -15,9 +15,9 @@
*/
#define xen_pfn_to_page(xen_pfn) \
- (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
+ ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
#define page_to_xen_pfn(page) \
- ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
+ (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
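
The two macro forms above differ only in where the shift happens. The form being removed shifts the pfn by the difference (PAGE_SHIFT - XEN_PAGE_SHIFT) and never leaves pfn range, while the restored form widens to a full byte address first, which can in principle wrap an unsigned long for very large pfns on 32-bit. Both agree whenever no overflow occurs, as this small standalone check illustrates (the PAGE_SHIFT value is an assumption for the example):

#include <stdio.h>

#define XEN_PAGE_SHIFT 12UL	/* Xen always uses 4 KiB pages */
#define PAGE_SHIFT     16UL	/* e.g. a 64 KiB arm64 kernel page */

int main(void)
{
	unsigned long xen_pfn = 0x123456UL;

	/* shift-difference form: pfn-to-pfn, no intermediate address */
	unsigned long pfn_a = xen_pfn >> (PAGE_SHIFT - XEN_PAGE_SHIFT);

	/* byte-address form: widen to an address, then narrow back */
	unsigned long pfn_b = (xen_pfn << XEN_PAGE_SHIFT) >> PAGE_SHIFT;

	printf("%lx %lx\n", pfn_a, pfn_b);	/* identical absent overflow */
	return 0;
}
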
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index d1a7646f79c5..5a8a797d50b7 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
{
switch (type) {
case BPF_TYPE_PROG:
- raw = bpf_prog_inc(raw);
+ atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
break;
case BPF_TYPE_MAP:
- raw = bpf_map_inc(raw, true);
+ bpf_map_inc(raw, true);
break;
default:
WARN_ON_ONCE(1);
@@ -277,8 +277,7 @@ static void *bpf_obj_do_get(const struct filename *pathname,
goto out;
raw = bpf_any_get(inode->i_private, *type);
- if (!IS_ERR(raw))
- touch_atime(&path);
+ touch_atime(&path);
path_put(&path);
return raw;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4e32cc94edd9..3b39550d8485 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -181,18 +181,11 @@ struct bpf_map *__bpf_map_get(struct fd f)
return f.file->private_data;
}
-/* prog's and map's refcnt limit */
-#define BPF_MAX_REFCNT 32768
-
-struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
+void bpf_map_inc(struct bpf_map *map, bool uref)
{
- if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
- atomic_dec(&map->refcnt);
- return ERR_PTR(-EBUSY);
- }
+ atomic_inc(&map->refcnt);
if (uref)
atomic_inc(&map->usercnt);
- return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -204,7 +197,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
if (IS_ERR(map))
return map;
- map = bpf_map_inc(map, true);
+ bpf_map_inc(map, true);
fdput(f);
return map;
@@ -587,15 +580,6 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
return f.file->private_data;
}
-struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
-{
- if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
- atomic_dec(&prog->aux->refcnt);
- return ERR_PTR(-EBUSY);
- }
- return prog;
-}
-
/* called by sockets/tracing/seccomp before attaching program to an event
* pairs with bpf_prog_put()
*/
@@ -608,7 +592,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
if (IS_ERR(prog))
return prog;
- prog = bpf_prog_inc(prog);
+ atomic_inc(&prog->aux->refcnt);
fdput(f);
return prog;
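
The removed BPF_MAX_REFCNT logic caps a reference count and fails the increment instead of letting the counter grow without bound (and eventually wrap). The same saturation guard can be sketched with C11 atomics; the cap value and names mirror the removed code but are illustrative:

#include <stdatomic.h>
#include <errno.h>

#define MAX_REFCNT 32768

/* Take a reference; undo the increment and report -EBUSY if the
 * count would exceed the cap, as the removed bpf_map_inc() did. */
static int ref_get(atomic_int *refcnt)
{
	if (atomic_fetch_add(refcnt, 1) + 1 > MAX_REFCNT) {
		atomic_fetch_sub(refcnt, 1);
		return -EBUSY;
	}
	return 0;
}
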
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2cbfba78d3db..2e7f7ab739e4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -239,6 +239,15 @@ static const char * const reg_type_str[] = {
[CONST_IMM] = "imm",
};
+static const struct {
+ int map_type;
+ int func_id;
+} func_limit[] = {
+ {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
+ {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+ {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
+};
+
static void print_verifier_state(struct verifier_env *env)
{
enum bpf_reg_type t;
@@ -889,44 +898,24 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
+ bool bool_map, bool_func;
+ int i;
+
if (!map)
return 0;
- /* We need a two way check, first is from map perspective ... */
- switch (map->map_type) {
- case BPF_MAP_TYPE_PROG_ARRAY:
- if (func_id != BPF_FUNC_tail_call)
- goto error;
- break;
- case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
- if (func_id != BPF_FUNC_perf_event_read &&
- func_id != BPF_FUNC_perf_event_output)
- goto error;
- break;
- default:
- break;
- }
-
- /* ... and second from the function itself. */
- switch (func_id) {
- case BPF_FUNC_tail_call:
- if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
- goto error;
- break;
- case BPF_FUNC_perf_event_read:
- case BPF_FUNC_perf_event_output:
- if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
- goto error;
- break;
- default:
- break;
+ for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
+ bool_map = (map->map_type == func_limit[i].map_type);
+ bool_func = (func_id == func_limit[i].func_id);
+	/* only when the map & func pair match can it continue;
+	 * don't allow any other map type to be passed into
+	 * the special func.
+ */
+ if (bool_func && bool_map != bool_func)
+ return -EINVAL;
}
return 0;
-error:
- verbose("cannot pass map_type %d into func %d\n",
- map->map_type, func_id);
- return -EINVAL;
}
static int check_call(struct verifier_env *env, int func_id)
@@ -1359,7 +1348,6 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
- BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose("BPF_LD_ABS uses reserved fields\n");
return -EINVAL;
@@ -2015,6 +2003,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
+ fdput(f);
return PTR_ERR(map);
}
@@ -2034,18 +2023,15 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
return -E2BIG;
}
+ /* remember this map */
+ env->used_maps[env->used_map_cnt++] = map;
+
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
- map = bpf_map_inc(map, false);
- if (IS_ERR(map)) {
- fdput(f);
- return PTR_ERR(map);
- }
- env->used_maps[env->used_map_cnt++] = map;
-
+ bpf_map_inc(map, false);
fdput(f);
next_insn:
insn++;
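
The removed switch statements check compatibility in both directions: a special map type may only be used with its matching helper, and a special helper only with its map type; the restored table scan rejects only when the func matches a pair but the map does not. A table-driven variant that keeps both directions, as a sketch with illustrative type and ID names:

#include <stddef.h>

enum map_type { MAP_PLAIN, MAP_PROG_ARRAY, MAP_PERF_ARRAY };
enum func_id  { FUNC_LOOKUP, FUNC_TAIL_CALL, FUNC_PERF_READ };

static const struct { enum map_type m; enum func_id f; } pairs[] = {
	{ MAP_PROG_ARRAY, FUNC_TAIL_CALL },
	{ MAP_PERF_ARRAY, FUNC_PERF_READ },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Reject when exactly one side of a special pair matches. */
static int compatible(enum map_type m, enum func_id f)
{
	for (size_t i = 0; i < ARRAY_SIZE(pairs); i++) {
		int mm = (m == pairs[i].m);
		int mf = (f == pairs[i].f);

		if (mm != mf)
			return 0;	/* special map with wrong func, or vice versa */
	}
	return 1;
}
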
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9d03abef6676..e8d71110ed2a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2776,10 +2776,9 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off, bool threadgroup)
{
struct task_struct *tsk;
- struct cgroup_subsys *ss;
struct cgroup *cgrp;
pid_t pid;
- int ssid, ret;
+ int ret;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
@@ -2827,10 +2826,8 @@ out_unlock_rcu:
rcu_read_unlock();
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
- for_each_subsys(ss, ssid)
- if (ss->post_attach)
- ss->post_attach();
cgroup_kn_unlock(of->kn);
+ cpuset_post_attach_flush();
return ret ?: nbytes;
}
@@ -4747,15 +4744,14 @@ static void css_free_work_fn(struct work_struct *work)
if (ss) {
/* css free path */
- struct cgroup_subsys_state *parent = css->parent;
int id = css->id;
+ if (css->parent)
+ css_put(css->parent);
+
ss->css_free(css);
cgroup_idr_remove(&ss->css_idr, id);
cgroup_put(cgrp);
-
- if (parent)
- css_put(parent);
} else {
/* cgroup free path */
atomic_dec(&cgrp->root->nr_cgrps);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a65d63463420..2df78d45a096 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -57,6 +57,7 @@
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
+#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
@@ -1014,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
}
}
-static void cpuset_post_attach(void)
+void cpuset_post_attach_flush(void)
{
flush_workqueue(cpuset_migrate_mm_wq);
}
@@ -2100,7 +2101,6 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.allow_attach = cpuset_allow_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
- .post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.legacy_cftypes = files,
.early_init = 1,
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 014b69528194..adfdc0536117 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -347,7 +347,6 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
bool truncated)
{
struct ring_buffer *rb = handle->rb;
- bool wakeup = truncated;
unsigned long aux_head;
u64 flags = 0;
@@ -376,16 +375,9 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
- wakeup = true;
- local_add(rb->aux_watermark, &rb->aux_wakeup);
- }
-
- if (wakeup) {
- if (truncated)
- handle->event->pending_disable = 1;
perf_output_wakeup(handle);
+ local_add(rb->aux_watermark, &rb->aux_wakeup);
}
-
handle->event = NULL;
local_set(&rb->aux_nest, 0);
diff --git a/kernel/futex.c b/kernel/futex.c
index 9d8163afd87c..461c72b2dac2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1244,20 +1244,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
if (unlikely(should_fail_futex(true)))
ret = -EFAULT;
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
+ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
ret = -EFAULT;
- } else if (curval != uval) {
- /*
- * If an unconditional UNLOCK_PI operation (user space did not
- * try the TID->0 transition) raced with a waiter setting the
- * FUTEX_WAITERS flag between get_user() and locking the hash
- * bucket lock, retry the operation.
- */
- if ((FUTEX_TID_MASK & curval) == uval)
- ret = -EAGAIN;
- else
- ret = -EINVAL;
- }
+ else if (curval != uval)
+ ret = -EINVAL;
if (ret) {
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
@@ -1484,8 +1474,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
hb_waiters_dec(hb1);
- hb_waiters_inc(hb2);
plist_add(&q->list, &hb2->chain);
+ hb_waiters_inc(hb2);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
@@ -2548,15 +2538,6 @@ retry:
if (ret == -EFAULT)
goto pi_faulted;
/*
- * An unconditional UNLOCK_PI op raced against a waiter
- * setting the FUTEX_WAITERS bit. Try again.
- */
- if (ret == -EAGAIN) {
- spin_unlock(&hb->lock);
- put_futex_key(&key);
- goto retry;
- }
- /*
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
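
The removed hunks distinguish three outcomes of the owner-TID cmpxchg: a fault, a benign race where a waiter set FUTEX_WAITERS between the read and the update (retry), and a genuinely inconsistent owner (error). In plain C11 terms the decision looks roughly like this; the mask and flag values are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdint.h>
#include <errno.h>

#define TID_MASK 0x3fffffffu
#define WAITERS  0x80000000u

static int unlock_owner(_Atomic uint32_t *uval, uint32_t tid, uint32_t newval)
{
	uint32_t cur = tid;

	if (atomic_compare_exchange_strong(uval, &cur, newval))
		return 0;
	/* cmpxchg failed: cur now holds the value actually observed */
	if ((cur & TID_MASK) == tid)
		return -EAGAIN;	/* only flag bits changed under us: retry */
	return -EINVAL;		/* owner TID changed: inconsistent state */
}
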
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index c835270f0c2f..5b9102a47ea5 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -67,13 +67,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
node->locked = 0;
node->next = NULL;
- /*
- * We rely on the full barrier with global transitivity implied by the
- * below xchg() to order the initialization stores above against any
- * observation of @node. And to provide the ACQUIRE ordering associated
- * with a LOCK primitive.
- */
- prev = xchg(lock, node);
+ prev = xchg_acquire(lock, node);
if (likely(prev == NULL)) {
/*
* Lock acquired, don't need to set node->locked to 1. Threads
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 25afcb8a1402..db0472b37feb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11223,7 +11223,7 @@ void set_curr_task(int cpu, struct task_struct *p)
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
-static void sched_free_group(struct task_group *tg)
+static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
free_rt_sched_group(tg);
@@ -11249,7 +11249,7 @@ struct task_group *sched_create_group(struct task_group *parent)
return tg;
err:
- sched_free_group(tg);
+ free_sched_group(tg);
return ERR_PTR(-ENOMEM);
}
@@ -11269,16 +11269,17 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
}
/* rcu callback to free various structures associated with a task group */
-static void sched_free_group_rcu(struct rcu_head *rhp)
+static void free_sched_group_rcu(struct rcu_head *rhp)
{
/* now it should be safe to free those cfs_rqs */
- sched_free_group(container_of(rhp, struct task_group, rcu));
+ free_sched_group(container_of(rhp, struct task_group, rcu));
}
+/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
/* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&tg->rcu, sched_free_group_rcu);
+ call_rcu(&tg->rcu, free_sched_group_rcu);
}
void sched_offline_group(struct task_group *tg)
@@ -11739,26 +11740,31 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
- sched_online_group(tg, parent);
-
return &tg->css;
}
-static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
+ struct task_group *parent = css_tg(css->parent);
- sched_offline_group(tg);
+ if (parent)
+ sched_online_group(tg, parent);
+ return 0;
}
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- /*
- * Relies on the RCU grace period between css_released() and this.
- */
- sched_free_group(tg);
+ sched_destroy_group(tg);
+}
+
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+
+ sched_offline_group(tg);
}
static void cpu_cgroup_fork(struct task_struct *task, void *private)
@@ -12187,8 +12193,9 @@ static struct cftype cpu_files[] = {
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
- .css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
+ .css_online = cpu_cgroup_css_online,
+ .css_offline = cpu_cgroup_css_offline,
.fork = cpu_cgroup_fork,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 26960e49bb8c..fda3b6e1b3a0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2108,13 +2108,8 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
trace_create_file("filter", 0644, file->dir, file,
&ftrace_event_filter_fops);
- /*
- * Only event directories that can be enabled should have
- * triggers.
- */
- if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
- trace_create_file("trigger", 0644, file->dir, file,
- &event_trigger_fops);
+ trace_create_file("trigger", 0644, file->dir, file,
+ &event_trigger_fops);
trace_create_file("format", 0444, file->dir, call,
&ftrace_event_format_fops);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 316b316c7528..ef84d9874d03 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -652,35 +652,6 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
- /*
- * The following mb guarantees that previous clear of a PENDING bit
- * will not be reordered with any speculative LOADS or STORES from
- * work->current_func, which is executed afterwards. This possible
- * reordering can lead to a missed execution on attempt to queue
- * the same @work. E.g. consider this case:
- *
- * CPU#0 CPU#1
- * ---------------------------- --------------------------------
- *
- * 1 STORE event_indicated
- * 2 queue_work_on() {
- * 3 test_and_set_bit(PENDING)
- * 4 } set_..._and_clear_pending() {
- * 5 set_work_data() # clear bit
- * 6 smp_mb()
- * 7 work->current_func() {
- * 8 LOAD event_indicated
- * }
- *
- * Without an explicit full barrier speculative LOAD on line 8 can
- * be executed before CPU#0 does STORE on line 1. If that happens,
- * CPU#0 observes the PENDING bit is still set and new execution of
- * a @work is not queued in a hope, that CPU#1 will eventually
- * finish the queued @work. Meanwhile CPU#1 does not see
- * event_indicated is set, because speculative LOAD was executed
- * before actual STORE.
- */
- smp_mb();
}
static void clear_work_data(struct work_struct *work)
@@ -4476,17 +4447,6 @@ static void rebind_workers(struct worker_pool *pool)
pool->attrs->cpumask) < 0);
spin_lock_irq(&pool->lock);
-
- /*
- * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
- * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
- * being reworked and this can go away in time.
- */
- if (!(pool->flags & POOL_DISASSOCIATED)) {
- spin_unlock_irq(&pool->lock);
- return;
- }
-
pool->flags &= ~POOL_DISASSOCIATED;
for_each_pool_worker(worker, pool) {
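
The removed comment above spells out why a full barrier must follow the clearing of the PENDING bit: without it, loads inside the work function can be speculated ahead of the clear, so a concurrent queue_work() still sees PENDING set and skips queueing, while the work function misses the event it was meant to observe. In C11 atomics the guarantee corresponds to a seq_cst fence after the clear; a sketch, not the kernel primitive:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pending;
static atomic_bool event_indicated;	/* the flag the work fn checks */

static void run_work(void (*fn)(void))
{
	atomic_store_explicit(&pending, false, memory_order_relaxed);
	/* Full barrier: no load in fn() may be ordered before the clear. */
	atomic_thread_fence(memory_order_seq_cst);
	fn();	/* may load event_indicated */
}

static bool queue_work(void)
{
	/* test_and_set: only queue if not already pending */
	return !atomic_exchange(&pending, true);
}
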
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0b119c..03dd576e6773 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -524,9 +524,7 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
free_slot = i;
continue;
}
- if (assoc_array_ptr_is_leaf(ptr) &&
- ops->compare_object(assoc_array_ptr_to_leaf(ptr),
- index_key)) {
+ if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
diff --git a/lib/extable.c b/lib/extable.c
index 0be02ad561e9..4cac81ec225e 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -14,37 +14,7 @@
#include <linux/sort.h>
#include <asm/uaccess.h>
-#ifndef ARCH_HAS_RELATIVE_EXTABLE
-#define ex_to_insn(x) ((x)->insn)
-#else
-static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
-{
- return (unsigned long)&x->insn + x->insn;
-}
-#endif
-
#ifndef ARCH_HAS_SORT_EXTABLE
-#ifndef ARCH_HAS_RELATIVE_EXTABLE
-#define swap_ex NULL
-#else
-static void swap_ex(void *a, void *b, int size)
-{
- struct exception_table_entry *x = a, *y = b, tmp;
- int delta = b - a;
-
- tmp = *x;
- x->insn = y->insn + delta;
- y->insn = tmp.insn - delta;
-
-#ifdef swap_ex_entry_fixup
- swap_ex_entry_fixup(x, y, tmp, delta);
-#else
- x->fixup = y->fixup + delta;
- y->fixup = tmp.fixup - delta;
-#endif
-}
-#endif /* ARCH_HAS_RELATIVE_EXTABLE */
-
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
@@ -56,9 +26,9 @@ static int cmp_ex(const void *a, const void *b)
const struct exception_table_entry *x = a, *y = b;
/* avoid overflow */
- if (ex_to_insn(x) > ex_to_insn(y))
+ if (x->insn > y->insn)
return 1;
- if (ex_to_insn(x) < ex_to_insn(y))
+ if (x->insn < y->insn)
return -1;
return 0;
}
@@ -67,7 +37,7 @@ void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
- cmp_ex, swap_ex);
+ cmp_ex, NULL);
}
#ifdef CONFIG_MODULES
@@ -78,15 +48,13 @@ void sort_extable(struct exception_table_entry *start,
void trim_init_extable(struct module *m)
{
/*trim the beginning*/
- while (m->num_exentries &&
- within_module_init(ex_to_insn(&m->extable[0]), m)) {
+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
m->extable++;
m->num_exentries--;
}
/*trim the end*/
while (m->num_exentries &&
- within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]),
- m))
+ within_module_init(m->extable[m->num_exentries-1].insn, m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
@@ -113,13 +81,13 @@ search_extable(const struct exception_table_entry *first,
* careful, the distance between value and insn
* can be larger than MAX_LONG:
*/
- if (ex_to_insn(mid) < value)
+ if (mid->insn < value)
first = mid + 1;
- else if (ex_to_insn(mid) > value)
+ else if (mid->insn > value)
last = mid - 1;
else
return mid;
- }
- return NULL;
+ }
+ return NULL;
}
#endif
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 0710a62ad2f6..abcecdc2d0f2 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -11,7 +11,8 @@
/*
* Detects 64 bits mode
*/
-#if defined(CONFIG_64BIT)
+#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+ || defined(__ppc64__) || defined(__LP64__))
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
@@ -34,10 +35,6 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
-
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - A16(p))
-
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
do { \
A16(p) = v; \
@@ -54,13 +51,10 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT8(s, d) \
put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - get_unaligned_le16(p))
-
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- put_unaligned_le16(v, (u16 *)(p)); \
- p += 2; \
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+ do { \
+ put_unaligned(v, (u16 *)(p)); \
+ p += 2; \
} while (0)
#endif
@@ -146,6 +140,9 @@ typedef struct _U64_S { u64 v; } U64_S;
#endif
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+ (d = s - get_unaligned_le16(p))
+
#define LZ4_WILDCOPY(s, d, e) \
do { \
LZ4_COPYPACKET(s, d); \
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index e00ff00e861c..3db76b8c1115 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -128,23 +128,6 @@ leave:
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
-static int count_lzeros(MPI a)
-{
- mpi_limb_t alimb;
- int i, lzeros = 0;
-
- for (i = a->nlimbs - 1; i >= 0; i--) {
- alimb = a->d[i];
- if (alimb == 0) {
- lzeros += sizeof(mpi_limb_t);
- } else {
- lzeros += count_leading_zeros(alimb) / 8;
- break;
- }
- }
- return lzeros;
-}
-
/**
 * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
*
@@ -163,7 +146,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
uint8_t *p;
mpi_limb_t alimb;
unsigned int n = mpi_get_size(a);
- int i, lzeros;
+ int i, lzeros = 0;
if (buf_len < n || !buf || !nbytes)
return -EINVAL;
@@ -171,7 +154,14 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
if (sign)
*sign = a->sign;
- lzeros = count_lzeros(a);
+ p = (void *)&a->d[a->nlimbs] - 1;
+
+ for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+ if (!*p)
+ lzeros++;
+ else
+ break;
+ }
p = buf;
*nbytes = n - lzeros;
@@ -353,7 +343,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
u8 *p, *p2;
mpi_limb_t alimb, alimb2;
unsigned int n = mpi_get_size(a);
- int i, x, y = 0, lzeros, buf_len;
+ int i, x, y = 0, lzeros = 0, buf_len;
if (!nbytes || *nbytes < n)
return -EINVAL;
@@ -361,7 +351,14 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
if (sign)
*sign = a->sign;
- lzeros = count_lzeros(a);
+ p = (void *)&a->d[a->nlimbs] - 1;
+
+ for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+ if (!*p)
+ lzeros++;
+ else
+ break;
+ }
*nbytes = n - lzeros;
buf_len = sgl->length;
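
The removed count_lzeros() walks the limb array from the most significant limb downward, adding a whole limb's worth of bytes per zero limb and using count_leading_zeros() on the first non-zero limb; the restored code re-derives this byte-by-byte at each call site. A freestanding equivalent, assuming 64-bit limbs and the GCC/Clang __builtin_clzll builtin:

#include <stdint.h>

/* Count leading zero bytes of a big integer stored as an array of
 * 64-bit limbs, least significant limb first (d[n-1] is most significant). */
static int count_lzeros(const uint64_t *d, int nlimbs)
{
	int lzeros = 0;

	for (int i = nlimbs - 1; i >= 0; i--) {
		if (d[i] == 0) {
			lzeros += (int)sizeof(uint64_t);
		} else {
			/* clzll is only defined for non-zero input */
			lzeros += __builtin_clzll(d[i]) / 8;
			break;
		}
	}
	return lzeros;
}
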
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 25b5cbfb7615..98866a770770 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -327,67 +327,36 @@ out:
}
#define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
+#define test_string_get_size_one(size, blk_size, units, exp_result) \
do { \
- BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
- BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
- __test_string_get_size((size), (blk_size), (exp_result10), \
- (exp_result2)); \
+ BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf); \
+ __test_string_get_size((size), (blk_size), (units), \
+ (exp_result)); \
} while (0)
-static __init void test_string_get_size_check(const char *units,
- const char *exp,
- char *res,
- const u64 size,
- const u64 blk_size)
-{
- if (!memcmp(res, exp, strlen(exp) + 1))
- return;
-
- res[string_get_size_maxbuf - 1] = '\0';
-
- pr_warn("Test 'test_string_get_size' failed!\n");
- pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
- size, blk_size, units);
- pr_warn("expected: '%s', got '%s'\n", exp, res);
-}
-
static __init void __test_string_get_size(const u64 size, const u64 blk_size,
- const char *exp_result10,
- const char *exp_result2)
+ const enum string_size_units units,
+ const char *exp_result)
{
- char buf10[string_get_size_maxbuf];
- char buf2[string_get_size_maxbuf];
-
- string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
- string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+ char buf[string_get_size_maxbuf];
- test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
- size, blk_size);
+ string_get_size(size, blk_size, units, buf, sizeof(buf));
+ if (!memcmp(buf, exp_result, strlen(exp_result) + 1))
+ return;
- test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
- size, blk_size);
+ buf[sizeof(buf) - 1] = '\0';
+ pr_warn("Test 'test_string_get_size_one' failed!\n");
+ pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n",
+ size, blk_size, units);
+ pr_warn("expected: '%s', got '%s'\n", exp_result, buf);
}
static __init void test_string_get_size(void)
{
- /* small values */
- test_string_get_size_one(0, 512, "0 B", "0 B");
- test_string_get_size_one(1, 512, "512 B", "512 B");
- test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
-
- /* normal values */
- test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
- test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
- test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
-
- /* weird block sizes */
- test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
-
- /* huge values */
- test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
- test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
+ test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB");
+ test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB");
+ test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B");
}
static int __init test_string_helpers_init(void)
diff --git a/mm/compaction.c b/mm/compaction.c
index 7f9e60489d67..e0d4a58bcee4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -905,8 +905,16 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
ISOLATE_UNEVICTABLE);
- if (!pfn)
+ /*
+ * In case of fatal failure, release everything that might
+ * have been isolated in the previous iteration, and signal
+ * the failure back to caller.
+ */
+ if (!pfn) {
+ putback_movable_pages(&cc->migratepages);
+ cc->nr_migratepages = 0;
break;
+ }
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
break;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 530e6427f823..62fe06bb7d04 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2134,9 +2134,10 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
* page fault if needed.
*/
return 0;
- if (vma->vm_ops || (vm_flags & VM_NO_THP))
+ if (vma->vm_ops)
/* khugepaged not yet working on file or special mappings */
return 0;
+ VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
@@ -2497,7 +2498,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
return false;
if (is_vma_temporary_stack(vma))
return false;
- return !(vma->vm_flags & VM_NO_THP);
+ VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+ return true;
}
static void collapse_huge_page(struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d1f6dc5a715d..7535ef32a75b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -196,7 +196,6 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
spinlock_t lock; /* for from, to */
- struct mm_struct *mm;
struct mem_cgroup *from;
struct mem_cgroup *to;
unsigned long flags;
@@ -4801,8 +4800,6 @@ static void __mem_cgroup_clear_mc(void)
static void mem_cgroup_clear_mc(void)
{
- struct mm_struct *mm = mc.mm;
-
/*
* we must clear moving_task before waking up waiters at the end of
* task migration.
@@ -4812,10 +4809,7 @@ static void mem_cgroup_clear_mc(void)
spin_lock(&mc.lock);
mc.from = NULL;
mc.to = NULL;
- mc.mm = NULL;
spin_unlock(&mc.lock);
-
- mmput(mm);
}
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4872,7 +4866,6 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
VM_BUG_ON(mc.moved_swap);
spin_lock(&mc.lock);
- mc.mm = mm;
mc.from = from;
mc.to = memcg;
mc.flags = move_flags;
@@ -4882,9 +4875,8 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
ret = mem_cgroup_precharge_mc(mm);
if (ret)
mem_cgroup_clear_mc();
- } else {
- mmput(mm);
}
+ mmput(mm);
return ret;
}
@@ -4998,11 +4990,11 @@ put: /* get_mctgt_type() gets the page */
return ret;
}
-static void mem_cgroup_move_charge(void)
+static void mem_cgroup_move_charge(struct mm_struct *mm)
{
struct mm_walk mem_cgroup_move_charge_walk = {
.pmd_entry = mem_cgroup_move_charge_pte_range,
- .mm = mc.mm,
+ .mm = mm,
};
lru_add_drain_all();
@@ -5014,7 +5006,7 @@ static void mem_cgroup_move_charge(void)
atomic_inc(&mc.from->moving_account);
synchronize_rcu();
retry:
- if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+ if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
/*
* Someone who are holding the mmap_sem might be waiting in
* waitq. So we cancel all extra charges, wake up all waiters,
@@ -5031,16 +5023,23 @@ retry:
* additional charge, the page walk just aborts.
*/
walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
- up_read(&mc.mm->mmap_sem);
+ up_read(&mm->mmap_sem);
atomic_dec(&mc.from->moving_account);
}
-static void mem_cgroup_move_task(void)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
{
- if (mc.to) {
- mem_cgroup_move_charge();
- mem_cgroup_clear_mc();
+ struct cgroup_subsys_state *css;
+ struct task_struct *p = cgroup_taskset_first(tset, &css);
+ struct mm_struct *mm = get_task_mm(p);
+
+ if (mm) {
+ if (mc.to)
+ mem_cgroup_move_charge(mm);
+ mmput(mm);
}
+ if (mc.to)
+ mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -5054,7 +5053,7 @@ static int mem_cgroup_allow_attach(struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(void)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
{
}
#endif
@@ -5270,7 +5269,6 @@ struct cgroup_subsys memory_cgrp_subsys = {
.cancel_attach = mem_cgroup_cancel_attach,
.attach = mem_cgroup_move_task,
.allow_attach = mem_cgroup_allow_attach,
- .post_attach = mem_cgroup_move_task,
.bind = mem_cgroup_bind,
.dfl_cftypes = memory_files,
.legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory.c b/mm/memory.c
index b536e3d60fc7..6098837a4e5e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -797,46 +797,6 @@ out:
return pfn_to_page(pfn);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t pmd)
-{
- unsigned long pfn = pmd_pfn(pmd);
-
- /*
- * There is no pmd_special() but there may be special pmds, e.g.
- * in a direct-access (dax) mapping, so let's just replicate the
- * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
- */
- if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
- if (vma->vm_flags & VM_MIXEDMAP) {
- if (!pfn_valid(pfn))
- return NULL;
- goto out;
- } else {
- unsigned long off;
- off = (addr - vma->vm_start) >> PAGE_SHIFT;
- if (pfn == vma->vm_pgoff + off)
- return NULL;
- if (!is_cow_mapping(vma->vm_flags))
- return NULL;
- }
- }
-
- if (is_zero_pfn(pfn))
- return NULL;
- if (unlikely(pfn > highest_memmap_pfn))
- return NULL;
-
- /*
- * NOTE! We still have PageReserved() pages in the page tables.
- * eg. VDSO mappings can cause them to exist.
- */
-out:
- return pfn_to_page(pfn);
-}
-#endif
-
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
diff --git a/mm/migrate.c b/mm/migrate.c
index 3db1b0277eb4..cd1e63062459 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -963,13 +963,7 @@ out:
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
/* Soft-offlined page shouldn't go through lru cache list */
- if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
- /*
- * With this release, we free successfully migrated
- * page and set PG_HWPoison on just freed page
- * intentionally. Although it's rather weird, it's how
- * HWPoison flag works at the moment.
- */
+ if (reason == MR_MEMORY_FAILURE) {
put_page(page);
if (!test_set_page_hwpoison(page))
num_poisoned_pages_inc();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8bf8e06a56a6..112c0bebfff3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1899,8 +1899,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (gdtc->dirty > gdtc->bg_thresh)
return true;
- if (wb_stat(wb, WB_RECLAIMABLE) >
- wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
+ if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
return true;
if (mdtc) {
@@ -1914,8 +1913,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (mdtc->dirty > mdtc->bg_thresh)
return true;
- if (wb_stat(wb, WB_RECLAIMABLE) >
- wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
+ if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
return true;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c8a31783c2d6..ffcb2b56f6c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6294,7 +6294,7 @@ int __meminit init_per_zone_wmark_min(void)
setup_per_zone_inactive_ratio();
return 0;
}
-core_initcall(init_per_zone_wmark_min)
+module_init(init_per_zone_wmark_min)
/*
* min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/mm/slub.c b/mm/slub.c
index fdc0721ebc31..2d5bbea0f0e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2832,7 +2832,6 @@ struct detached_freelist {
void *tail;
void *freelist;
int cnt;
- struct kmem_cache *s;
};
/*
@@ -2847,9 +2846,8 @@ struct detached_freelist {
* synchronization primitive. Look ahead in the array is limited due
* to performance reasons.
*/
-static inline
-int build_detached_freelist(struct kmem_cache *s, size_t size,
- void **p, struct detached_freelist *df)
+static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ void **p, struct detached_freelist *df)
{
size_t first_skipped_index = 0;
int lookahead = 3;
@@ -2865,11 +2863,8 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
if (!object)
return 0;
- /* Support for memcg, compiler can optimize this out */
- df->s = cache_from_obj(s, object);
-
/* Start new detached freelist */
- set_freepointer(df->s, object, NULL);
+ set_freepointer(s, object, NULL);
df->page = virt_to_head_page(object);
df->tail = object;
df->freelist = object;
@@ -2884,7 +2879,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
/* df->page is always set at this point */
if (df->page == virt_to_head_page(object)) {
/* Opportunity build freelist */
- set_freepointer(df->s, object, df->freelist);
+ set_freepointer(s, object, df->freelist);
df->freelist = object;
df->cnt++;
p[size] = NULL; /* mark object processed */
@@ -2903,20 +2898,25 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
return first_skipped_index;
}
+
/* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
{
if (WARN_ON(!size))
return;
do {
struct detached_freelist df;
+ struct kmem_cache *s;
+
+ /* Support for memcg */
+ s = cache_from_obj(orig_s, p[size - 1]);
size = build_detached_freelist(s, size, p, &df);
if (unlikely(!df.page))
continue;
- slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
+ slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
} while (likely(size));
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d5c3ef60a71e..73f5cec91063 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2683,7 +2683,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
sc->gfp_mask |= __GFP_HIGHMEM;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- gfp_zone(sc->gfp_mask), sc->nodemask) {
+ requested_highidx, sc->nodemask) {
enum zone_type classzone_idx;
if (!populated_zone(zone))
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c1ea19478119..fc083996e40a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1732,13 +1732,10 @@ static struct page *isolate_source_page(struct size_class *class)
static unsigned long zs_can_compact(struct size_class *class)
{
unsigned long obj_wasted;
- unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
- unsigned long obj_used = zs_stat_get(class, OBJ_USED);
- if (obj_allocated <= obj_used)
- return 0;
+ obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
+ zs_stat_get(class, OBJ_USED);
- obj_wasted = obj_allocated - obj_used;
obj_wasted /= get_maxobj_per_zspage(class->size,
class->pages_per_zspage);
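
The removed guard matters because both statistics are unsigned: if OBJ_USED momentarily exceeds OBJ_ALLOCATED, the plain subtraction in the restored code wraps around to a huge "wasted" value and compaction over-reports. The safe form, as a minimal sketch:

#include <stdio.h>

static unsigned long wasted(unsigned long allocated, unsigned long used)
{
	if (allocated <= used)
		return 0;		/* avoid unsigned wrap-around */
	return allocated - used;
}

int main(void)
{
	printf("%lu\n", wasted(10, 12));	/* 0, not ULONG_MAX - 1 */
	return 0;
}
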
diff --git a/mm/zswap.c b/mm/zswap.c
index 340261946fda..bf14508afd64 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,8 +170,6 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
-/* pool counter to provide unique names to zpool */
-static atomic_t zswap_pools_count = ATOMIC_INIT(0);
/* used by param callback function */
static bool zswap_init_started;
@@ -567,7 +565,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
struct zswap_pool *pool;
- char name[38]; /* 'zswap' + 32 char (max) num + \0 */
gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -576,10 +573,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
return NULL;
}
- /* unique name for each pool specifically required by zsmalloc */
- snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
-
- pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
+ pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
if (!pool->zpool) {
pr_err("%s zpool not available\n", type);
goto error;
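
The removed lines generate a distinct name per pool because, per the removed comment, zsmalloc specifically requires unique pool names; the restored code passes the literal "zswap" for every pool. A userspace sketch of the unique-name scheme, keeping the original's buffer-size reasoning:

#include <stdio.h>
#include <stdatomic.h>

static atomic_int pool_count;

/* 'zswap' + 32-char (max) number + NUL fits in 38 bytes */
static void pool_name(char name[38])
{
	snprintf(name, 38, "zswap%x",
		 (unsigned)(atomic_fetch_add(&pool_count, 1) + 1));
}
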
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 5f19133c5530..a49c705fb86b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -553,7 +553,6 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* be sent to
* @bat_priv: the bat priv with all the soft interface information
* @ip_dst: ipv4 to look up in the DHT
- * @vid: VLAN identifier
*
* An originator O is selected if and only if its DHT_ID value is one of three
* closest values (from the LEFT, with wrap around if needed) then the hash
@@ -562,8 +561,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
*/
static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
- unsigned short vid)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
{
int select;
batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -579,7 +577,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
return NULL;
dat.ip = ip_dst;
- dat.vid = vid;
+ dat.vid = 0;
ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
BATADV_DAT_ADDR_MAX);
@@ -599,7 +597,6 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @ip: the DHT key
- * @vid: VLAN identifier
* @packet_subtype: unicast4addr packet subtype to use
*
* This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -610,7 +607,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
*/
static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
struct sk_buff *skb, __be32 ip,
- unsigned short vid, int packet_subtype)
+ int packet_subtype)
{
int i;
bool ret = false;
@@ -619,7 +616,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
struct sk_buff *tmp_skb;
struct batadv_dat_candidate *cand;
- cand = batadv_dat_select_candidates(bat_priv, ip, vid);
+ cand = batadv_dat_select_candidates(bat_priv, ip);
if (!cand)
goto out;
@@ -1007,7 +1004,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
ret = true;
} else {
/* Send the request to the DHT */
- ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
+ ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
BATADV_P_DAT_DHT_GET);
}
out:
@@ -1135,8 +1132,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
/* Send the ARP reply to the candidates for both the IP addresses that
* the node obtained from the ARP reply
*/
- batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
- batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
}
/**
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index d8a2f33e60e5..3207667e69de 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -104,15 +104,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
neigh_node = NULL;
spin_lock_bh(&orig_node->neigh_list_lock);
- /* curr_router used earlier may not be the current orig_ifinfo->router
- * anymore because it was dereferenced outside of the neigh_list_lock
- * protected region. After the new best neighbor has replace the current
- * best neighbor the reference counter needs to decrease. Consequently,
- * the code needs to ensure the curr_router variable contains a pointer
- * to the replaced best neighbor.
- */
- curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
-
rcu_assign_pointer(orig_ifinfo->router, neigh_node);
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_orig_ifinfo_free_ref(orig_ifinfo);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 0e0c3b8ed927..f664324805eb 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -630,9 +630,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- if (!forw_packet->own)
- atomic_inc(&bat_priv->bcast_queue_left);
-
batadv_forw_packet_free(forw_packet);
}
}
@@ -660,9 +657,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- if (!forw_packet->own)
- atomic_inc(&bat_priv->batman_queue_left);
-
batadv_forw_packet_free(forw_packet);
}
}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 720f1a5b81ac..ac4d08de5df4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -407,17 +407,11 @@ void batadv_interface_rx(struct net_device *soft_iface,
*/
nf_reset(skb);
- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
- goto dropped;
-
vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
- if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
- goto dropped;
-
vhdr = (struct vlan_ethhdr *)skb->data;
if (vhdr->h_vlan_encapsulated_proto != ethertype)
@@ -429,6 +423,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
}
/* skb->dev & skb->pkt_type are set here */
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ goto dropped;
skb->protocol = eth_type_trans(skb, soft_iface);
/* should not be necessary anymore as we use skb_pull_rcsum()
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 60a3dbfca8a1..263b4de4de57 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,19 +21,18 @@
#include <asm/uaccess.h>
#include "br_private.h"
+/* called with RTNL */
static int get_bridge_ifindices(struct net *net, int *indices, int num)
{
struct net_device *dev;
int i = 0;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
if (i >= num)
break;
if (dev->priv_flags & IFF_EBRIDGE)
indices[i++] = dev->ifindex;
}
- rcu_read_unlock();
return i;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ea9893743a0f..03661d97463c 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1270,7 +1270,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
- unsigned int offset = skb_transport_offset(skb);
__be32 group;
int err = 0;
@@ -1281,14 +1280,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
group = ih->group;
- if (skb->len == offset + sizeof(*ih)) {
+ if (skb->len == sizeof(*ih)) {
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
if (!max_delay) {
max_delay = 10 * HZ;
group = 0;
}
- } else if (skb->len >= offset + sizeof(*ih3)) {
+ } else if (skb->len >= sizeof(*ih3)) {
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs)
goto out;
@@ -1349,7 +1348,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
- unsigned int offset = skb_transport_offset(skb);
const struct in6_addr *group = NULL;
bool is_general_query;
int err = 0;
@@ -1359,8 +1357,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- if (skb->len == offset + sizeof(*mld)) {
- if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
+ if (skb->len == sizeof(*mld)) {
+ if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
goto out;
}
@@ -1369,7 +1367,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (max_delay)
group = &mld->mld_mca;
} else {
- if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
+ if (!pskb_may_pull(skb, sizeof(*mld2q))) {
err = -EINVAL;
goto out;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 87b91ffbdec3..ca966f7de351 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1175,16 +1175,14 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
- struct rtnl_link_ifmap map;
-
- memset(&map, 0, sizeof(map));
- map.mem_start = dev->mem_start;
- map.mem_end = dev->mem_end;
- map.base_addr = dev->base_addr;
- map.irq = dev->irq;
- map.dma = dev->dma;
- map.port = dev->if_port;
-
+ struct rtnl_link_ifmap map = {
+ .mem_start = dev->mem_start,
+ .mem_end = dev->mem_end,
+ .base_addr = dev->base_addr,
+ .irq = dev->irq,
+ .dma = dev->dma,
+ .port = dev->if_port,
+ };
if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
return -EMSGSIZE;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 38467f386b14..732be5afa6ce 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4444,16 +4444,15 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
__skb_push(skb, offset);
err = __vlan_insert_tag(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
- if (err) {
- __skb_pull(skb, offset);
+ if (err)
return err;
- }
-
skb->protocol = skb->vlan_proto;
skb->mac_len += VLAN_HLEN;
-
- skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
__skb_pull(skb, offset);
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(skb->data
+ + (2 * ETH_ALEN), VLAN_HLEN, 0));
}
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index b1dc096d22f8..607a14f20d88 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,13 +1034,10 @@ source_ok:
if (!fld.daddr) {
fld.daddr = fld.saddr;
+ err = -EADDRNOTAVAIL;
if (dev_out)
dev_put(dev_out);
- err = -EINVAL;
dev_out = init_net.loopback_dev;
- if (!dev_out->dn_ptr)
- goto out;
- err = -EADDRNOTAVAIL;
dev_hold(dev_out);
if (!fld.daddr) {
fld.daddr =
@@ -1113,8 +1110,6 @@ source_ok:
if (dev_out == NULL)
goto out;
dn_db = rcu_dereference_raw(dev_out->dn_ptr);
- if (!dn_db)
- goto e_inval;
/* Possible improvement - check all devices for local addr */
if (dn_dev_islocal(dev_out, fld.daddr)) {
dev_put(dev_out);
@@ -1156,8 +1151,6 @@ select_source:
dev_put(dev_out);
dev_out = init_net.loopback_dev;
dev_hold(dev_out);
- if (!dev_out->dn_ptr)
- goto e_inval;
fld.flowidn_oif = dev_out->ifindex;
if (res.fi)
dn_fib_info_put(res.fi);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 98c754e61024..f97ae9d93ee9 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -905,11 +905,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
if (!prim) {
- /* if the device has been deleted, we don't perform
- * address promotion
- */
- if (!in_dev->dead)
- pr_warn("%s: bug: prim == NULL\n", __func__);
+ pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
if (iprim && iprim != prim) {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 2b68418c7198..d97268e8ff10 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -975,8 +975,6 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
val = 65535 - 40;
if (type == RTAX_MTU && val > 65535 - 15)
val = 65535 - 15;
- if (type == RTAX_HOPLIMIT && val > 255)
- val = 255;
if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
return -EINVAL;
fi->fib_metrics[type - 1] = val;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 7dc962b89fa1..614521437e30 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -180,7 +180,6 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
return flags;
}
-/* Fills in tpi and returns header length to be pulled. */
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err)
{
@@ -240,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
return -EINVAL;
}
}
- return hdr_len;
+ return iptunnel_pull_header(skb, hdr_len, tpi->proto);
}
static void ipgre_err(struct sk_buff *skb, u32 info,
@@ -343,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
struct tnl_ptk_info tpi;
bool csum_err = false;
- if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
+ if (parse_gre_header(skb, &tpi, &csum_err)) {
if (!csum_err) /* ignore csum errors. */
return;
}
@@ -421,7 +420,6 @@ static int gre_rcv(struct sk_buff *skb)
{
struct tnl_ptk_info tpi;
bool csum_err = false;
- int hdr_len;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -431,10 +429,7 @@ static int gre_rcv(struct sk_buff *skb)
}
#endif
- hdr_len = parse_gre_header(skb, &tpi, &csum_err);
- if (hdr_len < 0)
- goto drop;
- if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0)
goto drop;
if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fb54659320d8..79a957ea6545 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2047,18 +2047,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
*/
if (fi && res->prefixlen < 4)
fi = NULL;
- } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
- (orig_oif != dev_out->ifindex)) {
- /* For local routes that require a particular output interface
- * we do not want to cache the result. Caching the result
- * causes incorrect behaviour when there are multiple source
- * addresses on the interface, the end result being that if the
- * intended recipient is waiting on that interface for the
- * packet he won't receive it because it will be delivered on
- * the loopback interface and the IP_PKTINFO ipi_ifindex will
- * be set to the loopback interface as well.
- */
- fi = NULL;
}
fnhe = NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0dd207cd1f38..9f069bd9de46 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2625,10 +2625,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
*/
if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
skb_headroom(skb) >= 0xFFFF)) {
- struct sk_buff *nskb;
-
- skb_mstamp_get(&skb->skb_mstamp);
- nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
+ struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+ GFP_ATOMIC);
err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-ENOBUFS;
} else {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a234552a7e3d..45f5ae51de65 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -496,8 +496,10 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
/* Yes, and fold redundant checksum back. 8) */
- skb_postpush_rcsum(head, skb_network_header(head),
- skb_network_header_len(head));
+ if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_partial(skb_network_header(head),
+ skb_network_header_len(head),
+ head->csum);
rcu_read_lock();
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 161cdc072547..01d7ee57d937 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1721,8 +1721,6 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
} else {
val = nla_get_u32(nla);
}
- if (type == RTAX_HOPLIMIT && val > 255)
- val = 255;
if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
goto err;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index bb8edb9ef506..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -626,7 +626,6 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
struct llc_pktinfo info;
- memset(&info, 0, sizeof(info));
info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
llc_pdu_decode_dsap(skb, &info.lpi_sap);
llc_pdu_decode_da(skb, info.lpi_mac);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bcb0a1b64556..7a2b7915093b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1750,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0) {
- ieee80211_if_free(ndev);
+ free_netdev(ndev);
return ret;
}
@@ -1836,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = register_netdevice(ndev);
if (ret) {
- ieee80211_if_free(ndev);
+ free_netdev(ndev);
return ret;
}
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4da560005b0e..f57b4dcdb233 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1757,34 +1757,15 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
cp = pp->conn_in_get(ipvs, af, skb, &iph);
conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
- bool uses_ct = false, resched = false;
-
- if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
- unlikely(!atomic_read(&cp->dest->weight))) {
- resched = true;
- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
- if (!atomic_read(&cp->n_control)) {
- resched = true;
- } else {
- /* Do not reschedule controlling connection
- * that uses conntrack while it is still
- * referenced by controlled connection(s).
- */
- resched = !uses_ct;
- }
- }
-
- if (resched) {
- if (!atomic_read(&cp->n_control))
- ip_vs_conn_expire_now(cp);
- __ip_vs_conn_put(cp);
- if (uses_ct)
- return NF_DROP;
- cp = NULL;
- }
+ if (conn_reuse_mode && !iph.fragoffs &&
+ is_new_conn(skb, &iph) && cp &&
+ ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+ unlikely(!atomic_read(&cp->dest->weight))) ||
+ unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+ if (!atomic_read(&cp->n_control))
+ ip_vs_conn_expire_now(cp);
+ __ip_vs_conn_put(cp);
+ cp = NULL;
}
if (unlikely(!cp)) {
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 0a6eb5c0d9e9..1b8d594e493a 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
const char *dptr;
int retc;
- retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+ ip_vs_fill_iph_skb(p->af, skb, false, &iph);
/* Only useful with UDP */
- if (!retc || iph.protocol != IPPROTO_UDP)
+ if (iph.protocol != IPPROTO_UDP)
return -EINVAL;
/* todo: IPv6 fragments:
* I think this only should be done for the first fragment. /HS
@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
- if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
+ if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
return -EINVAL;
/* N.B: pe_data is only set on success,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 86a3c6f0c871..3cb3cb831591 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1757,7 +1757,6 @@ void nf_conntrack_init_end(void)
int nf_conntrack_init_net(struct net *net)
{
- static atomic64_t unique_id;
int ret = -ENOMEM;
int cpu;
@@ -1780,8 +1779,7 @@ int nf_conntrack_init_net(struct net *net)
if (!net->ct.stat)
goto err_pcpu_lists;
- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
- (u64)atomic64_inc_return(&unique_id));
+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
if (!net->ct.slabname)
goto err_slabname;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 992b35fb8615..59651af8cc27 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
skb_queue_purge(&sk->sk_write_queue);
- if (nlk->portid && nlk->bound) {
+ if (nlk->portid) {
struct netlink_notify n = {
.net = sock_net(sk),
.protocol = sk->sk_protocol,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 7cb8184ac165..c88d0f2d3e01 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -158,7 +158,9 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
new_mpls_lse = (__be32 *)skb_mpls_header(skb);
*new_mpls_lse = mpls->mpls_lse;
- skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
+ MPLS_HLEN, 0));
hdr = eth_hdr(skb);
hdr->h_proto = mpls->mpls_ethertype;
@@ -278,7 +280,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
mask->eth_dst);
- skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+ ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
@@ -461,7 +463,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
- set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
+ set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
true);
memcpy(&flow_key->ipv6.addr.src, masked,
sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +485,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
NULL, &flags)
!= NEXTHDR_ROUTING);
- set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
+ set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
recalc_csum);
memcpy(&flow_key->ipv6.addr.dst, masked,
sizeof(flow_key->ipv6.addr.dst));
@@ -637,7 +639,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
/* Reconstruct the MAC header. */
skb_push(skb, data->l2_len);
memcpy(skb->data, &data->l2_data, data->l2_len);
- skb_postpush_rcsum(skb, skb->data, data->l2_len);
+ ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
skb_reset_mac_header(skb);
ovs_vport_send(vport, skb);
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 76fcaf1fd2a9..6b0190b987ec 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
return;
skb_push(skb, ETH_HLEN);
- skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
+ ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
return;
error:
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 6e2b62f9d595..8ea3a96980ac 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -184,6 +184,13 @@ static inline struct vport *vport_from_priv(void *priv)
int ovs_vport_receive(struct vport *, struct sk_buff *,
const struct ip_tunnel_info *);
+static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+}
+
static inline const char *ovs_vport_name(struct vport *vport)
{
return vport->dev->name;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9cc7b512b472..da1ae0e13cb5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3436,7 +3436,6 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
i->ifindex = mreq->mr_ifindex;
i->alen = mreq->mr_alen;
memcpy(i->addr, mreq->mr_address, i->alen);
- memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
i->count = 1;
i->next = po->mclist;
po->mclist = i;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b855352167b1..34967c19da85 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -744,15 +744,14 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
return 0;
}
-void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
- unsigned int len)
+void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
int drops;
- if (n == 0 && len == 0)
+ if (n == 0)
return;
drops = max_t(int, n, 0);
rcu_read_lock();
@@ -775,12 +774,11 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
cops->put(sch, cl);
}
sch->q.qlen -= n;
- sch->qstats.backlog -= len;
__qdisc_qstats_drop(sch, drops);
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
+EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
struct nlmsghdr *n, u32 clid,
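
Most of the scheduler hunks below are this same rename. The distinction matters: qdisc_tree_reduce_backlog() walks up to each ancestor qdisc and subtracts both a packet count and a byte count, while the qdisc_tree_decrease_qlen() form reverted to here adjusts packet counts only, leaving ancestors' byte backlog stale after drops in a child. A toy model of the two accounting styles, with stand-in types:

struct toy_qdisc {
	struct toy_qdisc *parent;	/* NULL at the root */
	unsigned int qlen;		/* packets queued in this subtree */
	unsigned int backlog;		/* bytes queued in this subtree */
};

static void toy_tree_reduce_backlog(struct toy_qdisc *sch,
				    unsigned int n, unsigned int len)
{
	if (n == 0 && len == 0)
		return;
	for (sch = sch->parent; sch; sch = sch->parent) {
		sch->qlen -= n;
		sch->backlog -= len;	/* bytes stay in sync */
	}
}

static void toy_tree_decrease_qlen(struct toy_qdisc *sch, unsigned int n)
{
	if (n == 0)
		return;
	for (sch = sch->parent; sch; sch = sch->parent)
		sch->qlen -= n;		/* byte backlog is not touched */
}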
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index baafddf229ce..c538d9e4a8f6 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1624,8 +1624,13 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new->reshape_fail = cbq_reshape_fail;
#endif
}
+ sch_tree_lock(sch);
+ *old = cl->q;
+ cl->q = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
- *old = qdisc_replace(sch, new, &cl->q);
return 0;
}
@@ -1909,7 +1914,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
- unsigned int qlen, backlog;
+ unsigned int qlen;
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
@@ -1917,9 +1922,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
qlen = cl->q->q.qlen;
- backlog = cl->q->qstats.backlog;
qdisc_reset(cl->q);
- qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+ qdisc_tree_decrease_qlen(cl->q, qlen);
if (cl->next_alive)
cbq_deactivate_class(cl);
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 0a08c860eee4..5ffb8b8337c7 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
choke_zap_tail_holes(q);
qdisc_qstats_backlog_dec(sch, skb);
- qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch);
+ qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen;
}
@@ -456,7 +456,6 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
old = q->tab;
if (old) {
unsigned int oqlen = sch->q.qlen, tail = 0;
- unsigned dropped = 0;
while (q->head != q->tail) {
struct sk_buff *skb = q->tab[q->head];
@@ -468,12 +467,11 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ntab[tail++] = skb;
continue;
}
- dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen;
qdisc_drop(skb, sch);
}
- qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
+ qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
q->head = 0;
q->tail = tail;
}
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 9b7e2980ee5c..535007d5f0b5 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -79,13 +79,12 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->stats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
q->stats.drop_count = 0;
- q->stats.drop_len = 0;
}
if (skb)
qdisc_bstats_update(sch, skb);
@@ -117,7 +116,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{
struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1];
- unsigned int qlen, dropped = 0;
+ unsigned int qlen;
int err;
if (!opt)
@@ -157,11 +156,10 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
- dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index d6e3ad43cecb..f26bdea875c1 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -53,10 +53,9 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
static void drr_purge_queue(struct drr_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
- unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
@@ -227,7 +226,11 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- *old = qdisc_replace(sch, new, &cl->qdisc);
+ sch_tree_lock(sch);
+ drr_purge_queue(cl);
+ *old = cl->qdisc;
+ cl->qdisc = new;
+ sch_tree_unlock(sch);
return 0;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index d0dff0cd8186..f357f34d02d2 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -73,7 +73,13 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- *old = qdisc_replace(sch, new, &p->q);
+ sch_tree_lock(sch);
+ *old = p->q;
+ p->q = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+
return 0;
}
@@ -258,7 +264,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -281,7 +286,6 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
return NULL;
qdisc_bstats_update(sch, skb);
- qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
@@ -397,7 +401,6 @@ static void dsmark_reset(struct Qdisc *sch)
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
qdisc_reset(p->q);
- sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3c6a47d66a04..109b2322778f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -662,7 +662,6 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1];
int err, drop_count = 0;
- unsigned drop_len = 0;
u32 fq_log;
if (!opt)
@@ -737,11 +736,10 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (!skb)
break;
- drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
drop_count++;
}
- qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
+ qdisc_tree_decrease_qlen(sch, drop_count);
sch_tree_unlock(sch);
return err;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d3fc8f9dd3d4..4c834e93dafb 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
- unsigned int idx, prev_backlog;
+ unsigned int idx;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
@@ -203,7 +203,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
- prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet
* from this flow.
@@ -212,7 +211,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
- qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ qdisc_tree_decrease_qlen(sch, 1);
return NET_XMIT_SUCCESS;
}
@@ -242,7 +241,6 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
struct fq_codel_flow *flow;
struct list_head *head;
u32 prev_drop_count, prev_ecn_mark;
- unsigned int prev_backlog;
begin:
head = &q->new_flows;
@@ -261,7 +259,6 @@ begin:
prev_drop_count = q->cstats.drop_count;
prev_ecn_mark = q->cstats.ecn_mark;
- prev_backlog = sch->qstats.backlog;
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
dequeue);
@@ -279,14 +276,12 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
- q->cstats.drop_len);
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
q->cstats.drop_count = 0;
- q->cstats.drop_len = 0;
}
return skb;
}
@@ -377,13 +372,11 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_codel_dequeue(sch);
- q->cstats.drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
q->cstats.drop_count++;
}
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
q->cstats.drop_count = 0;
- q->cstats.drop_len = 0;
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index aa4725038f94..16bc83b2842a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,15 +159,12 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
if (validate)
skb = validate_xmit_skb_list(skb, dev);
- if (likely(skb)) {
+ if (skb) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
HARD_TX_UNLOCK(dev, txq);
- } else {
- spin_lock(root_lock);
- return qdisc_qlen(q);
}
spin_lock(root_lock);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d783d7cc3348..b7ebe2c87586 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -895,10 +895,9 @@ static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
- unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static void
@@ -1216,7 +1215,11 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new = &noop_qdisc;
}
- *old = qdisc_replace(sch, new, &cl->qdisc);
+ sch_tree_lock(sch);
+ hfsc_purge_queue(sch, cl);
+ *old = cl->qdisc;
+ cl->qdisc = new;
+ sch_tree_unlock(sch);
return 0;
}
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 13d6f83ec491..86b04e31e60b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -382,7 +382,6 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct hhf_sched_data *q = qdisc_priv(sch);
enum wdrr_bucket_idx idx;
struct wdrr_bucket *bucket;
- unsigned int prev_backlog;
idx = hhf_classify(skb, sch);
@@ -410,7 +409,6 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
- prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet from this
* bucket.
@@ -419,7 +417,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this. */
- qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ qdisc_tree_decrease_qlen(sch, 1);
return NET_XMIT_SUCCESS;
}
@@ -529,7 +527,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
- unsigned int qlen, prev_backlog;
+ unsigned int qlen;
int err;
u64 non_hh_quantum;
u32 new_quantum = q->quantum;
@@ -579,14 +577,12 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
}
qlen = sch->q.qlen;
- prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = hhf_dequeue(sch);
kfree_skb(skb);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
- prev_backlog - sch->qstats.backlog);
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 87b02ed3d5f2..15ccd7f8fb2a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -600,7 +600,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
htb_activate(q, cl);
}
- qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -890,7 +889,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
ok:
qdisc_bstats_update(sch, skb);
qdisc_unthrottled(sch);
- qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -957,7 +955,6 @@ static unsigned int htb_drop(struct Qdisc *sch)
unsigned int len;
if (cl->un.leaf.q->ops->drop &&
(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
- sch->qstats.backlog -= len;
sch->q.qlen--;
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
@@ -987,12 +984,12 @@ static void htb_reset(struct Qdisc *sch)
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
+
}
}
qdisc_watchdog_cancel(&q->watchdog);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
- sch->qstats.backlog = 0;
memset(q->hlevel, 0, sizeof(q->hlevel));
memset(q->row_mask, 0, sizeof(q->row_mask));
for (i = 0; i < TC_HTB_NUMPRIO; i++)
@@ -1166,7 +1163,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
cl->common.classid)) == NULL)
return -ENOBUFS;
- *old = qdisc_replace(sch, new, &cl->un.leaf.q);
+ sch_tree_lock(sch);
+ *old = cl->un.leaf.q;
+ cl->un.leaf.q = new;
+ if (*old != NULL) {
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ }
+ sch_tree_unlock(sch);
return 0;
}
@@ -1268,6 +1272,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
+ unsigned int qlen;
struct Qdisc *new_q = NULL;
int last_child = 0;
@@ -1287,11 +1292,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
if (!cl->level) {
- unsigned int qlen = cl->un.leaf.q->q.qlen;
- unsigned int backlog = cl->un.leaf.q->qstats.backlog;
-
+ qlen = cl->un.leaf.q->q.qlen;
qdisc_reset(cl->un.leaf.q);
- qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+ qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
}
/* delete from hash and active; remainder in destroy_class */
@@ -1425,11 +1428,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
sch_tree_lock(sch);
if (parent && !parent->level) {
unsigned int qlen = parent->un.leaf.q->q.qlen;
- unsigned int backlog = parent->un.leaf.q->qstats.backlog;
/* turn parent into inner node */
qdisc_reset(parent->un.leaf.q);
- qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
+ qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
qdisc_destroy(parent->un.leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index bcdd54bb101c..4e904ca0af9d 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -218,8 +218,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
if (q->queues[i] != &noop_qdisc) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
- qdisc_tree_reduce_backlog(child, child->q.qlen,
- child->qstats.backlog);
+ qdisc_tree_decrease_qlen(child, child->q.qlen);
qdisc_destroy(child);
}
}
@@ -239,9 +238,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child;
if (old != &noop_qdisc) {
- qdisc_tree_reduce_backlog(old,
- old->q.qlen,
- old->qstats.backlog);
+ qdisc_tree_decrease_qlen(old,
+ old->q.qlen);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
@@ -305,7 +303,13 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- *old = qdisc_replace(sch, new, &q->queues[band]);
+ sch_tree_lock(sch);
+ *old = q->queues[band];
+ q->queues[band] = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+
return 0;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 4befe97a9034..5abd1d9de989 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -395,25 +395,6 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
sch->q.qlen++;
}
-/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
- * when we statistically choose to corrupt one, we instead segment it, returning
- * the first packet to be corrupted, and re-enqueue the remaining frames
- */
-static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
-{
- struct sk_buff *segs;
- netdev_features_t features = netif_skb_features(skb);
-
- segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-
- if (IS_ERR_OR_NULL(segs)) {
- qdisc_reshape_fail(skb, sch);
- return NULL;
- }
- consume_skb(skb);
- return segs;
-}
-
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
@@ -426,11 +407,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
struct sk_buff *skb2;
- struct sk_buff *segs = NULL;
- unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
- int nb = 0;
int count = 1;
- int rc = NET_XMIT_SUCCESS;
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -476,23 +453,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* do it now in software before we mangle it.
*/
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
- if (skb_is_gso(skb)) {
- segs = netem_segment(skb, sch);
- if (!segs)
- return NET_XMIT_DROP;
- } else {
- segs = skb;
- }
-
- skb = segs;
- segs = segs->next;
-
if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(skb))) {
- rc = qdisc_drop(skb, sch);
- goto finish_segs;
- }
+ skb_checksum_help(skb)))
+ return qdisc_drop(skb, sch);
skb->data[prandom_u32() % skb_headlen(skb)] ^=
1<<(prandom_u32() % 8);
@@ -552,27 +516,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->qstats.requeues++;
}
-finish_segs:
- if (segs) {
- while (segs) {
- skb2 = segs->next;
- segs->next = NULL;
- qdisc_skb_cb(segs)->pkt_len = segs->len;
- last_len = segs->len;
- rc = qdisc_enqueue(segs, sch);
- if (rc != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(rc))
- qdisc_qstats_drop(sch);
- } else {
- nb++;
- len += last_len;
- }
- segs = skb2;
- }
- sch->q.qlen += nb;
- if (nb > 1)
- qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
- }
return NET_XMIT_SUCCESS;
}
@@ -655,8 +598,7 @@ deliver:
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1,
- qdisc_pkt_len(skb));
+ qdisc_tree_decrease_qlen(sch, 1);
}
}
goto tfifo_dequeue;
@@ -1095,7 +1037,15 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct netem_sched_data *q = qdisc_priv(sch);
- *old = qdisc_replace(sch, new, &q->qdisc);
+ sch_tree_lock(sch);
+ *old = q->qdisc;
+ q->qdisc = new;
+ if (*old) {
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ }
+ sch_tree_unlock(sch);
+
return 0;
}
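
The netem_segment() path removed above handles GSO "megapackets": corrupting one byte of an aggregated skb would dirty every packet it later expands to, so the aggregate is segmented first and only one real segment is corrupted. A rough userspace model with flat buffers standing in for skbs (assumes mss <= 1500):

#include <stdlib.h>
#include <string.h>

struct toy_pkt {
	struct toy_pkt *next;
	size_t len;
	unsigned char data[1500];
};

static struct toy_pkt *toy_segment(const unsigned char *buf, size_t len,
				   size_t mss)
{
	struct toy_pkt *head = NULL, **tail = &head;
	size_t off;

	for (off = 0; off < len; off += mss) {
		struct toy_pkt *p = calloc(1, sizeof(*p));

		if (!p)
			return head;	/* caller frees what was built */
		p->len = len - off < mss ? len - off : mss;
		memcpy(p->data, buf + off, p->len);
		*tail = p;
		tail = &p->next;
	}
	return head;	/* corrupt exactly one segment, enqueue the rest */
}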
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 71ae3b9629f9..b783a446d884 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1];
- unsigned int qlen, dropped = 0;
+ unsigned int qlen;
int err;
if (!opt)
@@ -232,11 +232,10 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
- dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 0d4630b155fe..59ef2daf652c 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -205,7 +205,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
- qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
+ qdisc_tree_decrease_qlen(child, child->q.qlen);
qdisc_destroy(child);
}
}
@@ -224,9 +224,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child;
if (old != &noop_qdisc) {
- qdisc_tree_reduce_backlog(old,
- old->q.qlen,
- old->qstats.backlog);
+ qdisc_tree_decrease_qlen(old,
+ old->q.qlen);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
@@ -291,7 +290,13 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- *old = qdisc_replace(sch, new, &q->queues[band]);
+ sch_tree_lock(sch);
+ *old = q->queues[band];
+ q->queues[band] = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+
return 0;
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 8d2d8d953432..3dc3a6e56052 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -220,10 +220,9 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
static void qfq_purge_queue(struct qfq_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
- unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
@@ -618,7 +617,11 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- *old = qdisc_replace(sch, new, &cl->qdisc);
+ sch_tree_lock(sch);
+ qfq_purge_queue(cl);
+ *old = cl->qdisc;
+ cl->qdisc = new;
+ sch_tree_unlock(sch);
return 0;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8c0508c0e287..6c0534cc7758 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -210,8 +210,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
q->flags = ctl->flags;
q->limit = ctl->limit;
if (child) {
- qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
- q->qdisc->qstats.backlog);
+ qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}
@@ -314,7 +313,12 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- *old = qdisc_replace(sch, new, &q->qdisc);
+ sch_tree_lock(sch);
+ *old = q->qdisc;
+ q->qdisc = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
return 0;
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index c69611640fa5..5bbb6332ec57 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -510,8 +510,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
- qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
- q->qdisc->qstats.backlog);
+ qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
qdisc_destroy(q->qdisc);
q->qdisc = child;
@@ -607,7 +606,12 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- *old = qdisc_replace(sch, new, &q->qdisc);
+ sch_tree_lock(sch);
+ *old = q->qdisc;
+ q->qdisc = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
return 0;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 498f0a2cb47f..3abab534eb5c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -346,7 +346,7 @@ static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- unsigned int hash, dropped;
+ unsigned int hash;
sfq_index x, qlen;
struct sfq_slot *slot;
int uninitialized_var(ret);
@@ -461,7 +461,7 @@ enqueue:
return NET_XMIT_SUCCESS;
qlen = slot->qlen;
- dropped = sfq_drop(sch);
+ sfq_drop(sch);
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
@@ -469,7 +469,7 @@ enqueue:
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
- qdisc_tree_reduce_backlog(sch, 1, dropped);
+ qdisc_tree_decrease_qlen(sch, 1);
return NET_XMIT_SUCCESS;
}
@@ -537,7 +537,6 @@ static void sfq_rehash(struct Qdisc *sch)
struct sfq_slot *slot;
struct sk_buff_head list;
int dropped = 0;
- unsigned int drop_len = 0;
__skb_queue_head_init(&list);
@@ -566,7 +565,6 @@ static void sfq_rehash(struct Qdisc *sch)
if (x >= SFQ_MAX_FLOWS) {
drop:
qdisc_qstats_backlog_dec(sch, skb);
- drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
dropped++;
continue;
@@ -596,7 +594,7 @@ drop:
}
}
sch->q.qlen -= dropped;
- qdisc_tree_reduce_backlog(sch, dropped, drop_len);
+ qdisc_tree_decrease_qlen(sch, dropped);
}
static void sfq_perturbation(unsigned long arg)
@@ -620,7 +618,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
- unsigned int qlen, dropped = 0;
+ unsigned int qlen;
struct red_parms *p = NULL;
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
@@ -669,8 +667,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
- dropped += sfq_drop(sch);
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ sfq_drop(sch);
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
del_timer(&q->perturb_timer);
if (q->perturb_period) {
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c2fbde742f37..a4afde14e865 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -160,7 +160,6 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
- unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -173,7 +172,6 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
nskb = segs->next;
segs->next = NULL;
qdisc_skb_cb(segs)->pkt_len = segs->len;
- len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
@@ -185,7 +183,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
}
sch->q.qlen += nb;
if (nb > 1)
- qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+ qdisc_tree_decrease_qlen(sch, 1 - nb);
consume_skb(skb);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
@@ -401,8 +399,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
if (child) {
- qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
- q->qdisc->qstats.backlog);
+ qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}
@@ -505,7 +502,13 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- *old = qdisc_replace(sch, new, &q->qdisc);
+ sch_tree_lock(sch);
+ *old = q->qdisc;
+ q->qdisc = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+
return 0;
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 63fb5ee212cf..21e20353178e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
}
crq->q.reader = 0;
+ crq->item = cache_get(h);
crq->buf = buf;
crq->len = 0;
crq->readers = 0;
spin_lock(&queue_lock);
- if (test_bit(CACHE_PENDING, &h->flags)) {
- crq->item = cache_get(h);
+ if (test_bit(CACHE_PENDING, &h->flags))
list_add_tail(&crq->q.list, &detail->queue);
- } else
+ else
/* Lost a race, no longer PENDING, so don't enqueue */
ret = -EAGAIN;
spin_unlock(&queue_lock);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9b5bd6d142dc..7fd1220fbfa0 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1794,8 +1794,27 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
else if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
- if (copied > 0)
+ if (copied > 0) {
+ /* We only do these additional bookkeeping/notification steps
+ * if we actually copied something out of the queue pair
+ * instead of just peeking ahead.
+ */
+
+ if (!(flags & MSG_PEEK)) {
+ /* If the other side has shutdown for sending and there
+ * is nothing more to read, then modify the socket
+ * state.
+ */
+ if (vsk->peer_shutdown & SEND_SHUTDOWN) {
+ if (vsock_stream_has_data(vsk) <= 0) {
+ sk->sk_state = SS_UNCONNECTED;
+ sock_set_flag(sk, SOCK_DONE);
+ sk->sk_state_change(sk);
+ }
+ }
+ }
err = copied;
+ }
out_wait:
finish_wait(sk_sleep(sk), &wait);
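
The vsock hunk above restores receive-side shutdown handling: only when data was actually consumed (not MSG_PEEK) and the peer has shut down its send side with nothing left to read does the socket transition to a disconnected, done state. A compact model with stand-in names:

#include <stdbool.h>

enum toy_state { TOY_CONNECTED, TOY_UNCONNECTED };

struct toy_sock {
	enum toy_state state;
	bool peer_send_shutdown;
	long bytes_readable;
	bool done;
};

static void toy_after_recv(struct toy_sock *sk, bool peeked)
{
	if (peeked)
		return;		/* MSG_PEEK consumes nothing */
	if (sk->peer_send_shutdown && sk->bytes_readable <= 0) {
		sk->state = TOY_UNCONNECTED;
		sk->done = true;	/* models sock_set_flag(SOCK_DONE) */
	}
}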
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 6e9722f71f9a..30f54d1fc841 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
struct wireless_dev *wdev;
struct cfg80211_beacon_registration *reg, *tmp;
- if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
+ if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
rcu_read_lock();
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 997ff7b2509b..7ecd04c21360 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -277,7 +277,6 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
memset(&theirs, 0, sizeof(theirs));
memcpy(new, ours, sizeof(*new));
- memset(dte, 0, sizeof(*dte));
len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
if (len < 0)
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 9b96f4fb8cea..8d8d1ec429eb 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -18,6 +18,7 @@ int bpf_prog1(struct pt_regs *ctx)
u64 cookie;
} data;
+ memset(&data, 0, sizeof(data));
data.pid = bpf_get_current_pid_tgid();
data.cookie = 0x12345678;
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index dd243d2abd87..0b7dc2fd7bac 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -267,8 +267,10 @@ int conf_read_simple(const char *name, int def)
if (in)
goto load;
sym_add_change_count(1);
- if (!sym_defconfig_list)
+ if (!sym_defconfig_list) {
+ sym_calc_value(modules_sym);
return 1;
+ }
for_all_defaults(sym_defconfig_list, prop) {
if (expr_calc_value(prop->visible.expr) == no ||
@@ -401,6 +403,7 @@ setsym:
}
free(line);
fclose(in);
+ sym_calc_value(modules_sym);
return 0;
}
@@ -411,12 +414,8 @@ int conf_read(const char *name)
sym_set_change_count(0);
- if (conf_read_simple(name, S_DEF_USER)) {
- sym_calc_value(modules_sym);
+ if (conf_read_simple(name, S_DEF_USER))
return 1;
- }
-
- sym_calc_value(modules_sym);
for_all_symbols(i, sym) {
sym_calc_value(sym);
@@ -847,7 +846,6 @@ static int conf_split_config(void)
name = conf_get_autoconfig_name();
conf_read_simple(name, S_DEF_AUTO);
- sym_calc_value(modules_sym);
if (chdir("include/config"))
return 1;
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index a2c0d620ca80..c2423d913b46 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -266,9 +266,9 @@ do_file(char const *const fname)
break;
} /* end switch */
if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
- || (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN)
+ || r2(&ehdr->e_type) != ET_EXEC
|| ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
- fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
+ fprintf(stderr, "unrecognized ET_EXEC file %s\n", fname);
fail_file();
}
@@ -282,13 +282,12 @@ do_file(char const *const fname)
case EM_386:
case EM_X86_64:
case EM_S390:
- case EM_AARCH64:
- case EM_PARISC:
custom_sort = sort_relative_table;
break;
case EM_ARCOMPACT:
case EM_ARCV2:
case EM_ARM:
+ case EM_AARCH64:
case EM_MICROBLAZE:
case EM_MIPS:
case EM_XTENSA:
@@ -305,7 +304,7 @@ do_file(char const *const fname)
if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
|| r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
fprintf(stderr,
- "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
+ "unrecognized ET_EXEC file: %s\n", fname);
fail_file();
}
do32(ehdr, fname, custom_sort);
@@ -315,7 +314,7 @@ do_file(char const *const fname)
if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
|| r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
fprintf(stderr,
- "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
+ "unrecognized ET_EXEC file: %s\n", fname);
fail_file();
}
do64(ghdr, fname, custom_sort);
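
The sortextable hunks narrow the accepted ELF type back to ET_EXEC; the ET_DYN case exists upstream because a relocatable arm64 kernel image links as a shared-object-style file. A userspace sketch of the check (the tool's endianness handling via r2() is omitted for brevity):

#include <elf.h>
#include <stdio.h>

static int check_elf_type(const Elf64_Ehdr *ehdr, int allow_dyn)
{
	Elf64_Half type = ehdr->e_type;

	if (type == ET_EXEC || (allow_dyn && type == ET_DYN))
		return 0;
	fprintf(stderr, "unrecognized %s file\n",
		allow_dyn ? "ET_EXEC/ET_DYN" : "ET_EXEC");
	return -1;
}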
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 367dbf0d285e..5c4fa8eba1d0 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -843,7 +843,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
bool allow_powerdown)
{
hda_nid_t nid, changed = 0;
- int i, state, power;
+ int i, state;
for (i = 0; i < path->depth; i++) {
nid = path->path[i];
@@ -855,9 +855,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
state = AC_PWRST_D0;
else
state = AC_PWRST_D3;
- power = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_POWER_STATE, 0);
- if (power != (state | (state << 4))) {
+ if (!snd_hda_check_power_state(codec, nid, state)) {
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_POWER_STATE, state);
changed = nid;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 411630e9c034..2ff692dd2c5f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2207,9 +2207,6 @@ static const struct pci_device_id azx_ids[] = {
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
- /* Broxton-T */
- { PCI_DEVICE(0x8086, 0x1a98),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 9739fce9e032..64e0d1d81ca5 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -141,6 +141,14 @@ static int reconfig_codec(struct hda_codec *codec)
err = snd_hda_codec_configure(codec);
if (err < 0)
goto error;
+ /* rebuild PCMs */
+ err = snd_hda_codec_build_pcms(codec);
+ if (err < 0)
+ goto error;
+ /* rebuild mixers */
+ err = snd_hda_codec_build_controls(codec);
+ if (err < 0)
+ goto error;
err = snd_card_register(codec->card);
error:
snd_hda_power_down(codec);
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 80bbadc83721..a47e8ae0eb30 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -361,7 +361,6 @@ static int cs_parse_auto_config(struct hda_codec *codec)
{
struct cs_spec *spec = codec->spec;
int err;
- int i;
err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
if (err < 0)
@@ -371,19 +370,6 @@ static int cs_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
- /* keep the ADCs powered up when it's dynamically switchable */
- if (spec->gen.dyn_adc_switch) {
- unsigned int done = 0;
- for (i = 0; i < spec->gen.input_mux.num_items; i++) {
- int idx = spec->gen.dyn_adc_idx[i];
- if (done & (1 << idx))
- continue;
- snd_hda_gen_fix_pin_power(codec,
- spec->gen.adc_nids[idx]);
- done |= 1 << idx;
- }
- }
-
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4918ffa5ba68..1402ba954b3d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5449,7 +5449,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
- SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5584,7 +5583,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -6426,7 +6424,6 @@ enum {
ALC668_FIXUP_DELL_DISABLE_AAMIX,
ALC668_FIXUP_DELL_XPS13,
ALC662_FIXUP_ASUS_Nx50,
- ALC668_FIXUP_ASUS_Nx51,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6673,15 +6670,6 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC662_FIXUP_BASS_1A
},
- [ALC668_FIXUP_ASUS_Nx51] = {
- .type = HDA_FIXUP_PINS,
- .v.pins = (const struct hda_pintbl[]) {
- {0x1a, 0x90170151}, /* bass speaker */
- {}
- },
- .chained = true,
- .chain_id = ALC662_FIXUP_BASS_CHMAP,
- },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6704,14 +6692,11 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
- SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
- SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
- SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index d7e71f309299..c5194f5b150a 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -1341,6 +1341,5 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
}
pcxhr_msg_thread(mgr);
- mutex_unlock(&mgr->lock);
return IRQ_HANDLED;
}
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index b1c8bb39cdf1..f2beb1aa5763 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
/* Interface data select */
static const char * const rt5640_data_select[] = {
- "Normal", "Swap", "left copy to right", "right copy to left"};
+ "Normal", "left copy to right", "right copy to left", "Swap"};
static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 243f42633989..3deb8babeabb 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -442,39 +442,39 @@
#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
#define RT5640_IF1_DAC_SEL_SFT 14
#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
-#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
-#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
-#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
+#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
+#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
#define RT5640_IF1_ADC_SEL_SFT 12
#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
-#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
-#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
-#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
+#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
+#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
#define RT5640_IF2_DAC_SEL_SFT 10
#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
-#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
-#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
-#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
+#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
+#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
#define RT5640_IF2_ADC_SEL_SFT 8
#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
-#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
-#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
-#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
+#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
+#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
#define RT5640_IF3_DAC_SEL_SFT 6
#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
-#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
-#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
-#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
+#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
+#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
#define RT5640_IF3_ADC_SEL_SFT 4
#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
-#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
-#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
-#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
+#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
+#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
/* REC Left Mixer Control 1 (0x3b) */
#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
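
All of the macro churn above renumbers 2-bit selector fields; each selector lives at a fixed shift inside one register, so the macro values and the order of strings in rt5640_data_select[] must agree on the same encoding. Generic helpers for such a field (illustrative, not from the driver):

static unsigned int sel_set(unsigned int reg, unsigned int val,
			    unsigned int sft)
{
	return (reg & ~(0x3u << sft)) | ((val & 0x3u) << sft);
}

static unsigned int sel_get(unsigned int reg, unsigned int sft)
{
	return (reg >> sft) & 0x3u;	/* 0..3, e.g. NOR/L2R/R2L/SWAP */
}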
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 080c78e88e10..e619d5651b09 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -352,11 +352,6 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
regcache_cache_only(ssm4567->regmap, !enable);
if (enable) {
- ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
- 0x00);
- if (ret)
- return ret;
-
ret = regmap_update_bits(ssm4567->regmap,
SSM4567_REG_POWER_CTRL,
SSM4567_POWER_SPWDN, 0x00);
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index b6ab3fc5789e..df65c5b494b1 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
#endif
int s3c_i2sv2_register_component(struct device *dev, int id,
- const struct snd_soc_component_driver *cmp_drv,
+ struct snd_soc_component_driver *cmp_drv,
struct snd_soc_dai_driver *dai_drv)
{
struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
index d0684145ed1f..90abab364b49 100644
--- a/sound/soc/samsung/s3c-i2s-v2.h
+++ b/sound/soc/samsung/s3c-i2s-v2.h
@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
* soc core.
*/
extern int s3c_i2sv2_register_component(struct device *dev, int id,
- const struct snd_soc_component_driver *cmp_drv,
+ struct snd_soc_component_driver *cmp_drv,
struct snd_soc_dai_driver *dai_drv);
#endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
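Both s3c-i2s-v2 hunks remove the const qualifier from the component-driver
argument. A small sketch, with made-up types, of what the qualifier
enforces: writes through the pointer become compile errors, and callers may
keep the ops table itself in read-only data:

    struct component_driver {
            const char *name;
    };

    static int register_component(const struct component_driver *drv)
    {
            /* drv->name = "oops";  <- rejected by the compiler under const */
            return (drv && drv->name) ? 0 : 1;
    }

    int main(void)
    {
            static const struct component_driver drv = { .name = "i2s" };

            return register_component(&drv); /* ok: &drv stays const-qualified */
    }

Dropping const means a const-qualified driver table can no longer be passed
without a cast.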
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index fd847be299eb..94806ad6437b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2204,13 +2204,6 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
int count = 0;
char *state = "not set";
- /* card won't be set for the dummy component, as a spot fix
- * we're checking for that case specifically here but in future
- * we will ensure that the dummy component looks like others.
- */
- if (!cmpnt->card)
- return 0;
-
list_for_each_entry(w, &cmpnt->card->widgets, list) {
if (w->dapm != dapm)
continue;
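The removed lines guarded this function against the dummy component, whose
card pointer is NULL; without them, the list walk over cmpnt->card->widgets
dereferences a NULL card. A toy model (not DAPM code) of the pattern:

    #include <stddef.h>
    #include <stdio.h>

    struct card { int nr_widgets; };
    struct component { struct card *card; };

    static int show_component(const struct component *cmpnt)
    {
            if (!cmpnt->card)               /* the guard being removed */
                    return 0;
            return cmpnt->card->nr_widgets; /* safe only past the guard */
    }

    int main(void)
    {
            struct component dummy = { .card = NULL };

            printf("%d\n", show_component(&dummy)); /* prints 0, no crash */
            return 0;
    }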
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index db11ecf0b74d..001fb4dc0722 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1138,11 +1138,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
- case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
- case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
- case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
return true;
}
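The hunk above drops three Phoenix Audio entries from the sample-rate quirk
table. The table's pattern packs vendor and product IDs into one value so a
switch can match devices; a minimal sketch, assuming USB_ID is the usual
(vendor << 16 | product) packing:

    #include <stdbool.h>
    #include <stdint.h>

    #define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

    static bool needs_rate_quirk(uint32_t id)
    {
            switch (id) {
            case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
            case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            return needs_rate_quirk(USB_ID(0x047F, 0x0415)) ? 0 : 1;
    }

Removing a case simply means that device no longer gets the quirk handling.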
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 88cccea3ca99..0144b3d1bb77 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
current_op = current_exp;
ret = collapse_tree(current_op, parg, error_str);
- /* collapse_tree() may free current_op, and updates parg accordingly */
- current_op = NULL;
if (ret < 0)
goto fail;
+ *parg = current_op;
+
free(token);
return 0;
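The lines removed here noted that collapse_tree() may free current_op and
updates *parg itself; restoring the trailing "*parg = current_op;" brings
back an assignment from a potentially stale pointer. A toy illustration of
the hazard, with stand-in names:

    #include <stdlib.h>

    struct node { int val; };

    /* collapse() stands in for collapse_tree(): on failure it frees its
     * argument, so the caller's pointer goes stale. */
    static int collapse(struct node *n, struct node **out)
    {
            if (n->val < 0) {
                    free(n);
                    return -1;
            }
            *out = n;
            return 0;
    }

    int main(void)
    {
            struct node *n = malloc(sizeof(*n));
            struct node *result = NULL;

            if (!n)
                    return 1;
            n->val = 1;
            if (collapse(n, &result) < 0)
                    return 1;   /* n already freed; do not touch it */
            n = NULL;           /* mirrors "current_op = NULL" in the fix */
            free(result);
            return 0;
    }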
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 90c3558c2c12..4e074a660826 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -62,14 +62,6 @@ OPTIONS
--scale::
scale/normalize counter values
--d::
---detailed::
- print more detailed statistics, can be specified up to 3 times
-
- -d: detailed events, L1 and LLC data cache
- -d -d: more detailed events, dTLB and iTLB events
- -d -d -d: very detailed events, adding prefetch events
-
-r::
--repeat=<n>::
repeat command and print average + stddev (max: 100). 0 means forever.
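For reference, the deleted block documented the stacking -d/--detailed flag,
which is additive on the command line: for example, "perf stat -d -d -r 5 -- ls"
requests the second detail level (dTLB and iTLB events on top of L1/LLC)
averaged over five runs, combining it with the --repeat option documented
just above.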
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 3900386a3629..81def6c3f24b 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2059,12 +2059,10 @@ skip_annotation:
*
* See hist_browser__show_entry.
*/
- if (sort__has_sym && browser->selection->sym) {
- nr_options += add_script_opt(browser,
- &actions[nr_options],
- &options[nr_options],
- NULL, browser->selection->sym);
- }
+ nr_options += add_script_opt(browser,
+ &actions[nr_options],
+ &options[nr_options],
+ NULL, browser->selection->sym);
}
nr_options += add_script_opt(browser, &actions[nr_options],
&options[nr_options], NULL, NULL);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 956187bf1a85..8b10621b415c 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -274,7 +274,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
strcpy(execname, "");
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
+ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
&event->mmap2.start, &event->mmap2.len, prot,
&event->mmap2.pgoff, &event->mmap2.maj,
&event->mmap2.min,
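The format-string change above is the functional one: %[^\n] captures the
rest of the /proc maps line including spaces in the file name, while %s
stops at the first whitespace and truncates it. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            const char line[] = "/bin/foo bar";
            char a[64] = "", b[64] = "";

            sscanf(line, "%63s", a);       /* a = "/bin/foo"     */
            sscanf(line, "%63[^\n]", b);   /* b = "/bin/foo bar" */
            printf("%s | %s\n", a, b);
            return 0;
    }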
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b4b96120fc3b..d1392194a9a9 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1211,12 +1211,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
*/
if (cpus != evlist->cpus) {
cpu_map__put(evlist->cpus);
- evlist->cpus = cpu_map__get(cpus);
+ evlist->cpus = cpus;
}
if (threads != evlist->threads) {
thread_map__put(evlist->threads);
- evlist->threads = thread_map__get(threads);
+ evlist->threads = threads;
}
perf_evlist__propagate_maps(evlist);
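These hunks drop the cpu_map__get()/thread_map__get() calls, so the evlist
now stores the caller's maps without taking its own reference. A minimal
get/put model of the discipline involved, with simplified types:

    #include <stdlib.h>

    struct cpu_map { int refcnt; };

    static struct cpu_map *map_get(struct cpu_map *m)
    {
            if (m)
                    m->refcnt++;
            return m;
    }

    static void map_put(struct cpu_map *m)
    {
            if (m && --m->refcnt == 0)
                    free(m);
    }

    struct evlist { struct cpu_map *cpus; };

    static void evlist_set_map(struct evlist *e, struct cpu_map *m)
    {
            if (m != e->cpus) {
                    map_put(e->cpus);
                    e->cpus = map_get(m); /* the get this revert removes */
            }
    }

    int main(void)
    {
            struct cpu_map *m = calloc(1, sizeof(*m));
            struct evlist e = { 0 };

            if (!m)
                    return 1;
            m->refcnt = 1;          /* caller's reference */
            evlist_set_map(&e, m);  /* refcnt == 2 */
            map_put(m);             /* caller drops: refcnt == 1 */
            map_put(e.cpus);        /* evlist drops: freed */
            return 0;
    }

Without the extra get, the caller's put would free maps the evlist still uses.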
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 9227c2f076c3..97f963a3dcb9 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
ret);
- if (pt->synth_opts.last_branch)
+ if (pt->synth_opts.callchain)
intel_pt_reset_last_branch_rb(ptq);
return ret;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a7b9022b5c8f..ea6064696fe4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -86,8 +86,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
vcpu->arch.timer_cpu.armed = false;
- WARN_ON(!kvm_timer_should_fire(vcpu));
-
/*
* If the vcpu is blocked we want to wake it up so that it will see
* the timer has expired when entering the guest.
@@ -95,46 +93,10 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
kvm_vcpu_kick(vcpu);
}
-static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
-{
- cycle_t cval, now;
-
- cval = vcpu->arch.timer_cpu.cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
- if (now < cval) {
- u64 ns;
-
- ns = cyclecounter_cyc2ns(timecounter->cc,
- cval - now,
- timecounter->mask,
- &timecounter->frac);
- return ns;
- }
-
- return 0;
-}
-
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
struct arch_timer_cpu *timer;
- struct kvm_vcpu *vcpu;
- u64 ns;
-
timer = container_of(hrt, struct arch_timer_cpu, timer);
- vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
-
- /*
- * Check that the timer has really expired from the guest's
- * PoV (NTP on the host may have forced it to expire
- * early). If we should have slept longer, restart it.
- */
- ns = kvm_timer_compute_delta(vcpu);
- if (unlikely(ns)) {
- hrtimer_forward_now(hrt, ns_to_ktime(ns));
- return HRTIMER_RESTART;
- }
-
queue_work(wqueue, &timer->expired);
return HRTIMER_NORESTART;
}
@@ -208,6 +170,8 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 ns;
+ cycle_t cval, now;
BUG_ON(timer_is_armed(timer));
@@ -227,7 +191,14 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
return;
/* The timer has not yet expired, schedule a background timer */
- timer_arm(timer, kvm_timer_compute_delta(vcpu));
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ ns = cyclecounter_cyc2ns(timecounter->cc,
+ cval - now,
+ timecounter->mask,
+ &timecounter->frac);
+ timer_arm(timer, ns);
}
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
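The net effect of the arch_timer hunks is to remove the
kvm_timer_compute_delta() helper and the expiry re-check in
kvm_timer_expire() (needed because, per the deleted comment, NTP on the host
can make the hrtimer fire before the guest's compare value is reached),
folding the cycles-to-nanoseconds conversion back inline into
kvm_timer_schedule(). That conversion is the usual clocksource scaling; a
simplified standalone sketch (cyclecounter_cyc2ns also takes a mask and a
running fraction, omitted here, and the mult/shift values below are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* ns = (delta_cycles * mult) >> shift */
    static uint64_t cyc2ns(uint64_t delta, uint32_t mult, uint32_t shift)
    {
            return (delta * mult) >> shift;
    }

    int main(void)
    {
            /* Illustrative values for a 19.2 MHz counter: mult/shift chosen
             * so one cycle scales to roughly 52.08 ns. */
            uint32_t mult = 873813333, shift = 24;

            printf("%llu ns\n",
                   (unsigned long long)cyc2ns(19200000, mult, shift)); /* ~1e9 */
            return 0;
    }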