-rw-r--r--Documentation/arm64/booting.txt20
-rw-r--r--Documentation/arm64/silicon-errata.txt58
-rw-r--r--Documentation/devicetree/bindings/arm/cache.txt195
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt4
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-mdp.txt2
-rw-r--r--Documentation/devicetree/bindings/input/hbtp-input.txt38
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt10
-rw-r--r--Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt27
-rw-r--r--Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt1
-rw-r--r--Documentation/devicetree/bindings/power/reset/reboot-mode.txt25
-rw-r--r--Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt35
-rw-r--r--Documentation/devicetree/bindings/qbt1000/qbt1000.txt54
-rw-r--r--Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt15
-rw-r--r--Documentation/features/time/irq-time-acct/arch-support.txt2
-rw-r--r--Documentation/features/vm/huge-vmap/arch-support.txt2
-rw-r--r--MAINTAINERS4
-rw-r--r--Makefile2
-rw-r--r--arch/arc/include/asm/io.h27
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts5
-rw-r--r--arch/arm/boot/dts/armada-375.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi2
-rw-r--r--arch/arm/boot/dts/pxa3xx.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/Makefile1
-rw-r--r--arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi110
-rw-r--r--arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmi8994.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-ion.dtsi7
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi32
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi88
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts29
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi27
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-vr1.dts23
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi190
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi433
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi23
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon.dtsi55
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton-ion.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi23
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton.dtsi53
-rw-r--r--arch/arm/configs/msmcortex_defconfig1
-rw-r--r--arch/arm/configs/msmfalcon_defconfig2
-rw-r--r--arch/arm/include/asm/kvm_asm.h2
-rw-r--r--arch/arm/kvm/arm.c5
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c6
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-exynos/pm_domains.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c69
-rw-r--r--arch/arm/mach-omap2/io.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c8
-rw-r--r--arch/arm/mach-prima2/Kconfig1
-rw-r--r--arch/arm/mach-qcom/board-falcon.c17
-rw-r--r--arch/arm/mach-socfpga/headsmp.S1
-rw-r--r--arch/arm64/Kconfig91
-rw-r--r--arch/arm64/Makefile11
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig1
-rw-r--r--arch/arm64/configs/msmcortex_defconfig1
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/acpi.h19
-rw-r--r--arch/arm64/include/asm/alternative.h64
-rw-r--r--arch/arm64/include/asm/assembler.h37
-rw-r--r--arch/arm64/include/asm/atomic_lse.h38
-rw-r--r--arch/arm64/include/asm/boot.h6
-rw-r--r--arch/arm64/include/asm/brk-imm.h25
-rw-r--r--arch/arm64/include/asm/bug.h2
-rw-r--r--arch/arm64/include/asm/cacheflush.h1
-rw-r--r--arch/arm64/include/asm/checksum.h51
-rw-r--r--arch/arm64/include/asm/cmpxchg.h1
-rw-r--r--arch/arm64/include/asm/cpu.h1
-rw-r--r--arch/arm64/include/asm/cpufeature.h7
-rw-r--r--arch/arm64/include/asm/cputype.h39
-rw-r--r--arch/arm64/include/asm/debug-monitors.h14
-rw-r--r--arch/arm64/include/asm/elf.h24
-rw-r--r--arch/arm64/include/asm/fixmap.h10
-rw-r--r--arch/arm64/include/asm/ftrace.h2
-rw-r--r--arch/arm64/include/asm/futex.h14
-rw-r--r--arch/arm64/include/asm/hugetlb.h44
-rw-r--r--arch/arm64/include/asm/irq.h45
-rw-r--r--arch/arm64/include/asm/kasan.h5
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h12
-rw-r--r--arch/arm64/include/asm/kvm_asm.h2
-rw-r--r--arch/arm64/include/asm/kvm_host.h8
-rw-r--r--arch/arm64/include/asm/lse.h1
-rw-r--r--arch/arm64/include/asm/memory.h65
-rw-r--r--arch/arm64/include/asm/mmu_context.h62
-rw-r--r--arch/arm64/include/asm/module.h17
-rw-r--r--arch/arm64/include/asm/pgalloc.h26
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h18
-rw-r--r--arch/arm64/include/asm/pgtable.h156
-rw-r--r--arch/arm64/include/asm/processor.h9
-rw-r--r--arch/arm64/include/asm/shmparam.h2
-rw-r--r--arch/arm64/include/asm/smp.h8
-rw-r--r--arch/arm64/include/asm/spinlock.h23
-rw-r--r--arch/arm64/include/asm/stacktrace.h9
-rw-r--r--arch/arm64/include/asm/sysreg.h23
-rw-r--r--arch/arm64/include/asm/thread_info.h10
-rw-r--r--arch/arm64/include/asm/uaccess.h82
-rw-r--r--arch/arm64/include/asm/word-at-a-time.h7
-rw-r--r--arch/arm64/include/uapi/asm/ptrace.h1
-rw-r--r--arch/arm64/kernel/Makefile3
-rw-r--r--arch/arm64/kernel/acpi_parking_protocol.c153
-rw-r--r--arch/arm64/kernel/alternative.c6
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c13
-rw-r--r--arch/arm64/kernel/cpu_errata.c20
-rw-r--r--arch/arm64/kernel/cpu_ops.c27
-rw-r--r--arch/arm64/kernel/cpufeature.c133
-rw-r--r--arch/arm64/kernel/cpuinfo.c55
-rw-r--r--arch/arm64/kernel/efi-entry.S2
-rw-r--r--arch/arm64/kernel/entry.S69
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/kernel/ftrace.c27
-rw-r--r--arch/arm64/kernel/head.S152
-rw-r--r--arch/arm64/kernel/image.h85
-rw-r--r--arch/arm64/kernel/irq.c3
-rw-r--r--arch/arm64/kernel/kaslr.c177
-rw-r--r--arch/arm64/kernel/module-plts.c201
-rw-r--r--arch/arm64/kernel/module.c96
-rw-r--r--arch/arm64/kernel/module.lds3
-rw-r--r--arch/arm64/kernel/perf_callchain.c5
-rw-r--r--arch/arm64/kernel/process.c21
-rw-r--r--arch/arm64/kernel/return_address.c5
-rw-r--r--arch/arm64/kernel/setup.c36
-rw-r--r--arch/arm64/kernel/sleep.S3
-rw-r--r--arch/arm64/kernel/smp.c30
-rw-r--r--arch/arm64/kernel/stacktrace.c74
-rw-r--r--arch/arm64/kernel/suspend.c20
-rw-r--r--arch/arm64/kernel/time.c5
-rw-r--r--arch/arm64/kernel/traps.c61
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S32
-rw-r--r--arch/arm64/kvm/hyp.S6
-rw-r--r--arch/arm64/lib/Makefile13
-rw-r--r--arch/arm64/lib/clear_user.S12
-rw-r--r--arch/arm64/lib/copy_from_user.S12
-rw-r--r--arch/arm64/lib/copy_in_user.S20
-rw-r--r--arch/arm64/lib/copy_page.S63
-rw-r--r--arch/arm64/lib/copy_to_user.S12
-rw-r--r--arch/arm64/mm/cache.S28
-rw-r--r--arch/arm64/mm/context.c2
-rw-r--r--arch/arm64/mm/copypage.c3
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/dump.c17
-rw-r--r--arch/arm64/mm/extable.c2
-rw-r--r--arch/arm64/mm/fault.c34
-rw-r--r--arch/arm64/mm/flush.c33
-rw-r--r--arch/arm64/mm/hugetlbpage.c260
-rw-r--r--arch/arm64/mm/init.c132
-rw-r--r--arch/arm64/mm/kasan_init.c79
-rw-r--r--arch/arm64/mm/mmu.c701
-rw-r--r--arch/arm64/mm/pageattr.c66
-rw-r--r--arch/arm64/mm/pgd.c12
-rw-r--r--arch/arm64/mm/proc-macros.S22
-rw-r--r--arch/arm64/mm/proc.S28
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/assembly.h2
-rw-r--r--arch/parisc/include/asm/uaccess.h7
-rw-r--r--arch/parisc/mm/fault.c9
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h2
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h1
-rw-r--r--arch/powerpc/kernel/process.c18
-rw-r--r--arch/powerpc/kernel/prom.c26
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h28
-rw-r--r--arch/s390/include/asm/pci.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h4
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/tlbflush.h9
-rw-r--r--arch/s390/mm/init.c3
-rw-r--r--arch/s390/mm/mmap.c6
-rw-r--r--arch/s390/mm/pgtable.c85
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c4
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/hugetlb.h1
-rw-r--r--arch/x86/kernel/apic/vector.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-genpool.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c3
-rw-r--r--arch/x86/kernel/sysfb_efi.c14
-rw-r--r--arch/x86/kernel/tsc_msr.c2
-rw-r--r--arch/x86/kvm/x86.c10
-rw-r--r--arch/x86/mm/kmmio.c88
-rw-r--r--block/partition-generic.c13
-rw-r--r--crypto/ahash.c3
-rw-r--r--crypto/testmgr.c27
-rw-r--r--drivers/acpi/acpi_processor.c52
-rw-r--r--drivers/acpi/acpica/dsmethod.c3
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci_platform.c3
-rw-r--r--drivers/ata/ahci_xgene.c4
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/base/power/domain.c2
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/nbd.c4
-rw-r--r--drivers/block/paride/pd.c4
-rw-r--r--drivers/block/paride/pt.c4
-rw-r--r--drivers/bluetooth/bluetooth-power.c19
-rw-r--r--drivers/bus/imx-weim.c2
-rw-r--r--drivers/char/adsprpc.c2
-rw-r--r--drivers/clk/clk-divider.c11
-rw-r--r--drivers/clk/clk.c26
-rw-r--r--drivers/clk/meson/clkc.c2
-rw-r--r--drivers/clk/msm/clock-gcc-cobalt.c25
-rw-r--r--drivers/clk/msm/clock-osm.c106
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c2
-rw-r--r--drivers/clk/qcom/clk-branch.c3
-rw-r--r--drivers/clk/qcom/clk-branch.h4
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c10
-rw-r--r--drivers/clk/qcom/gcc-msmfalcon.c12
-rw-r--r--drivers/clk/rockchip/clk.c13
-rw-r--r--drivers/clk/versatile/clk-sp810.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpuidle/cpuidle-arm.c2
-rw-r--r--drivers/cpuidle/lpm-levels-of.c2
-rw-r--r--drivers/cpuidle/lpm-levels.c4
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/crypto/msm/qce.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c26
-rw-r--r--drivers/crypto/talitos.c87
-rw-r--r--drivers/devfreq/governor_bw_vbif.c12
-rw-r--r--drivers/dma/dw/core.c34
-rw-r--r--drivers/dma/hsu/hsu.c2
-rw-r--r--drivers/dma/hsu/hsu.h3
-rw-r--r--drivers/dma/pxa_dma.c39
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/sb_edac.c8
-rw-r--r--drivers/extcon/extcon-max77843.c2
-rw-r--r--drivers/firmware/efi/efi.c1
-rw-r--r--drivers/firmware/efi/libstub/Makefile7
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c40
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c78
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c7
-rw-r--r--drivers/firmware/efi/libstub/efistub.h7
-rw-r--r--drivers/firmware/efi/libstub/fdt.c14
-rw-r--r--drivers/firmware/efi/libstub/random.c135
-rw-r--r--drivers/firmware/efi/vars.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c29
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h9
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c8
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c3
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c22
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c6
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c12
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c30
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ramht.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c154
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h46
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/gpu/msm/adreno.c23
-rw-r--r--drivers/gpu/msm/adreno.h28
-rw-r--r--drivers/gpu/msm/adreno_a3xx.c5
-rw-r--r--drivers/gpu/msm/adreno_a4xx.c5
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c5
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c45
-rw-r--r--drivers/gpu/msm/kgsl_pwrscale.c6
-rw-r--r--drivers/gpu/msm/kgsl_pwrscale.h4
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_wac.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c6
-rw-r--r--drivers/hwtracing/stm/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c24
-rw-r--r--drivers/iio/magnetometer/ak8975.c6
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/input/misc/hbtp_input.c674
-rw-r--r--drivers/input/misc/max8997_haptic.c6
-rw-r--r--drivers/input/misc/ots_pat9125/pat9125_linux_driver.c29
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c7
-rw-r--r--drivers/input/tablet/gtco.c10
-rw-r--r--drivers/input/touchscreen/gt9xx/goodix_tool.c45
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.c823
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.h28
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx_update.c23
-rw-r--r--drivers/input/touchscreen/zforce_ts.c4
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/arm-smmu.c6
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/irqchip/irq-mxs.c2
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c4
-rw-r--r--drivers/leds/leds-qpnp-flash-v2.c11
-rw-r--r--drivers/leds/leds-qpnp-wled.c570
-rw-r--r--drivers/md/Kconfig16
-rw-r--r--drivers/md/Makefile4
-rw-r--r--drivers/md/dm-android-verity.c925
-rw-r--r--drivers/md/dm-android-verity.h124
-rw-r--r--drivers/md/dm-cache-metadata.c64
-rw-r--r--drivers/md/dm-linear.c24
-rw-r--r--drivers/md/dm-verity-fec.c19
-rw-r--r--drivers/md/dm-verity-fec.h4
-rw-r--r--drivers/md/dm-verity-target.c14
-rw-r--r--drivers/md/dm-verity.h10
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c23
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h1
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c19
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h3
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c26
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h10
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c39
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c168
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.h10
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c4
-rw-r--r--drivers/media/platform/msm/vidc/hfi_response_handler.c13
-rw-r--r--drivers/media/platform/msm/vidc/msm_smem.c2
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c1
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c51
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_debug.c4
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_internal.h1
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c8
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_api.h1
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_helper.h6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/mfd/intel-lpss.c1
-rw-r--r--drivers/mfd/wcd934x-regmap.c7
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/ad525x_dpot.c2
-rw-r--r--drivers/misc/cxl/irq.c1
-rw-r--r--drivers/misc/mic/scif/scif_rma.c7
-rw-r--r--drivers/misc/qseecom.c45
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c34
-rw-r--r--drivers/mtd/nand/nand_base.c10
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c46
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c10
-rw-r--r--drivers/net/ethernet/jme.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c10
-rw-r--r--drivers/net/usb/cdc_mbim.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c30
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c1
-rw-r--r--drivers/nvmem/mxs-ocotp.c4
-rw-r--r--drivers/of/fdt.c19
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c9
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c6
-rw-r--r--drivers/platform/msm/gsi/gsi.c17
-rw-r--r--drivers/platform/msm/gsi/gsi.h11
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/ipa_usb.c499
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_flt.c36
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_nat.c77
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_rt.c26
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c5
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c11
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_client.c57
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c127
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h5
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_nat.c76
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c8
-rw-r--r--drivers/platform/msm/qpnp-revid.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/power/qcom-charger/fg-core.h9
-rw-r--r--drivers/power/qcom-charger/fg-util.c11
-rw-r--r--drivers/power/qcom-charger/pmic-voter.c1
-rw-r--r--drivers/power/qcom-charger/qpnp-fg-gen3.c44
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c7
-rw-r--r--drivers/power/qcom-charger/smb-lib.c138
-rw-r--r--drivers/power/qcom-charger/smb-reg.h1
-rw-r--r--drivers/power/qcom-charger/smb1351-charger.c8
-rw-r--r--drivers/power/qcom-charger/smb138x-charger.c6
-rw-r--r--drivers/power/reset/Kconfig14
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/msm-poweroff.c1
-rw-r--r--drivers/power/reset/reboot-mode.c140
-rw-r--r--drivers/power/reset/reboot-mode.h14
-rw-r--r--drivers/power/reset/syscon-reboot-mode.c99
-rw-r--r--drivers/pwm/pwm-brcmstb.c4
-rw-r--r--drivers/regulator/axp20x-regulator.c4
-rw-r--r--drivers/regulator/s2mps11.c28
-rw-r--r--drivers/regulator/s5m8767.c13
-rw-r--r--drivers/rtc/rtc-ds1685.c8
-rw-r--r--drivers/rtc/rtc-hym8563.c2
-rw-r--r--drivers/rtc/rtc-max77686.c2
-rw-r--r--drivers/rtc/rtc-rx8025.c1
-rw-r--r--drivers/rtc/rtc-vr41xx.c13
-rw-r--r--drivers/scsi/device_handler/Kconfig8
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c3
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/sensors/sensors_ssc.c13
-rw-r--r--drivers/soc/qcom/Kconfig9
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/glink_spi_xprt.c19
-rw-r--r--drivers/soc/qcom/icnss.c73
-rw-r--r--drivers/soc/qcom/pil-q6v5.c16
-rw-r--r--drivers/soc/qcom/pil-q6v5.h1
-rw-r--r--drivers/soc/qcom/qbt1000.c1263
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_tal_glink.c1
-rw-r--r--drivers/soc/qcom/watchdog_v2.c2
-rw-r--r--drivers/soc/qcom/wcd-dsp-glink.c17
-rw-r--r--drivers/soc/rockchip/pm_domains.c1
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-rockchip.c7
-rw-r--r--drivers/spi/spi-ti-qspi.c45
-rw-r--r--drivers/spmi/spmi-pmic-arb.c6
-rw-r--r--drivers/staging/android/ion/ion_heap.c10
-rw-r--r--drivers/staging/rdma/hfi1/TODO2
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c6
-rw-r--r--drivers/thermal/cpu_cooling.c56
-rw-r--r--drivers/thermal/msm_lmh_dcvs.c74
-rw-r--r--drivers/thermal/rockchip_thermal.c11
-rw-r--r--drivers/tty/serial/msm_serial_hs.c24
-rw-r--r--drivers/tty/serial/sh-sci.c39
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c10
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c5
-rw-r--r--drivers/usb/gadget/function/f_gsi.c3
-rw-r--r--drivers/usb/host/xhci-mem.c6
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-plat.c4
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/pd/policy_engine.c113
-rw-r--r--drivers/usb/pd/qpnp-pdphy.c2
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/usbip/usbip_common.c11
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/da8xx-fb.c7
-rw-r--r--drivers/video/fbdev/msm/Makefile1
-rw-r--r--drivers/video/fbdev/msm/mdss_debug.c32
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c761
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.h112
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_aux.c341
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c861
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.c106
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.c1
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_host.c5
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c12
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c11
-rw-r--r--drivers/video/fbdev/msm/mdss_hdcp.h (renamed from drivers/video/fbdev/msm/mdss_hdcp_1x.h)10
-rw-r--r--drivers/video/fbdev/msm/mdss_hdcp_1x.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_edid.c86
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h18
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c27
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_video.c48
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c221
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c145
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.c10
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h1
-rw-r--r--drivers/video/fbdev/msm/msm_ext_display.c71
-rw-r--r--drivers/xen/balloon.c16
-rw-r--r--drivers/xen/evtchn.c20
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/ext4/ext4.h13
-rw-r--r--fs/ext4/extents.c133
-rw-r--r--fs/ext4/file.c66
-rw-r--r--fs/ext4/inode.c77
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/ext4/truncate.h2
-rw-r--r--fs/isofs/rock.c13
-rw-r--r--fs/namei.c26
-rw-r--r--fs/ocfs2/acl.c87
-rw-r--r--fs/ocfs2/acl.h5
-rw-r--r--fs/ocfs2/file.c4
-rw-r--r--fs/ocfs2/namei.c23
-rw-r--r--fs/ocfs2/refcounttree.c17
-rw-r--r--fs/ocfs2/xattr.c14
-rw-r--r--fs/ocfs2/xattr.h4
-rw-r--r--fs/open.c12
-rw-r--r--fs/pnode.c32
-rw-r--r--fs/proc/base.c3
-rw-r--r--fs/proc/task_mmu.c33
-rw-r--r--include/asm-generic/fixmap.h12
-rw-r--r--include/asm-generic/futex.h8
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/dt-bindings/clock/msm-clocks-cobalt.h2
-rw-r--r--include/dt-bindings/clock/msm-clocks-hwio-cobalt.h2
-rw-r--r--include/linux/bluetooth-power.h1
-rw-r--r--include/linux/bpf.h3
-rw-r--r--include/linux/cgroup-defs.h1
-rw-r--r--include/linux/clk-provider.h1
-rw-r--r--include/linux/clk/msm-clk.h11
-rw-r--r--include/linux/cpu_cooling.h16
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/dcache.h12
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/hash.h20
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/ipa_usb.h7
-rw-r--r--include/linux/mfd/samsung/s2mps11.h2
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/driver.h6
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/net.h10
-rw-r--r--include/linux/qpnp/qpnp-revid.h4
-rw-r--r--include/linux/sched/core_ctl.h (renamed from kernel/sched/core_ctl.h)7
-rw-r--r--include/media/videobuf2-core.h1
-rw-r--r--include/net/cfg80211.h73
-rw-r--r--include/net/codel.h4
-rw-r--r--include/net/ip_vs.h17
-rw-r--r--include/net/sch_generic.h20
-rw-r--r--include/rdma/ib.h16
-rw-r--r--include/trace/events/sched.h15
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/hbtp_input.h15
-rw-r--r--include/uapi/linux/if.h28
-rw-r--r--include/uapi/linux/libc-compat.h44
-rw-r--r--include/uapi/linux/msm_mdp.h9
-rw-r--r--include/uapi/linux/qbt1000.h99
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h30
-rw-r--r--include/uapi/linux/videodev2.h1
-rw-r--r--include/xen/page.h4
-rw-r--r--init/do_mounts.h3
-rw-r--r--kernel/bpf/inode.c7
-rw-r--r--kernel/bpf/syscall.c24
-rw-r--r--kernel/bpf/verifier.c66
-rw-r--r--kernel/cgroup.c7
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/events/ring_buffer.c10
-rw-r--r--kernel/futex.c27
-rw-r--r--kernel/locking/mcs_spinlock.h8
-rw-r--r--kernel/sched/core.c10
-rw-r--r--kernel/sched/core_ctl.c33
-rw-r--r--kernel/sched/fair.c5
-rw-r--r--kernel/sched/hmp.c8
-rw-r--r--kernel/sched/sched.h1
-rw-r--r--kernel/trace/trace_events.c9
-rw-r--r--kernel/workqueue.c40
-rw-r--r--lib/assoc_array.c4
-rw-r--r--lib/extable.c50
-rw-r--r--lib/lz4/lz4defs.h21
-rw-r--r--lib/mpi/mpicoder.c39
-rw-r--r--lib/test-string_helpers.c67
-rw-r--r--mm/compaction.c10
-rw-r--r--mm/huge_memory.c6
-rw-r--r--mm/memcontrol.c36
-rw-r--r--mm/memory.c40
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/slub.c22
-rw-r--r--mm/vmscan.c2
-rw-r--r--mm/zsmalloc.c7
-rw-r--r--mm/zswap.c8
-rw-r--r--net/batman-adv/distributed-arp-table.c17
-rw-r--r--net/batman-adv/routing.c9
-rw-r--r--net/batman-adv/send.c6
-rw-r--r--net/batman-adv/soft-interface.c8
-rw-r--r--net/bridge/br_ioctl.c5
-rw-r--r--net/bridge/br_multicast.c12
-rw-r--r--net/core/skbuff.c11
-rw-r--r--net/decnet/dn_route.c9
-rw-r--r--net/ipv4/fib_frontend.c6
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/ip_gre.c11
-rw-r--r--net/ipv4/route.c12
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv6/reassembly.c6
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/llc/af_llc.c1
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/util.c44
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c37
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c4
-rw-r--r--net/netfilter/xt_qtaguid.c3
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/actions.c12
-rw-r--r--net/openvswitch/vport-netdev.c2
-rw-r--r--net/openvswitch/vport.h7
-rw-r--r--net/packet/af_packet.c1
-rw-r--r--net/sched/sch_api.c8
-rw-r--r--net/sched/sch_cbq.c12
-rw-r--r--net/sched/sch_choke.c6
-rw-r--r--net/sched/sch_codel.c10
-rw-r--r--net/sched/sch_drr.c9
-rw-r--r--net/sched/sch_dsmark.c11
-rw-r--r--net/sched/sch_fq.c4
-rw-r--r--net/sched/sch_fq_codel.c17
-rw-r--r--net/sched/sch_generic.c5
-rw-r--r--net/sched/sch_hfsc.c9
-rw-r--r--net/sched/sch_hhf.c10
-rw-r--r--net/sched/sch_htb.c24
-rw-r--r--net/sched/sch_multiq.c16
-rw-r--r--net/sched/sch_netem.c74
-rw-r--r--net/sched/sch_pie.c5
-rw-r--r--net/sched/sch_prio.c15
-rw-r--r--net/sched/sch_qfq.c9
-rw-r--r--net/sched/sch_red.c10
-rw-r--r--net/sched/sch_sfb.c10
-rw-r--r--net/sched/sch_sfq.c16
-rw-r--r--net/sched/sch_tbf.c15
-rw-r--r--net/sunrpc/cache.c6
-rw-r--r--net/vmw_vsock/af_vsock.c21
-rw-r--r--net/wireless/nl80211.c511
-rw-r--r--net/wireless/util.c54
-rw-r--r--net/x25/x25_facilities.c1
-rw-r--r--samples/bpf/trace_output_kern.c1
-rw-r--r--scripts/kconfig/confdata.c12
-rw-r--r--scripts/sortextable.c11
-rw-r--r--sound/pci/hda/hda_generic.c6
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/hda_sysfs.c8
-rw-r--r--sound/pci/hda/patch_cirrus.c14
-rw-r--r--sound/pci/hda/patch_realtek.c15
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c1
-rw-r--r--sound/soc/codecs/rt5640.c2
-rw-r--r--sound/soc/codecs/rt5640.h36
-rw-r--r--sound/soc/codecs/ssm4567.c5
-rw-r--r--sound/soc/codecs/wcd-dsp-mgr.c4
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.c2
-rw-r--r--sound/soc/codecs/wcd9335.c115
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsd.c47
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsd.h5
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c168
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-mbhc.c67
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-mbhc.h2
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x.c252
-rw-r--r--sound/soc/msm/msmcobalt.c249
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c454
-rw-r--r--sound/soc/msm/qdsp6v2/q6adm.c2
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c5
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c2
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.h2
-rw-r--r--sound/soc/soc-dapm.c7
-rw-r--r--sound/usb/quirks.c3
-rw-r--r--tools/lib/traceevent/parse-filter.c4
-rw-r--r--tools/perf/Documentation/perf-stat.txt8
-rw-r--r--tools/perf/ui/browsers/hists.c10
-rw-r--r--tools/perf/util/event.c2
-rw-r--r--tools/perf/util/evlist.c4
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--virt/kvm/arm/arch_timer.c49
699 files changed, 18516 insertions, 4508 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 701d39d3171a..56d6d8b796db 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -109,7 +109,13 @@ Header notes:
1 - 4K
2 - 16K
3 - 64K
- Bits 3-63: Reserved.
+ Bit 3: Kernel physical placement
+ 0 - 2MB aligned base should be as close as possible
+ to the base of DRAM, since memory below it is not
+ accessible via the linear mapping
+ 1 - 2MB aligned base may be anywhere in physical
+ memory
+ Bits 4-63: Reserved.
- When image_size is zero, a bootloader should attempt to keep as much
memory as possible free for use by the kernel immediately after the
@@ -117,14 +123,14 @@ Header notes:
depending on selected features, and is effectively unbound.
The Image must be placed text_offset bytes from a 2MB aligned base
-address near the start of usable system RAM and called there. Memory
-below that base address is currently unusable by Linux, and therefore it
-is strongly recommended that this location is the start of system RAM.
-The region between the 2 MB aligned base address and the start of the
-image has no special significance to the kernel, and may be used for
-other purposes.
+address anywhere in usable system RAM and called there. The region
+between the 2 MB aligned base address and the start of the image has no
+special significance to the kernel, and may be used for other purposes.
At least image_size bytes from the start of the image must be free for
use by the kernel.
+NOTE: versions prior to v4.6 cannot make use of memory below the
+physical offset of the Image so it is recommended that the Image be
+placed as close as possible to the start of system RAM.
Any memory described to the kernel (even that below the start of the
image) which is not marked as reserved from the kernel (e.g., with a
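To make the new placement flag concrete, a boot loader can test bit 3 of the
Image header's 64-bit flags field before choosing a load address. The sketch
below is illustrative only and is not part of the patch; the struct layout
follows the header fields documented in booting.txt, the helper name is made
up, and a little-endian host is assumed since the header fields are little
endian.

	#include <stdint.h>
	#include <string.h>

	/* arm64 Image header as documented in booting.txt */
	struct arm64_image_header {
		uint32_t code0;		/* executable code */
		uint32_t code1;		/* executable code */
		uint64_t text_offset;	/* image load offset */
		uint64_t image_size;	/* effective image size */
		uint64_t flags;		/* kernel flags */
		uint64_t res2, res3, res4;
		uint32_t magic;		/* 0x644d5241, "ARM\x64" */
		uint32_t res5;
	};

	/* Returns 1 if the 2MB-aligned base may be anywhere in physical memory. */
	static int kernel_placement_is_flexible(const void *image)
	{
		struct arm64_image_header hdr;

		memcpy(&hdr, image, sizeof(hdr));	/* copy to avoid alignment traps */
		if (hdr.magic != 0x644d5241)		/* not an arm64 Image */
			return 0;
		return (hdr.flags >> 3) & 1;		/* Bit 3: kernel physical placement */
	}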
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
new file mode 100644
index 000000000000..58b71ddf9b60
--- /dev/null
+++ b/Documentation/arm64/silicon-errata.txt
@@ -0,0 +1,58 @@
+ Silicon Errata and Software Workarounds
+ =======================================
+
+Author: Will Deacon <will.deacon@arm.com>
+Date : 27 November 2015
+
+It is an unfortunate fact of life that hardware is often produced with
+so-called "errata", which can cause it to deviate from the architecture
+under specific circumstances. For hardware produced by ARM, these
+errata are broadly classified into the following categories:
+
+ Category A: A critical error without a viable workaround.
+ Category B: A significant or critical error with an acceptable
+ workaround.
+ Category C: A minor error that is not expected to occur under normal
+ operation.
+
+For more information, consult one of the "Software Developers Errata
+Notice" documents available on infocenter.arm.com (registration
+required).
+
+As far as Linux is concerned, Category B errata may require some special
+treatment in the operating system. For example, avoiding a particular
+sequence of code, or configuring the processor in a particular way. A
+less common situation may require similar actions in order to declassify
+a Category A erratum into a Category C erratum. These are collectively
+known as "software workarounds" and are only required in the minority of
+cases (e.g. those cases that both require a non-secure workaround *and*
+can be triggered by Linux).
+
+For software workarounds that may adversely impact systems unaffected by
+the erratum in question, a Kconfig entry is added under "Kernel
+Features" -> "ARM errata workarounds via the alternatives framework".
+These are enabled by default and patched in at runtime when an affected
+CPU is detected. For less-intrusive workarounds, a Kconfig option is not
+available and the code is structured (preferably with a comment) in such
+a way that the erratum will not be hit.
+
+This approach can make it slightly onerous to determine exactly which
+errata are worked around in an arbitrary kernel source tree, so this
+file acts as a registry of software workarounds in the Linux Kernel and
+will be updated when new workarounds are committed and backported to
+stable kernels.
+
+| Implementor | Component | Erratum ID | Kconfig |
++----------------+-----------------+-----------------+-------------------------+
+| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
+| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
+| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
+| ARM | Cortex-A53 | #845719 | ARM64_ERRATUM_845719 |
+| ARM | Cortex-A53 | #843419 | ARM64_ERRATUM_843419 |
+| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
+| ARM | Cortex-A57 | #852523 | N/A |
+| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+| | | | |
+| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
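As a brief illustration of the "patched in at runtime" behaviour described
above (this example is not part of the patch): C code can test one of the
capability bits that the errata-detection code sets at boot, while assembly
paths use the ALTERNATIVE() macros so the check is patched away on unaffected
CPUs. The constant and helper below are assumed from the arm64 cpufeature API
of this kernel generation.

	#include <asm/cpufeature.h>

	/*
	 * Sketch: only take the workaround path on cores that report the
	 * erratum. cpus_have_cap() tests the capability bitmap filled in
	 * by the CPU errata detection code at boot.
	 */
	static void maybe_work_around_845719(void)
	{
		if (cpus_have_cap(ARM64_WORKAROUND_845719)) {
			/* erratum-specific handling would go here */
		}
	}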
diff --git a/Documentation/devicetree/bindings/arm/cache.txt b/Documentation/devicetree/bindings/arm/cache.txt
new file mode 100644
index 000000000000..a9594f026506
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cache.txt
@@ -0,0 +1,195 @@
+==========================================
+ARM processors cache binding description
+==========================================
+
+Device tree bindings for ARM processor caches adhere to the cache bindings
+described in [3], in section 3.8 for multi-level and shared caches.
+On ARM based systems most of the cache properties related to cache
+geometry are probeable in HW, hence, unless otherwise stated, the properties
+defined in ePAPR for multi-level and shared caches are to be considered
+optional by default.
+
+On ARM, caches are either architected (directly controlled by the processor
+through coprocessor instructions and tightly coupled with the processor
+implementation) or unarchitected (controlled through a memory mapped
+interface, implemented as a stand-alone IP external to the processor
+implementation).
+
+This document provides the device tree bindings for ARM architected caches.
+
+- ARM architected cache node
+
+ Description: must be a direct child of the cpu node. A system
+ can contain multiple architected cache nodes per cpu node,
+ linked through the next-level-cache phandle. The
+ next-level-cache property in the cpu node points to
+ the first level of architected cache for the CPU.
+ The next-level-cache property in architected cache nodes
+ points to the respective next level of caching in the
+ hierarchy. An architected cache node with an empty or
+ missing next-level-cache property represents the last
+ architected cache level for the CPU.
+ On ARM v7 and v8 architectures, the order in which cache
+ nodes are linked through the next-level-cache phandle must
+ follow the ordering specified in the processors CLIDR (v7)
+ and CLIDR_EL1 (v8) registers, as described in [1][2],
+ implying that a cache node pointed at by a
+ next-level-cache phandle must correspond to a level
+ defined in CLIDR (v7) and CLIDR_EL1 (v8) greater than the
+ one the cache node containing the next-level-cache
+ phandle corresponds to.
+
+ Since on ARM most of the cache properties are probeable in HW the
+ properties described in [3] - section 3.8 multi-level and shared
+ caches - shall be considered optional, with the following properties
+ updates, specific for the ARM architected cache node.
+
+ - compatible
+ Usage: Required
+ Value type: <string>
+ Definition: value shall be "arm,arch-cache".
+
+ - interrupts
+ Usage: Optional
+ Value type: See definition
+ Definition: standard device tree property [3] that defines
+ the interrupt line associated with the cache.
+ The property can be accompanied by an
+ interrupt-names property, as described in [4].
+
+ - power-domain
+ Usage: Optional
+ Value type: phandle
+ Definition: A phandle and power domain specifier as defined by
+ bindings of power controller specified by the
+ phandle [5].
+
+ - qcom,dump-size
+ Usage: Optional
+ Value type: <integer>
+ Definition: The memory size needed to contain a copy of the
+ cache data and associated tag ram.
+ size = nways * nsets * (bytes per cache line +
+ bytes tag ram per line)
+
+Example (dual-cluster 32-bit big.LITTLE system)
+
+ cpus {
+ #size-cells = <0>;
+ #address-cells = <1>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x0>;
+ next-level-cache = <&L1_0>;
+
+ L1_0: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_0>;
+ };
+
+ L2_0: l2-cache {
+ compatible = "arm,arch-cache";
+ };
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x1>;
+ next-level-cache = <&L1_1>;
+
+ L1_1: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_0>;
+ };
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x2>;
+ next-level-cache = <&L1_2>;
+
+ L1_2: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_0>;
+ };
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x3>;
+ next-level-cache = <&L1_3>;
+
+ L1_3: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_0>;
+ };
+ };
+
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ next-level-cache = <&L1_4>;
+
+ L1_4: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_1>;
+ };
+
+ L2_1: l2-cache {
+ compatible = "arm,arch-cache";
+ };
+ };
+
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ next-level-cache = <&L1_5>;
+
+ L1_5: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_1>;
+ };
+ };
+
+ cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ next-level-cache = <&L1_6>;
+
+ L1_6: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_1>;
+ };
+ };
+
+ cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x103>;
+ next-level-cache = <&L1_7>;
+
+ L1_7: l1-cache {
+ compatible = "arm,arch-cache";
+ next-level-cache = <&L2_1>;
+ };
+ };
+ };
+
+[1] ARMv7-AR Reference Manual
+ http://infocenter.arm.com/help/index.jsp
+[2] ARMv8-A Reference Manual
+ http://infocenter.arm.com/help/index.jsp
+[3] ePAPR standard
+ https://www.power.org/documentation/epapr-version-1-1/
+[4] Kernel documentation - resource property bindings
+ Documentation/devicetree/bindings/resource-names.txt
+[5] Kernel documentation - power domain bindings
+ Documentation/devicetree/bindings/power/power_domain.txt
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index c2340eeeb97f..c000832a7fb9 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -30,6 +30,10 @@ Optional properties:
- target-supply : regulator for SATA target power
- phys : reference to the SATA PHY node
- phy-names : must be "sata-phy"
+- ports-implemented : Mask that indicates which of the ports supported by the
+                      HBA are available for software to use. Useful if
+                      PORTS_IMPL is not programmed by the BIOS, which is true
+                      on some embedded SoCs.
Required properties when using sub-nodes:
- #address-cells : number of cells to encode an address
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index c8cf395d5669..389c2c32fd5b 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -566,6 +566,8 @@ Subnode properties:
Example: Width = 1920, Height = 1080, BytesPerPixel = 4,
Number of frame-buffers reserved = 2.
Size = 1920*1080*4*2 = ROUND_1MB(15.8MB) = 16MB.
+- qcom,mdss-intf: Phandle to the kernel driver module that is mapped to the
+ frame buffer virtual device.
- qcom,mdss-fb-splash-logo-enabled: The boolean entry enables the framebuffer
driver to display the splash logo image.
It is independent of continuous splash
diff --git a/Documentation/devicetree/bindings/input/hbtp-input.txt b/Documentation/devicetree/bindings/input/hbtp-input.txt
index f422d75e037e..ad28952c5e7e 100644
--- a/Documentation/devicetree/bindings/input/hbtp-input.txt
+++ b/Documentation/devicetree/bindings/input/hbtp-input.txt
@@ -20,6 +20,19 @@ Optional properties:
- qcom,dig-vtg-max : Digital supply maximum voltage in uV
- qcom,display-resolution : Display resolution - maxX, maxY
- qcom,use-scale : boolean, enables the scaling for touch coordinates
+ - pinctrl-names : defines pinctrl names
+ "pmx_ts_active" : Required pinctrl name.
+ This should specify active config of TS RST gpio
+ "pmx_ts_suspend" : Required pinctrl name
+ This should specify suspend config of TS RST gpio
+ "ddic_rst_active" : Required pinctrl name
+ This should specify active config of DDIC RST gpio
+ "ddic_rst_suspend" : Required pinctrl name
+ This should specify suspend config of DDIC RST gpio
+ - pinctrl-0 : pin control to be used for TS active config
+ - pinctrl-1 : pin control to be used for TS suspend config
+ - pinctrl-2 : pin control to be used for DDIC active config
+ - pinctrl-3 : pin control to be used for DDIC suspend config
Optional properties if qcom,use-scale DT property is defined:
- qcom,def-maxx : default X-resolution of the touch panel.
@@ -29,6 +42,20 @@ Optional properties if qcom,use-scale DT property is defined:
- qcom,des-maxy : desired Y-resolution of the touch panel.
(Above two properties should be defined in pairs only)
+Optional Properties if pinctrl names are defined:
+ - qcom,pmx-ts-on-seq-delay-us : unsigned integer type for
+ delay after active TS RST gpio is changed
+ - qcom,fb-resume-delay-us : unsigned integer type for
+ delay in early resume framebuffer callback
+ - qcom,ddic-rst-on-seq-delay-us : array of unsigned integer type for
+ delay of each step in series of DDIC RST gpio control
+
+Optional Properties if qcom,afe-vtg and qcom,dig-vtg are defined
+ - qcom,afe-power-on-delay-us : unsigned integer type for
+ delay between turning on analog and digital power supply
+ - qcom,afe-power-off-delay-us : unsigned integer type for
+ delay between turning off digital and analog power supply
+
Example:
&soc {
hbtp {
@@ -47,5 +74,16 @@ Example:
qcom,default-max-y = <1920>;
qcom,desired-max-x = <720>;
qcom,desired-max-y = <1280>;
+ pinctrl-names = "pmx_ts_active","pmx_ts_suspend",
+ "ddic_rst_active", "ddic_rst_suspend";
+ pinctrl-0 = <&ts_rst_active>;
+ pinctrl-1 = <&ts_rst_suspend>;
+ pinctrl-2 = <&ddic_rst_active>;
+ pinctrl-3 = <&ddic_rst_suspend>;
+ qcom,pmx-ts-on-seq-delay-us = <1000>;
+ qcom,ddic-rst-on-seq-delay-us = <10000 10000 10000 10000>;
+ qcom,fb-resume-delay-us = <90000>;
+ qcom,afe-power-on-delay-us = <1000>;
+ qcom,afe-power-off-delay-us = <6>;
};
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt b/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
index 4c676fa66e62..bde115155eba 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
@@ -34,7 +34,7 @@ Optional properties:
It is a four tuple consisting of min x,
min y, max x and max y values.
- goodix,i2c-pull-up : To specify pull up is required.
- - goodix,no-force-update : To specify force update is allowed.
+ - goodix,force-update : To specify force update is allowed.
- goodix,enable-power-off : Power off touchscreen during suspend.
- goodix,button-map : Button map of key codes. The number of key codes
depend on panel.
@@ -57,6 +57,12 @@ Optional properties:
- goodix,cfg-data5 : Touch screen controller config data group 5. Ask vendor
to provide that.
- goodix,fw-name : Touch screen controller firmware file name.
+ - goodix,slide-wakeup : To specify slide-wakeup property is enabled or not.
+ - goodix,dbl-clk-wakeup : To specify dbl-clk-wakeup property is enabled or not.
+ - goodix,change-x2y : To specify change-x2y property is enabled or not.
+ - goodix,driver-send-cfg : To specify driver-send-cfg property is enabled or not.
+ - goodix,have-touch-key : To specify have-touch-key property is enabled or not.
+ - goodix,with-pen : To specify with-pen property is enabled or not.
Example:
i2c@f9927000 {
goodix@5d {
@@ -92,5 +98,7 @@ i2c@f9927000 {
FF FF FF FF FF FF FF 22 22 22
22 22 22 FF 07 01];
goodix,fw_name = "gtp_fw.bin";
+ goodix,have-touch-key;
+ goodix,driver-send-cfg;
};
};
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
index 8389fe57898a..ebbcfe5b2fd0 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
@@ -12,6 +12,8 @@ Required properties:
- reg-names : names associated with base addresses. It
should be "qpnp-wled-ctrl-base", "qpnp-wled-sink-base",
"qpnp-wled-ibb-base", "qpnp-wled-lab-base".
+- qcom,pmic-revid : phandle of PMIC revid module. This is used to
+ identify the PMIC subtype.
Optional properties for WLED:
- interrupts : Specifies the interrupts associated with WLED. The available
@@ -25,8 +27,21 @@ Optional properties for WLED:
are "wled1", "wled2", "wled3", "wled4" and "auto". default is "auto".
- qcom,vref-mv : maximum reference voltage in mv. default is 350.
- qcom,switch-freq-khz : switch frequency in khz. default is 800.
-- qcom,ovp-mv : over voltage protection value in mv. default is 17800.
-- qcom,ilim-ma : maximum current limiter in ma. default is 980.
+- qcom,ovp-mv : Over voltage protection threshold in mV. Default is
+ 29500. Supported values are:
+ - 31000, 29500, 19400, 17800 for pmi8994/8952/8996.
+ - 31100, 29600, 19600, 18100 for pmicobalt/pm2falcon.
+ Should only be used if qcom,disp-type-amoled is not
+ specified.
+- qcom,ilim-ma : Current limit threshold in mA.
+ For pmi8994/8952/8996, default value for LCD is 980mA
+ and AMOLED is 385mA.
+ Supported values are:
+ - 105, 385, 660, 980, 1150, 1420, 1700, 1980.
+ For pmicobalt/pm2falcon, default value for LCD is
+ 970mA and AMOLED is 620mA.
+ Supported values are:
+ - 105, 280, 450, 620, 970, 1150, 1300, 1500.
- qcom,boost-duty-ns : maximum boost duty cycle in ns. default is 104.
- qcom,mod-freq-khz : modulation frequency in khz. default is 9600.
- qcom,dim-mode : dimming mode. supporting dimming modes are "analog",
@@ -54,7 +69,13 @@ Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
- qcom,loop-ea-gm : control the gm for gm stage in control loop. default is 3.
- qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320.
- qcom,vref-psm-mv : reference psm voltage in mv. default for amoled is 450.
-- qcom,avdd-trim-steps-from-center : The number of steps to trim the OVP threshold voltage. The possible values can be between -7 to 8.
+- qcom,avdd-mode-spmi: Boolean property to enable AMOLED_VOUT programming via SPMI. If not specified,
+ AMOLED_VOUT is programmed via S-wire. This can be specified only for newer
+ PMICs like pmicobalt/pm2falcon.
+- qcom,avdd-target-voltage-mv: The voltage required for AMOLED_VOUT. Accepted values are in the range
+ of 5650 to 7900 in steps of 150. Default value is 7600. Unit is in mV.
+ For old revisions, accepted values are: 7900, 7600, 7300, 6400, 6100,
+ 5800.
Example:
qcom,leds@d800 {
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index a3dc40936e8e..54a3c5689b9c 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -79,6 +79,7 @@ Optional properties:
current issue.
- qcom,qdsp6v61-1-1: Boolean- Present if the qdsp version is v61 1.1
- qcom,qdsp6v62-1-2: Boolean- Present if the qdsp version is v62 1.2
+- qcom,qdsp6v62-1-5: Boolean- Present if the qdsp version is v62 1.5
- qcom,mx-spike-wa: Boolean- Present if we need to assert QDSP6 I/O clamp, memory
wordline clamp, and compiler memory clamp during MSS restart.
- qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
diff --git a/Documentation/devicetree/bindings/power/reset/reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/reboot-mode.txt
new file mode 100644
index 000000000000..de34f27d509e
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/reboot-mode.txt
@@ -0,0 +1,25 @@
+Generic reboot mode core map driver
+
+This driver gets the reboot mode arguments and calls the write
+interface to store the magic value in a special register
+or RAM. The bootloader can then read it and take different
+action according to the argument stored.
+
+All mode properties are vendor specific; they are an indication to tell
+the bootloader what to do when the system reboots, and should be named
+mode-xxx = <magic> (xxx is the mode name, and magic should be a non-zero value).
+
+For example, modes common on Android platforms:
+- mode-normal: Normal reboot mode, entered when the system reboots with the command "reboot".
+- mode-recovery: Android recovery mode, used to format the device or install a new image.
+- mode-bootloader: Android fastboot mode, used to re-flash partitions on an Android based device.
+- mode-loader: A bootloader mode used to download an image on Rockchip platforms,
+  usually used in development.
+
+Example:
+ reboot-mode {
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+	};
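For orientation (not part of this binding), the "write interface" mentioned
above is the per-backend callback that actually stores the magic value. A
minimal sketch of such a backend is shown below; it assumes the core added in
drivers/power/reset/reboot-mode.c exposes roughly a reboot_mode_register()
helper taking a struct reboot_mode_driver with a write() callback, and the
scratch register is a made-up example.

	#include <linux/io.h>
	#include "reboot-mode.h"	/* header added alongside the core driver */

	/* Hypothetical backend: store the magic in a vendor scratch register. */
	static void __iomem *scratch_reg;	/* mapped during probe */

	static int example_reboot_mode_write(struct reboot_mode_driver *reboot,
					     unsigned int magic)
	{
		writel(magic, scratch_reg);	/* bootloader reads this after reset */
		return 0;
	}

	static struct reboot_mode_driver example_reboot_mode = {
		.write = example_reboot_mode_write,
	};

	/*
	 * In the platform driver's probe, after mapping scratch_reg and
	 * setting example_reboot_mode.dev:
	 *	ret = reboot_mode_register(&example_reboot_mode);
	 */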
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt
new file mode 100644
index 000000000000..f7ce1d8af04a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt
@@ -0,0 +1,35 @@
+SYSCON reboot mode driver
+
+This driver gets the reboot mode magic value from the reboot-mode driver
+and stores it in a SYSCON-mapped register. The bootloader can then
+read it and take different action according to the magic
+value stored.
+
+This DT node should be represented as a sub-node of a "syscon", "simple-mfd"
+node.
+
+Required properties:
+- compatible: should be "syscon-reboot-mode"
+- offset: offset in the register map for the storage register (in bytes)
+
+Optional property:
+- mask: bit mask of the bits in the register used to store the reboot mode magic value;
+  defaults to 0xffffffff if missing.
+
+The rest of the properties should follow the generic reboot-mode description
+found in reboot-mode.txt
+
+Example:
+ pmu: pmu@20004000 {
+ compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd";
+ reg = <0x20004000 0x100>;
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x40>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/qbt1000/qbt1000.txt b/Documentation/devicetree/bindings/qbt1000/qbt1000.txt
new file mode 100644
index 000000000000..c9861e4948f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/qbt1000/qbt1000.txt
@@ -0,0 +1,54 @@
+Qualcomm Technologies, Inc. QBT1000 Specific Bindings
+
+QBT is a fingerprint sensor ASIC capable of performing fingerprint image scans
+and detecting finger presence on the sensor using programmable firmware.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: "qcom,qbt1000".
+
+- clock-names
+ Usage: required
+ Value type: <stringlist>
+ Definition: List of clock names that need to be voted on/off.
+
+- clocks
+ Usage: required
+ Value type: <prop_encoded-array>
+ Definition: Property pair that represents the clock controller and the clock
+ id. This in combination with the clock-name is used to obtain
+ the handle for the clock that needs to be voted on/off.
+
+- clock-frequency
+ Usage: required
+ Value type: <u32>
+ Definition: Frequency of clock in Hz.
+
+- qcom,ipc-gpio
+ Usage: required
+ Value type: <phandle>
+ Definition: phandle for GPIO to be used for IPC.
+
+- qcom,finger-detect-gpio
+ Usage: required
+ Value type: <phandle>
+ Definition: phandle for GPIO to be used for finger detect.
+
+=======
+Example
+=======
+
+qcom,qbt1000 {
+ compatible = "qcom,qbt1000";
+ clock-names = "core", "iface";
+ clocks = <&clock_gcc clk_gcc_blsp2_qup6_spi_apps_clk>,
+ <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+ clock-frequency = <15000000>;
+ qcom,ipc-gpio = <&tlmm 121 0>;
+ qcom,finger-detect-gpio = <&pmcobalt_gpios 2 0>;
+};
diff --git a/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt b/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
index 165153dde994..ea671a1ff14a 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
@@ -1,11 +1,16 @@
-* msm-ssc-sensors
+Qualcomm Technologies, Inc. SSC Driver
+
+The msm-ssc-sensors driver implements the mechanism that loads SLPI firmware images.
Required properties:
- - compatible: "qcom,msm-ssc-sensors"
+ - compatible: This must be "qcom,msm-ssc-sensors"
+ - qcom,firmware-name: SLPI firmware name, must be "slpi_v1" or "slpi_v2"
Example:
+	The following example is for msmcobalt version 1.
- qcom,msm-ssc-sensors {
- compatible = "qcom,msm-ssc-sensors";
- };
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ qcom,firmware-name = "slpi_v1";
+ };
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt
index e63316239938..4199ffecc0ff 100644
--- a/Documentation/features/time/irq-time-acct/arch-support.txt
+++ b/Documentation/features/time/irq-time-acct/arch-support.txt
@@ -9,7 +9,7 @@
| alpha: | .. |
| arc: | TODO |
| arm: | ok |
- | arm64: | .. |
+ | arm64: | ok |
| avr32: | TODO |
| blackfin: | TODO |
| c6x: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index af6816bccb43..df1d1f3c9af2 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -9,7 +9,7 @@
| alpha: | TODO |
| arc: | TODO |
| arm: | TODO |
- | arm64: | TODO |
+ | arm64: | ok |
| avr32: | TODO |
| blackfin: | TODO |
| c6x: | TODO |
diff --git a/MAINTAINERS b/MAINTAINERS
index 4c3e1d2ac31b..ab65bbecb159 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4097,8 +4097,8 @@ F: Documentation/efi-stub.txt
F: arch/ia64/kernel/efi.c
F: arch/x86/boot/compressed/eboot.[ch]
F: arch/x86/include/asm/efi.h
-F: arch/x86/platform/efi/*
-F: drivers/firmware/efi/*
+F: arch/x86/platform/efi/
+F: drivers/firmware/efi/
F: include/linux/efi*.h
EFI VARIABLE FILESYSTEM
diff --git a/Makefile b/Makefile
index e59197c28c78..7d76ff290be4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 8
+SUBLEVEL = 11
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 27b17adea50d..cb69299a492e 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,6 +13,15 @@
#include <asm/byteorder.h>
#include <asm/page.h>
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long flags);
@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr);
#define ioremap_wc(phy, sz) ioremap(phy, sz)
#define ioremap_wt(phy, sz) ioremap(phy, sz)
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
/* Change struct page to physical address */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb() rmb()
-#define __iowmb() wmb()
-#else
-#define __iormb() do { } while (0)
-#define __iowmb() do { } while (0)
-#endif
-
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
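
Moving the __iormb()/__iowmb() definitions above the new accessors lets ioread{16,32}be() order the MMIO read before any dependent use of the returned value, and iowrite{16,32}be() order prior stores before the MMIO write. A minimal usage sketch; the register offsets and device are hypothetical:

#include <linux/io.h>

#define CTRL_REG	0x00	/* hypothetical big-endian control register */
#define STATUS_REG	0x04	/* hypothetical big-endian status register */

static u32 start_and_read_status(void __iomem *base)
{
	/* __iowmb() inside iowrite32be() orders earlier stores before this write */
	iowrite32be(0x1, base + CTRL_REG);

	/* __iormb() inside ioread32be() orders this read before later dependent loads */
	return ioread32be(base + STATUS_REG);
}
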
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 47954ed990f8..00707aac72fc 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -792,3 +792,8 @@
tx-num-evt = <32>;
rx-num-evt = <32>;
};
+
+&synctimer_32kclk {
+ assigned-clocks = <&mux_synctimer32k_ck>;
+ assigned-clock-parents = <&clkdiv32k_ick>;
+};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 7ccce7529b0c..cc952cf8ec30 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -529,7 +529,7 @@
};
sata@a0000 {
- compatible = "marvell,orion-sata";
+ compatible = "marvell,armada-370-sata";
reg = <0xa0000 0x5000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 14>, <&gateclk 20>;
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 3710755c6d76..85d2c377c332 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -117,7 +117,7 @@
};
/* USB part of the eSATA/USB 2.0 port */
- usb@50000 {
+ usb@58000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 0827d594b1f0..cd0cd5fd09a3 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -106,7 +106,7 @@
pmc: pmc@fffffc00 {
compatible = "atmel,at91sam9x5-pmc", "syscon";
- reg = <0xfffffc00 0x100>;
+ reg = <0xfffffc00 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
interrupt-controller;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index cf6998a0804d..564341af7e97 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -30,7 +30,7 @@
reg = <0x43100000 90>;
interrupts = <45>;
clocks = <&clks CLK_NAND>;
- dmas = <&pdma 97>;
+ dmas = <&pdma 97 3>;
dma-names = "data";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index bffa21a06462..f884e0ece735 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -114,6 +114,7 @@ dtb-$(CONFIG_ARCH_MSMCOBALT) += msmcobalt-sim.dtb \
msmcobalt-v2-qrd.dtb \
msmcobalt-qrd-skuk.dtb \
msmcobalt-qrd-vr1.dtb \
+ msmcobalt-v2-qrd-vr1.dtb \
apqcobalt-mtp.dtb \
apqcobalt-cdp.dtb \
apqcobalt-v2-mtp.dtb \
diff --git a/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
index ba27e3912ee6..eb8a7a98b6b4 100644
--- a/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -371,13 +371,84 @@
qcom,adsp-state = <0>;
};
+ qcom,msm-dai-tdm-pri-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37120>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ qcom,msm-cpudai-tdm-group-port-id = <36864>;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36864>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-dai-tdm-pri-tx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37121>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ qcom,msm-cpudai-tdm-group-port-id = <36865>;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36865>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-dai-tdm-sec-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37136>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ qcom,msm-cpudai-tdm-group-port-id = <36880>;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36880>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-dai-tdm-sec-tx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37137>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ qcom,msm-cpudai-tdm-group-port-id = <36881>;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36881>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-tert-rx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37152>;
qcom,msm-cpudai-tdm-group-num-ports = <1>;
qcom,msm-cpudai-tdm-group-port-id = <36896>;
qcom,msm-cpudai-tdm-clk-rate = <1536000>;
- pinctrl-names = "default", "sleep";
dai_tert_tdm_rx_0: qcom,msm-dai-q6-tdm-tert-rx-0 {
compatible = "qcom,msm-dai-q6-tdm";
qcom,msm-cpudai-tdm-dev-id = <36896>;
@@ -396,7 +467,6 @@
qcom,msm-cpudai-tdm-group-num-ports = <1>;
qcom,msm-cpudai-tdm-group-port-id = <36897 >;
qcom,msm-cpudai-tdm-clk-rate = <1536000>;
- pinctrl-names = "default", "sleep";
dai_tert_tdm_tx_0: qcom,msm-dai-q6-tdm-tert-tx-0 {
compatible = "qcom,msm-dai-q6-tdm";
qcom,msm-cpudai-tdm-dev-id = <36897 >;
@@ -408,4 +478,40 @@
qcom,msm-cpudai-tdm-data-align = <0>;
};
};
+
+ qcom,msm-dai-tdm-quat-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37168>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ qcom,msm-cpudai-tdm-group-port-id = <36912>;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ dai_quat_tdm_rx_0: qcom,msm-dai-q6-tdm-quat-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36912>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-dai-tdm-quat-tx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37169>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ qcom,msm-cpudai-tdm-group-port-id = <36913 >;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ dai_quat_tdm_tx_0: qcom,msm-dai-q6-tdm-quat-tx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36913 >;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
index 399892f52b6f..41589d02f6fc 100644
--- a/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
@@ -236,8 +236,8 @@
qcom,fdbk-output = "auto";
qcom,vref-mv = <350>;
qcom,switch-freq-khz = <800>;
- qcom,ovp-mv = <29500>;
- qcom,ilim-ma = <980>;
+ qcom,ovp-mv = <29600>;
+ qcom,ilim-ma = <970>;
qcom,boost-duty-ns = <26>;
qcom,mod-freq-khz = <9600>;
qcom,dim-mode = "hybrid";
@@ -248,6 +248,7 @@
qcom,en-phase-stag;
qcom,led-strings-list = [00 01 02];
qcom,en-ext-pfet-sc-pro;
+ qcom,pmic-revid = <&pm2falcon_revid>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi b/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi
index 7a8e71d14291..7243a6b1d6d4 100644
--- a/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi
@@ -592,7 +592,7 @@
regulator-bob {
compatible = "qcom,rpm-smd-regulator";
- regulator-name = "pmcobalt_bob";
+ regulator-name = "pmicobalt_bob";
qcom,set = <3>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi
index 96dfed8464e9..bba70329c819 100644
--- a/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi
@@ -537,6 +537,7 @@
qcom,en-phase-stag;
qcom,led-strings-list = [00 01 02 03];
qcom,en-ext-pfet-sc-pro;
+ qcom,pmic-revid = <&pmi8994_revid>;
};
pmi8994_haptics: qcom,haptic@c000 {
diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
index 28d230dfb6bf..a5243aff4282 100644
--- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
@@ -596,8 +596,8 @@
qcom,fdbk-output = "auto";
qcom,vref-mv = <350>;
qcom,switch-freq-khz = <800>;
- qcom,ovp-mv = <29500>;
- qcom,ilim-ma = <980>;
+ qcom,ovp-mv = <29600>;
+ qcom,ilim-ma = <970>;
qcom,boost-duty-ns = <26>;
qcom,mod-freq-khz = <9600>;
qcom,dim-mode = "hybrid";
@@ -608,6 +608,7 @@
qcom,en-phase-stag;
qcom,led-strings-list = [00 01 02 03];
qcom,en-ext-pfet-sc-pro;
+ qcom,pmic-revid = <&pmicobalt_revid>;
};
pmicobalt_haptics: qcom,haptic@c000 {
@@ -740,7 +741,7 @@
qcom,led-mask = <3>;
qcom,default-led-trigger = "switch0_trigger";
reg0 {
- regulator-name = "pmcobalt_bob";
+ regulator-name = "pmicobalt_bob";
max-voltage-uv = <3600000>;
};
};
@@ -751,7 +752,7 @@
qcom,led-mask = <4>;
qcom,default-led-trigger = "switch1_trigger";
reg0 {
- regulator-name = "pmcobalt_bob";
+ regulator-name = "pmicobalt_bob";
max-voltage-uv = <3600000>;
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-ion.dtsi b/arch/arm/boot/dts/qcom/msm8996-ion.dtsi
index 6b64e9b8976a..9b287e3f23c5 100644
--- a/arch/arm/boot/dts/qcom/msm8996-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
- system_contig_heap: qcom,ion-heap@21 {
- reg = <21>;
- qcom,ion-heap-type = "SYSTEM_CONTIG";
- };
-
qcom,ion-heap@22 { /* ADSP HEAP */
reg = <22>;
memory-region = <&adsp_mem>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
index a1d80075abe0..ca12a520c2aa 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
@@ -85,14 +85,15 @@
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
- <&pcm_noirq>;
+ <&pcm_noirq>, <&cpe3>;
asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
"msm-pcm-dsp.2", "msm-voip-dsp",
"msm-pcm-voice", "msm-pcm-loopback",
"msm-compress-dsp", "msm-pcm-hostless",
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-cpe-lsm",
- "msm-compr-dsp", "msm-pcm-dsp-noirq";
+ "msm-compr-dsp", "msm-pcm-dsp-noirq",
+ "msm-cpe-lsm.3";
asoc-cpu = <&dai_hdmi>, <&dai_dp>,
<&dai_mi2s0>, <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
@@ -107,7 +108,10 @@
<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
<&usb_audio_rx>, <&usb_audio_tx>,
- <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>;
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+ <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
@@ -126,7 +130,10 @@
"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
- "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897";
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+ "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1",
"msm-ext-disp-audio-codec-rx";
@@ -212,7 +219,10 @@
<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
<&usb_audio_rx>, <&usb_audio_tx>,
- <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>;
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+ <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
@@ -231,7 +241,10 @@
"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
- "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897";
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+ "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1",
"msm-ext-disp-audio-codec-rx";
@@ -244,6 +257,12 @@
cpe: qcom,msm-cpe-lsm {
compatible = "qcom,msm-cpe-lsm";
+ qcom,msm-cpe-lsm-id = <1>;
+ };
+
+ cpe3: qcom,msm-cpe-lsm@3 {
+ compatible = "qcom,msm-cpe-lsm";
+ qcom,msm-cpe-lsm-id = <3>;
};
qcom,wcd-dsp-mgr {
@@ -364,6 +383,7 @@
qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 01 17 02];
qcom,cdc-dmic-sample-rate = <4800000>;
qcom,cdc-mad-dmic-rate = <600000>;
+ qcom,cdc-ecpp-dmic-rate = <1200000>;
};
wcd934x_cdc: tavil_codec {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi
index 929a079c64c3..a660ea06795e 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi
@@ -736,8 +736,10 @@
clocks = <&clock_gcc clk_gcc_blsp1_uart3_apps_clk>,
<&clock_gcc clk_gcc_blsp1_ahb_clk>;
pinctrl-names = "sleep", "default";
- pinctrl-0 = <&blsp1_uart3_sleep>;
- pinctrl-1 = <&blsp1_uart3_active>;
+ pinctrl-0 = <&blsp1_uart3_tx_sleep>, <&blsp1_uart3_rxcts_sleep>,
+ <&blsp1_uart3_rfr_sleep>;
+ pinctrl-1 = <&blsp1_uart3_tx_active>,
+ <&blsp1_uart3_rxcts_active>, <&blsp1_uart3_rfr_active>;
qcom,msm-bus,name = "buart3";
qcom,msm-bus,num-cases = <2>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index fcceac6e2469..4822823aa63f 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -302,6 +302,10 @@
qpnp,qpnp-labibb-mode = "lcd";
};
+&pmicobalt_wled {
+ qcom,led-strings-list = [00 01];
+};
+
&dsi_dual_nt35597_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi
index dfa3dae8e371..7b15fd81c710 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi
@@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
- system_contig_heap: qcom,ion-heap@21 {
- reg = <21>;
- qcom,ion-heap-type = "SYSTEM_CONTIG";
- };
-
qcom,ion-heap@22 { /* ADSP HEAP */
reg = <22>;
memory-region = <&adsp_mem>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
index 0278cbde90ce..6afd593f9610 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
@@ -92,6 +92,8 @@
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,cmd-sync-wait-broadcast;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
&dsi_dual_nt35597_truly_video {
@@ -110,12 +112,28 @@
qcom,mdss-dsi-panel-timings = [00 11 04 04 07 0c 04 04 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x05>;
qcom,mdss-dsi-t-clk-pre = <0x1b>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
};
&dsi_nt35597_dsc_cmd {
qcom,mdss-dsi-panel-timings = [00 11 04 04 07 0c 04 04 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x05>;
qcom,mdss-dsi-t-clk-pre = <0x1b>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
};
&dsi_sharp_4k_dsc_video {
@@ -140,6 +158,8 @@
qcom,mdss-dsi-panel-timings = [00 17 05 05 09 0f 05 06 04 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x06>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "te_signal_check";
};
&dsi_sharp_1080_cmd {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
index ec38e46b1d89..9c72ebf4a0bd 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
@@ -268,11 +268,13 @@
mdss_fb2: qcom,mdss_fb_hdmi {
cell-index = <2>;
compatible = "qcom,mdss-fb";
+ qcom,mdss-intf = <&mdss_hdmi_tx>;
};
mdss_fb3: qcom,mdss_fb_dp {
cell-index = <3>;
compatible = "qcom,mdss-fb";
+ qcom,mdss-intf = <&mdss_dp_ctrl>;
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index 7948dc3489cb..b77bab712ecf 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -355,6 +355,10 @@
qpnp,qpnp-labibb-mode = "lcd";
};
+&pmicobalt_wled {
+ qcom,led-strings-list = [00 01];
+};
+
&dsi_dual_nt35597_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
index 3975bc5d16f5..e5fd988dccce 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
@@ -1223,29 +1223,83 @@
};
};
- blsp1_uart3_active: blsp1_uart3_active {
- mux {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- function = "blsp_uart3_a";
+ blsp1_uart3: blsp1_uart3 {
+ blsp1_uart3_tx_active: blsp1_uart3_tx_active {
+ mux {
+ pins = "gpio45";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio45";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
- config {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- drive-strength = <2>;
- bias-disable;
+ blsp1_uart3_tx_sleep: blsp1_uart3_tx_sleep {
+ mux {
+ pins = "gpio45";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio45";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
- };
- blsp1_uart3_sleep: blsp1_uart3_sleep {
- mux {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- function = "gpio";
+ blsp1_uart3_rxcts_active: blsp1_uart3_rxcts_active {
+ mux {
+ pins = "gpio46", "gpio47";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio46", "gpio47";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
- config {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- drive-strength = <2>;
- bias-pull-up;
+ blsp1_uart3_rxcts_sleep: blsp1_uart3_rxcts_sleep {
+ mux {
+ pins = "gpio46", "gpio47";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio46", "gpio47";
+ drive-strength = <2>;
+ bias-no-pull;
+ };
+ };
+
+ blsp1_uart3_rfr_active: blsp1_uart3_rfr_active {
+ mux {
+ pins = "gpio48";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio48";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart3_rfr_sleep: blsp1_uart3_rfr_sleep {
+ mux {
+ pins = "gpio48";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio48";
+ drive-strength = <2>;
+ bias-no-pull;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi
index 5f89985db0a3..58471f6d0fd1 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi
@@ -62,7 +62,7 @@
50000000 100000000 200000000>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
- cd-gpios = <&tlmm 95 0x1>;
+ cd-gpios = <&tlmm 95 0x0>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts
index ee6a58a41b4f..e53912071502 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts
@@ -21,32 +21,3 @@
compatible = "qcom,msmcobalt-qrd", "qcom,msmcobalt", "qcom,qrd";
qcom,board-id = <0x02000b 0x80>;
};
-
-&soc {
- sound-tavil {
- qcom,model = "msmcobalt-qvr-tavil-snd-card";
- qcom,audio-routing =
- "RX_BIAS", "MCLK",
- "MADINPUT", "MCLK",
- "AMIC2", "MIC BIAS2",
- "MIC BIAS2", "Headset Mic",
- "DMIC0", "MIC BIAS1",
- "MIC BIAS1", "Digital Mic0",
- "DMIC1", "MIC BIAS1",
- "MIC BIAS1", "Digital Mic1",
- "DMIC2", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic2",
- "DMIC4", "MIC BIAS4",
- "MIC BIAS4", "Digital Mic4",
- "SpkrLeft IN", "SPK1 OUT";
-
- qcom,msm-mbhc-hphl-swh = <1>;
- /delete-property/ qcom,us-euro-gpios;
- /delete-property/ qcom,hph-en0-gpio;
- /delete-property/ qcom,hph-en0-gpio;
-
- qcom,wsa-max-devs = <1>;
- qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
- qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
- };
-};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi
index f8069856f3d8..f0607ac3a34a 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi
@@ -99,4 +99,31 @@
debounce-interval = <15>;
};
};
+
+ sound-tavil {
+ qcom,model = "msmcobalt-qvr-tavil-snd-card";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "SpkrLeft IN", "SPK1 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <1>;
+ /delete-property/ qcom,us-euro-gpios;
+ /delete-property/ qcom,hph-en0-gpio;
+ /delete-property/ qcom,hph-en0-gpio;
+
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi
index 51e1154beaa9..682ea8a260ef 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi
@@ -263,6 +263,10 @@
qpnp,qpnp-labibb-mode = "lcd";
};
+&pmicobalt_wled {
+ qcom,led-strings-list = [01 02];
+};
+
&dsi_dual_nt35597_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index 2a61cccad273..bb72cf3a0d2c 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -501,7 +501,7 @@
};
pmicobalt_bob_pin1: regulator-bob-pin1 {
compatible = "qcom,rpm-smd-regulator";
- regulator-name = "pmcobalt_bob_pin1";
+ regulator-name = "pmicobalt_bob_pin1";
qcom,set = <3>;
regulator-min-microvolt = <3312000>;
regulator-max-microvolt = <3600000>;
@@ -509,7 +509,7 @@
};
pmicobalt_bob_pin2: regulator-bob-pin2 {
compatible = "qcom,rpm-smd-regulator";
- regulator-name = "pmcobalt_bob_pin2";
+ regulator-name = "pmicobalt_bob_pin2";
qcom,set = <3>;
regulator-min-microvolt = <3312000>;
regulator-max-microvolt = <3600000>;
@@ -517,7 +517,7 @@
};
pmicobalt_bob_pin3: regulator-bob-pin3 {
compatible = "qcom,rpm-smd-regulator";
- regulator-name = "pmcobalt_bob_pin3";
+ regulator-name = "pmicobalt_bob_pin3";
qcom,set = <3>;
regulator-min-microvolt = <3312000>;
regulator-max-microvolt = <3600000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-vr1.dts b/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-vr1.dts
new file mode 100644
index 000000000000..15dd2d550b31
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-vr1.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "msmcobalt-v2.dtsi"
+#include "msmcobalt-qrd-vr1.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM COBALT V2 VR1 Board";
+ compatible = "qcom,msmcobalt-qrd", "qcom,msmcobalt", "qcom,qrd";
+ qcom,board-id = <0x02000b 0x80>;
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index 81e2203a5e61..beecee843778 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -237,6 +237,84 @@
regulator-max-microvolt = <1056000>;
};
+&pcie0 {
+ qcom,phy-sequence = <0x804 0x01 0x00
+ 0x034 0x14 0x00
+ 0x138 0x30 0x00
+ 0x048 0x0f 0x00
+ 0x15c 0x06 0x00
+ 0x090 0x01 0x00
+ 0x088 0x20 0x00
+ 0x0f0 0x00 0x00
+ 0x0f8 0x01 0x00
+ 0x0f4 0xc9 0x00
+ 0x11c 0xff 0x00
+ 0x120 0x3f 0x00
+ 0x164 0x01 0x00
+ 0x154 0x00 0x00
+ 0x148 0x0a 0x00
+ 0x05C 0x19 0x00
+ 0x038 0x90 0x00
+ 0x0b0 0x82 0x00
+ 0x0c0 0x03 0x00
+ 0x0bc 0x55 0x00
+ 0x0b8 0x55 0x00
+ 0x0a0 0x00 0x00
+ 0x09c 0x0d 0x00
+ 0x098 0x04 0x00
+ 0x13c 0x00 0x00
+ 0x060 0x08 0x00
+ 0x068 0x16 0x00
+ 0x070 0x34 0x00
+ 0x15c 0x06 0x00
+ 0x138 0x33 0x00
+ 0x03c 0x02 0x00
+ 0x040 0x07 0x00
+ 0x080 0x04 0x00
+ 0x0dc 0x00 0x00
+ 0x0d8 0x3f 0x00
+ 0x00c 0x09 0x00
+ 0x010 0x01 0x00
+ 0x01c 0x40 0x00
+ 0x020 0x01 0x00
+ 0x014 0x02 0x00
+ 0x018 0x00 0x00
+ 0x024 0x7e 0x00
+ 0x028 0x15 0x00
+ 0x244 0x02 0x00
+ 0x2a4 0x12 0x00
+ 0x260 0x10 0x00
+ 0x28c 0x06 0x00
+ 0x504 0x03 0x00
+ 0x500 0x1c 0x00
+ 0x50c 0x14 0x00
+ 0x4d4 0x0a 0x00
+ 0x4d8 0x04 0x00
+ 0x4dc 0x1a 0x00
+ 0x434 0x4b 0x00
+ 0x414 0x04 0x00
+ 0x40c 0x04 0x00
+ 0x4f8 0x00 0x00
+ 0x4fc 0x80 0x00
+ 0x51c 0x40 0x00
+ 0x444 0x71 0x00
+ 0x43c 0x40 0x00
+ 0x854 0x04 0x00
+ 0x62c 0x52 0x00
+ 0x9ac 0x00 0x00
+ 0x8a0 0x01 0x00
+ 0x9e0 0x00 0x00
+ 0x9dc 0x01 0x00
+ 0x9a8 0x00 0x00
+ 0x8a4 0x01 0x00
+ 0x8a8 0x73 0x00
+ 0x9d8 0x99 0x00
+ 0x9b0 0x03 0x00
+ 0x804 0x03 0x00
+ 0x800 0x00 0x00
+ 0x808 0x03 0x00>;
+};
+
&apc0_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
@@ -326,18 +404,18 @@
1900800000>;
qcom,cpr-ro-scaling-factor =
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3198>,
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3198>,
- <3704 3601 3465 3567 3356 3473 2686
- 2773 3049 2932 2235 3816 3800 3097
- 2966 2808>,
- <2974 3092 3288 3329 2905 3096 3119
- 3225 2865 3140 2892 3592 3408 3576
- 1559 1392>;
+ <2595 2794 2577 2762 2471 2674 2199
+ 2553 3189 3255 3192 2962 3054 2982
+ 2042 2945>,
+ <2595 2794 2577 2762 2471 2674 2199
+ 2553 3189 3255 3192 2962 3054 2982
+ 2042 2945>,
+ <2391 2550 2483 2638 2382 2564 2259
+ 2555 2766 3041 2988 2935 2873 2688
+ 2013 2784>,
+ <2066 2153 2300 2434 2220 2386 2288
+ 2465 2028 2511 2487 2734 2554 2117
+ 1892 2377>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
@@ -487,18 +565,18 @@
2112000000 2208000000>;
qcom,cpr-ro-scaling-factor =
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3190>,
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3198>,
- <3704 3601 3465 3567 3356 3473 2686
- 2773 3049 2932 2235 3816 3800 3097
- 2966 2808>,
- <2974 3092 3288 3329 2905 3096 3119
- 3225 2865 3140 2892 3592 3408 3576
- 1559 1392>;
+ <2857 3057 2828 2952 2699 2798 2446
+ 2631 2629 2578 2244 3344 3289 3137
+ 3164 2655>,
+ <2857 3057 2828 2952 2699 2798 2446
+ 2631 2629 2578 2244 3344 3289 3137
+ 3164 2655>,
+ <2603 2755 2676 2777 2573 2685 2465
+ 2610 2312 2423 2243 3104 3022 3036
+ 2740 2303>,
+ <1901 2016 2096 2228 2034 2161 2077
+ 2188 1565 1870 1925 2235 2205 2413
+ 1762 1478>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
@@ -556,6 +634,7 @@
&gfx_cpr {
compatible = "qcom,cpr4-msmcobalt-v2-mmss-regulator";
+ qcom,cpr-aging-ref-voltage = <1024000>;
};
&gfx_vreg {
@@ -606,29 +685,50 @@
0 0 1627 0 1578 1353 0 0>;
qcom,cpr-ro-scaling-factor =
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>,
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>,
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>,
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>,
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>,
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>,
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>,
- < 0 0 0 0 3005 3111 0 0
- 0 0 3487 0 3280 1896 1874 0>;
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>,
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>,
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>,
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>,
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>,
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>,
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>,
+ < 0 0 0 0 2377 2571 0 0
+ 0 0 2168 0 2209 1849 1997 0>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
- < 100000 0 0 0>;
+ < 100000 0 0 0>,
+ < 100000 0 0 0>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>;
qcom,cpr-closed-loop-voltage-adjustment =
< 96000 18000 4000 0
- 0 13000 9000 0>;
+ 0 13000 9000 0>,
+ < 96000 18000 4000 0
+ 0 13000 9000 0>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>;
qcom,cpr-floor-to-ceiling-max-range =
<50000 50000 50000 50000 50000 50000 70000 70000>;
@@ -642,7 +742,7 @@
qcom,cpr-aging-max-voltage-adjustment = <15000>;
qcom,cpr-aging-ref-corner = <8>;
qcom,cpr-aging-ro-scaling-factor = <2950>;
- qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-voltage-adjustment = <0 0 1 1 1 1 1 1>;
};
&qusb_phy0 {
@@ -840,3 +940,7 @@
lanes-per-direction = <2>;
};
+
+&ssc_sensors {
+ qcom,firmware-name = "slpi_v2";
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index b21e2dcf8c1a..60b514c7ca20 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -2810,9 +2810,10 @@
};
};
- qcom,msm-ssc-sensors {
+ ssc_sensors: qcom,msm-ssc-sensors {
compatible = "qcom,msm-ssc-sensors";
status = "ok";
+ qcom,firmware-name = "slpi_v1";
};
dcc: dcc@10b3000 {
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
index 3826b00bf09e..2f1ef974811e 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
@@ -138,6 +138,14 @@
<&funnel_in0_out_funnel_merg>;
};
};
+ port@2 {
+ reg = <1>;
+ funnel_merg_in_funnel_in1:endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_in1_out_funnel_merg>;
+ };
+ };
};
};
@@ -183,6 +191,167 @@
};
};
+ funnel_in1: funnel@6042000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6042000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-in1";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_in1_out_funnel_merg: endpoint {
+ remote-endpoint =
+ <&funnel_merg_in_funnel_in1>;
+ };
+ };
+ port@5 {
+ reg = <6>;
+ funnel_in1_in_funnel_apss_merg: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_apss_merg_out_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ funnel_apss_merg: funnel@7b70000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x7b70000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-apss-merg";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_apss_merg_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_funnel_apss_merg>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ funnel_apss_merg_in_funnel_apss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_apss_out_funnel_apss_merg>;
+ };
+ };
+ };
+ };
+
+ funnel_apss: funnel@7b60000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x7b60000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-apss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_apss_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_funnel_apss>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ funnel_apss_in_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm0_out_funnel_apss>;
+ };
+ };
+ port@2 {
+ reg = <1>;
+ funnel_apss_in_etm1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm1_out_funnel_apss>;
+ };
+ };
+ port@3 {
+ reg = <2>;
+ funnel_apss_in_etm2: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm2_out_funnel_apss>;
+ };
+ };
+ port@4 {
+ reg = <3>;
+ funnel_apss_in_etm3: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm3_out_funnel_apss>;
+ };
+ };
+ port@5 {
+ reg = <4>;
+ funnel_apss_in_etm4: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm4_out_funnel_apss>;
+ };
+ };
+ port@6 {
+ reg = <5>;
+ funnel_apss_in_etm5: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm5_out_funnel_apss>;
+ };
+ };
+ port@7 {
+ reg = <6>;
+ funnel_apss_in_etm6: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm6_out_funnel_apss>;
+ };
+ };
+ port@8 {
+ reg = <7>;
+ funnel_apss_in_etm7: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm7_out_funnel_apss>;
+ };
+ };
+ };
+ };
+
stm: stm@6002000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b962>;
@@ -204,6 +373,166 @@
};
};
+ etm0: etm@7840000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7840000 0x1000>;
+ cpu = <&CPU0>;
+
+ coresight-name = "coresight-etm0";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm0_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm0>;
+ };
+ };
+ };
+
+ etm1: etm@7940000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7940000 0x1000>;
+ cpu = <&CPU1>;
+
+ coresight-name = "coresight-etm1";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm1_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm1>;
+ };
+ };
+ };
+
+ etm2: etm@7a40000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7a40000 0x1000>;
+ cpu = <&CPU2>;
+
+ coresight-name = "coresight-etm2";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm2_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm2>;
+ };
+ };
+ };
+
+ etm3: etm@7b40000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7b40000 0x1000>;
+ cpu = <&CPU3>;
+
+ coresight-name = "coresight-etm3";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm3_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm3>;
+ };
+ };
+ };
+
+ etm4: etm@7c40000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7c40000 0x1000>;
+ cpu = <&CPU4>;
+
+ coresight-name = "coresight-etm4";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm4_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm4>;
+ };
+ };
+ };
+
+ etm5: etm@7d40000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7d40000 0x1000>;
+ cpu = <&CPU5>;
+
+ coresight-name = "coresight-etm5";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm5_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm5>;
+ };
+ };
+ };
+
+ etm6: etm@7e40000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7e40000 0x1000>;
+ cpu = <&CPU6>;
+
+ coresight-name = "coresight-etm6";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm6_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm6>;
+ };
+ };
+ };
+
+ etm7: etm@7f40000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b95d>;
+
+ reg = <0x7f40000 0x1000>;
+ cpu = <&CPU7>;
+
+ coresight-name = "coresight-etm7";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ port{
+ etm7_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm7>;
+ };
+ };
+ };
+
cti0: cti@6010000 {
compatible = "arm,coresight-cti";
reg = <0x6010000 0x1000>;
@@ -396,6 +725,110 @@
clock-names = "core_clk", "core_a_clk";
};
+ cti_cpu0: cti@7820000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7820000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu0";
+ cpu = <&CPU0>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_cpu1: cti@7920000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7920000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu1";
+ cpu = <&CPU1>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_cpu2: cti@7a20000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7a20000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu2";
+ cpu = <&CPU2>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_cpu3: cti@7b20000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7b20000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu3";
+ cpu = <&CPU3>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_cpu4: cti@7c20000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7c20000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu4";
+ cpu = <&CPU4>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_cpu5: cti@7d20000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7d20000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu5";
+ cpu = <&CPU5>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_cpu6: cti@7e20000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7e20000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu6";
+ cpu = <&CPU6>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_cpu7: cti@7f20000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7f20000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu7";
+ cpu = <&CPU7>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
funnel_qatb: funnel@6005000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b908>;
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi
index f6deef335844..00b9e61d01b8 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi
@@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
- system_contig_heap: qcom,ion-heap@21 {
- reg = <21>;
- qcom,ion-heap-type = "SYSTEM_CONTIG";
- };
-
qcom,ion-heap@22 { /* ADSP HEAP */
reg = <22>;
memory-region = <&adsp_mem>;
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
index e93067e3697c..b43fae954ec2 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
@@ -173,6 +173,29 @@
gpios = <&smp2pgpio_smp2p_5_out 0 0>;
};
+ /* ssr - inbound entry from mss */
+ smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to mss */
+ smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
/* ssr - inbound entry from lpass */
smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
compatible = "qcom,smp2pgpio";
diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
index 8e8c407734eb..7a5d83a12bfa 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
@@ -447,7 +447,7 @@
<0x10b4000 0x800>;
reg-names = "dcc-base", "dcc-ram-base";
- clocks = <&clock_rpmcc RPM_QDSS_CLK>;
+ clocks = <&clock_rpmcc GCC_DCC_AHB_CLK>;
clock-names = "dcc_clk";
};
@@ -754,6 +754,59 @@
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
status = "ok";
};
+
+ pil_modem: qcom,mss@4080000 {
+ compatible = "qcom,pil-q6v55-mss";
+ reg = <0x4080000 0x100>,
+ <0x1f63000 0x008>,
+ <0x1f65000 0x008>,
+ <0x1f64000 0x008>,
+ <0x4180000 0x040>,
+ <0x00179000 0x004>;
+ reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+ "halt_nc", "rmb_base", "restart_reg";
+
+ clocks = <&clock_rpmcc RPM_XO_CLK_SRC>,
+ <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
+ <&clock_gcc GCC_BIMC_MSS_Q6_AXI_CLK>,
+ <&clock_gcc GCC_BOOT_ROM_AHB_CLK>,
+ <&clock_gcc GPLL0_OUT_MSSCC>,
+ <&clock_gcc GCC_MSS_SNOC_AXI_CLK>,
+ <&clock_gcc GCC_MSS_MNOC_BIMC_AXI_CLK>,
+ <&clock_rpmcc RPM_QDSS_CLK>;
+ clock-names = "xo", "iface_clk", "bus_clk",
+ "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk", "qdss_clk";
+ qcom,proxy-clock-names = "xo", "qdss_clk";
+ qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
+ "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk";
+
+ interrupts = <0 448 1>;
+ vdd_cx-supply = <&pmfalcon_s3b_level>;
+ vdd_cx-voltage = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+ vdd_mx-supply = <&pmfalcon_s5b_level>;
+ vdd_mx-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+ qcom,firmware-name = "modem";
+ qcom,pil-self-auth;
+ qcom,sysmon-id = <0>;
+ qcom,ssctl-instance-id = <0x12>;
+ qcom,override-acc;
+ qcom,qdsp6v62-1-5;
+ memory-region = <&modem_fw_mem>;
+ qcom,mem-protect-id = <0xF>;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+ qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ status = "ok";
+ };
};
#include "msmfalcon-ion.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi
index f6deef335844..00b9e61d01b8 100644
--- a/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi
@@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
- system_contig_heap: qcom,ion-heap@21 {
- reg = <21>;
- qcom,ion-heap-type = "SYSTEM_CONTIG";
- };
-
qcom,ion-heap@22 { /* ADSP HEAP */
reg = <22>;
memory-region = <&adsp_mem>;
diff --git a/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
index 1a72414de094..b458bbb08dc2 100644
--- a/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
+++ b/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
@@ -134,6 +134,29 @@
gpios = <&smp2pgpio_sleepstate_2_out 0 0>;
};
+ /* ssr - inbound entry from mss */
+ smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to mss */
+ smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
/* ssr - inbound entry from lpass */
smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
compatible = "qcom,smp2pgpio";
diff --git a/arch/arm/boot/dts/qcom/msmtriton.dtsi b/arch/arm/boot/dts/qcom/msmtriton.dtsi
index 083c14af7839..807c40fcc46e 100644
--- a/arch/arm/boot/dts/qcom/msmtriton.dtsi
+++ b/arch/arm/boot/dts/qcom/msmtriton.dtsi
@@ -609,6 +609,59 @@
memory-region = <&venus_fw_mem>;
status = "ok";
};
+
+ pil_modem: qcom,mss@4080000 {
+ compatible = "qcom,pil-q6v55-mss";
+ reg = <0x4080000 0x100>,
+ <0x1f63000 0x008>,
+ <0x1f65000 0x008>,
+ <0x1f64000 0x008>,
+ <0x4180000 0x040>,
+ <0x00179000 0x004>;
+ reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+ "halt_nc", "rmb_base", "restart_reg";
+
+ clocks = <&clock_rpmcc RPM_XO_CLK_SRC>,
+ <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
+ <&clock_gcc GCC_BIMC_MSS_Q6_AXI_CLK>,
+ <&clock_gcc GCC_BOOT_ROM_AHB_CLK>,
+ <&clock_gcc GPLL0_OUT_MSSCC>,
+ <&clock_gcc GCC_MSS_SNOC_AXI_CLK>,
+ <&clock_gcc GCC_MSS_MNOC_BIMC_AXI_CLK>,
+ <&clock_rpmcc RPM_QDSS_CLK>;
+ clock-names = "xo", "iface_clk", "bus_clk",
+ "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk", "qdss_clk";
+ qcom,proxy-clock-names = "xo", "qdss_clk";
+ qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
+ "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk";
+
+ interrupts = <0 448 1>;
+ vdd_cx-supply = <&pmfalcon_s3b_level>;
+ vdd_cx-voltage = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+ vdd_mx-supply = <&pmfalcon_s5b_level>;
+ vdd_mx-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+ qcom,firmware-name = "modem";
+ qcom,pil-self-auth;
+ qcom,sysmon-id = <0>;
+ qcom,ssctl-instance-id = <0x12>;
+ qcom,override-acc;
+ qcom,qdsp6v62-1-5;
+ memory-region = <&modem_fw_mem>;
+ qcom,mem-protect-id = <0xF>;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+ qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ status = "ok";
+ };
};
#include "msmtriton-ion.dtsi"
diff --git a/arch/arm/configs/msmcortex_defconfig b/arch/arm/configs/msmcortex_defconfig
index 48507eebe9f3..8be2b187a524 100644
--- a/arch/arm/configs/msmcortex_defconfig
+++ b/arch/arm/configs/msmcortex_defconfig
@@ -442,6 +442,7 @@ CONFIG_QCOM_SCM=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
diff --git a/arch/arm/configs/msmfalcon_defconfig b/arch/arm/configs/msmfalcon_defconfig
index 41572d54afcb..0788a03ed219 100644
--- a/arch/arm/configs/msmfalcon_defconfig
+++ b/arch/arm/configs/msmfalcon_defconfig
@@ -336,7 +336,6 @@ CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_DP_PANEL=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -452,6 +451,7 @@ CONFIG_QCOM_SCM=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 194c91b610ff..c35c349da069 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -79,6 +79,8 @@
#define rr_lo_hi(a1, a2) a1, a2
#endif
+#define kvm_ksym_ref(kva) (kva)
+
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index e06fd299de08..70e6d557c75f 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -969,7 +969,7 @@ static void cpu_init_hyp_mode(void *dummy)
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)__kvm_hyp_vector;
+ vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
@@ -1061,7 +1061,8 @@ static int init_hyp_mode(void)
/*
* Map the Hyp-code called directly from the host
*/
- err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+ err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
+ kvm_ksym_ref(__kvm_hyp_code_end));
if (err) {
kvm_err("Cannot map world-switch code\n");
goto out_free_mappings;
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 47905a50e075..318394ed5c7a 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci,
u32 mask = (0x1ull << (size * 8)) - 1;
int shift = (where % 4) * 8;
- v = readl_relaxed(base + (where & 0xffc));
+ v = readl_relaxed(base);
v &= ~(mask << shift);
v |= (val & mask) << shift;
- writel_relaxed(v, base + (where & 0xffc));
- readl_relaxed(base + (where & 0xffc));
+ writel_relaxed(v, base);
+ readl_relaxed(base);
}
static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 3a10f1a8317a..bfd8bb371477 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -26,6 +26,7 @@ menuconfig ARCH_EXYNOS
select S5P_DEV_MFC
select SRAM
select THERMAL
+ select THERMAL_OF
select MFD_SYSCON
help
Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 7c21760f590f..875a2bab64f6 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
if (IS_ERR(pd->clk[i]))
break;
- if (IS_ERR(pd->clk[i]))
+ if (IS_ERR(pd->pclk[i]))
continue; /* Skip on first power up */
if (clk_set_parent(pd->clk[i], pd->pclk[i]))
pr_err("%s: error setting parent to clock%d\n",
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index aa7b379e2661..2a3db0bd9e15 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -34,6 +34,7 @@
#include "pm.h"
#include "control.h"
#include "common.h"
+#include "soc.h"
/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
.safe_state_index = 0,
};
+/*
+ * Numbers based on measurements made in October 2009 for PM optimized kernel
+ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
+ * and worst case latencies).
+ */
+static struct cpuidle_driver omap3430_idle_driver = {
+ .name = "omap3430_idle",
+ .owner = THIS_MODULE,
+ .states = {
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 110 + 162,
+ .target_residency = 5,
+ .name = "C1",
+ .desc = "MPU ON + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 106 + 180,
+ .target_residency = 309,
+ .name = "C2",
+ .desc = "MPU ON + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 107 + 410,
+ .target_residency = 46057,
+ .name = "C3",
+ .desc = "MPU RET + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 121 + 3374,
+ .target_residency = 46057,
+ .name = "C4",
+ .desc = "MPU OFF + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 855 + 1146,
+ .target_residency = 46057,
+ .name = "C5",
+ .desc = "MPU RET + CORE RET",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 7580 + 4134,
+ .target_residency = 484329,
+ .name = "C6",
+ .desc = "MPU OFF + CORE RET",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 7505 + 15274,
+ .target_residency = 484329,
+ .name = "C7",
+ .desc = "MPU OFF + CORE OFF",
+ },
+ },
+ .state_count = ARRAY_SIZE(omap3_idle_data),
+ .safe_state_index = 0,
+};
+
/* Public functions */
/**
@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
return -ENODEV;
- return cpuidle_register(&omap3_idle_driver, NULL);
+ if (cpu_is_omap3430())
+ return cpuidle_register(&omap3430_idle_driver, NULL);
+ else
+ return cpuidle_register(&omap3_idle_driver, NULL);
}
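
The exit_latency and target_residency figures in the omap3430 table above are what the cpuidle governor weighs when picking a state: a state is only worth entering if the predicted idle time covers its target residency and the current latency constraint covers its exit latency. A minimal userspace sketch of that selection rule, reusing three of the values from the table; the struct and the pick_state() helper are illustrative and not the kernel's cpuidle API.

#include <stdio.h>

struct idle_state {
        const char   *name;
        unsigned int exit_latency;      /* us */
        unsigned int target_residency;  /* us */
};

/* Values mirror the C1/C3/C7 entries from the table above. */
static const struct idle_state states[] = {
        { "C1", 110 + 162,    5      },
        { "C3", 107 + 410,    46057  },
        { "C7", 7505 + 15274, 484329 },
};

/* Pick the deepest state whose cost still fits the predicted idle window. */
static int pick_state(unsigned int predicted_us, unsigned int latency_limit_us)
{
        int i, best = 0;

        for (i = 0; i < (int)(sizeof(states) / sizeof(states[0])); i++) {
                if (states[i].exit_latency > latency_limit_us)
                        break;
                if (states[i].target_residency > predicted_us)
                        break;
                best = i;
        }
        return best;
}

int main(void)
{
        printf("short idle -> %s\n", states[pick_state(1000, 100000)].name);
        printf("long idle  -> %s\n", states[pick_state(600000, 100000)].name);
        return 0;
}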
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3eaeaca5da05..3a911d8dea8b 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
void __init dra7xx_map_io(void)
{
iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+ omap_barriers_init();
}
#endif
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 8e0bd5939e5a..147c90e70b2e 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
(sf & SYSC_HAS_CLOCKACTIVITY))
_set_clockactivity(oh, oh->class->sysc->clockact, &v);
- /* If the cached value is the same as the new value, skip the write */
- if (oh->_sysc_cache != v)
- _write_sysconfig(v, oh);
+ _write_sysconfig(v, oh);
/*
* Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
_set_master_standbymode(oh, idlemode, &v);
}
- _write_sysconfig(v, oh);
+ /* If the cached value is the same as the new value, skip the write */
+ if (oh->_sysc_cache != v)
+ _write_sysconfig(v, oh);
}
/**
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 9ab8932403e5..56e55fd37d13 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -1,6 +1,7 @@
menuconfig ARCH_SIRF
bool "CSR SiRF" if ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
+ select RESET_CONTROLLER
select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
select NO_IOPORT_MAP
diff --git a/arch/arm/mach-qcom/board-falcon.c b/arch/arm/mach-qcom/board-falcon.c
index e9374050b2cb..aec16886308d 100644
--- a/arch/arm/mach-qcom/board-falcon.c
+++ b/arch/arm/mach-qcom/board-falcon.c
@@ -31,3 +31,20 @@ DT_MACHINE_START(MSMFALCON_DT,
.init_machine = msmfalcon_init,
.dt_compat = msmfalcon_dt_match,
MACHINE_END
+
+static const char *msmtriton_dt_match[] __initconst = {
+ "qcom,msmtriton",
+ "qcom,apqtriton",
+ NULL
+};
+
+static void __init msmtriton_init(void)
+{
+ board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(MSMTRITON_DT,
+ "Qualcomm Technologies, Inc. MSM TRITON (Flattened Device Tree)")
+ .init_machine = msmtriton_init,
+ .dt_compat = msmtriton_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index 5d94b7a2fb10..c160fa3007e9 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -13,6 +13,7 @@
#include <asm/assembler.h>
.arch armv7-a
+ .arm
ENTRY(secondary_trampoline)
/* CPU1 will always fetch from 0x0 when it is brought out of reset.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0314b80695ca..65fbf6633e28 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -51,6 +51,7 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KGDB
@@ -424,6 +425,7 @@ config ARM64_ERRATUM_843419
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
depends on MODULES
default y
+ select ARM64_MODULE_CMODEL_LARGE
help
This option builds kernel modules using the large memory model in
order to avoid the use of the ADRP instruction, which can cause
@@ -601,6 +603,9 @@ config ARCH_NR_GPIO
source kernel/Kconfig.preempt
source kernel/Kconfig.hz
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ def_bool y
+
config ARCH_HAS_HOLES_MEMORYMODEL
def_bool y if SPARSEMEM
@@ -651,9 +656,6 @@ config PERF_EVENTS_RESET_PMU_DEBUGFS
config SYS_SUPPORTS_HUGETLBFS
def_bool y
-config ARCH_WANT_GENERAL_HUGETLB
- def_bool y
-
config ARCH_WANT_HUGE_PMD_SHARE
def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
@@ -826,10 +828,93 @@ config ARM64_LSE_ATOMICS
endmenu
+config ARM64_UAO
+ bool "Enable support for User Access Override (UAO)"
+ default y
+ help
+ User Access Override (UAO; part of the ARMv8.2 Extensions)
+ causes the 'unprivileged' variant of the load/store instructions to
+ be overridden to be privileged.
+
+ This option changes get_user() and friends to use the 'unprivileged'
+ variant of the load/store instructions. This ensures that user-space
+ really did have access to the supplied memory. When addr_limit is
+ set to kernel memory the UAO bit will be set, allowing privileged
+ access to kernel memory.
+
+ Choosing this option will cause copy_to_user() et al to use user-space
+ memory permissions.
+
+ The feature is detected at runtime, the kernel will use the
+ regular load/store instructions if the cpu does not implement the
+ feature.
+
+config ARM64_MODULE_CMODEL_LARGE
+ bool
+
+config ARM64_MODULE_PLTS
+ bool
+ select ARM64_MODULE_CMODEL_LARGE
+ select HAVE_MOD_ARCH_SPECIFIC
+
+config RELOCATABLE
+ bool
+ help
+ This builds the kernel as a Position Independent Executable (PIE),
+ which retains all relocation metadata required to relocate the
+ kernel binary at runtime to a different virtual address than the
+ address it was linked at.
+ Since AArch64 uses the RELA relocation format, this requires a
+ relocation pass at runtime even if the kernel is loaded at the
+ same address it was linked at.
+
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ select ARM64_MODULE_PLTS
+ select RELOCATABLE
+ help
+ Randomizes the virtual address at which the kernel image is
+ loaded, as a security feature that deters exploit attempts
+ relying on knowledge of the location of kernel internals.
+
+ It is the bootloader's job to provide entropy, by passing a
+ random u64 value in /chosen/kaslr-seed at kernel entry.
+
+ When booting via the UEFI stub, it will invoke the firmware's
+ EFI_RNG_PROTOCOL implementation (if available) to supply entropy
+ to the kernel proper. In addition, it will randomise the physical
+ location of the kernel Image as well.
+
+ If unsure, say N.
+
+config RANDOMIZE_MODULE_REGION_FULL
+ bool "Randomize the module region independently from the core kernel"
+ depends on RANDOMIZE_BASE
+ default y
+ help
+ Randomizes the location of the module region without considering the
+ location of the core kernel. This way, it is impossible for modules
+ to leak information about the location of core kernel data structures
+ but it does imply that function calls between modules and the core
+ kernel will need to be resolved via veneers in the module PLT.
+
+ When this option is not set, the module region will be randomized over
+ a limited range that contains the [_stext, _etext] interval of the
+ core kernel, so branch relocations are always in range.
+
endmenu
menu "Boot options"
+config ARM64_ACPI_PARKING_PROTOCOL
+ bool "Enable support for the ARM64 ACPI parking protocol"
+ depends on ACPI
+ help
+ Enable support for the ARM64 ACPI parking protocol. If disabled
+ the kernel will not allow booting through the ARM64 ACPI parking
+ protocol even if the corresponding data is present in the ACPI
+ MADT table.
+
config CMDLINE
string "Default kernel command string"
default ""
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 285b32fa41c1..14c74a66c58e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,6 +15,10 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
+ifneq ($(CONFIG_RELOCATABLE),)
+LDFLAGS_vmlinux += -pie
+endif
+
KBUILD_DEFCONFIG := defconfig
# Check for binutils support for specific extensions
@@ -29,6 +33,7 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
KBUILD_CFLAGS += -fno-pic
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_AFLAGS += $(lseinstr)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
@@ -43,10 +48,14 @@ endif
CHECKFLAGS += -D__aarch64__
-ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
KBUILD_CFLAGS_MODULE += -mcmodel=large
endif
+ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
+KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 5d2f29b053c4..0bda100dfb5a 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -321,6 +321,7 @@ CONFIG_QPNP_SMB2=y
CONFIG_SMB138X_CHARGER=y
CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
CONFIG_LIMITS_MONITOR=y
CONFIG_LIMITS_LITE_HW=y
CONFIG_THERMAL_MONITOR=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 741f2e5bc8f6..3568fe4ed29f 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -324,6 +324,7 @@ CONFIG_QPNP_SMB2=y
CONFIG_SMB138X_CHARGER=y
CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
CONFIG_LIMITS_MONITOR=y
CONFIG_LIMITS_LITE_HW=y
CONFIG_THERMAL_MONITOR=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 861ed8acdc3f..d49e164867e1 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += bug.h
generic-y += bugs.h
-generic-y += checksum.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += current.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index caafd63b8092..aee323b13802 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -87,9 +87,26 @@ void __init acpi_init_cpus(void);
static inline void acpi_init_cpus(void) { }
#endif /* CONFIG_ACPI */
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+bool acpi_parking_protocol_valid(int cpu);
+void __init
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
+#else
+static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
+static inline void
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
+{}
+#endif
+
static inline const char *acpi_get_enable_method(int cpu)
{
- return acpi_psci_present() ? "psci" : NULL;
+ if (acpi_psci_present())
+ return "psci";
+
+ if (acpi_parking_protocol_valid(cpu))
+ return "parking-protocol";
+
+ return NULL;
}
#ifdef CONFIG_ACPI_APEI
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index d56ec0715157..beccbdefa106 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
+#include <asm/cpufeature.h>
+
#ifndef __ASSEMBLY__
#include <linux/init.h>
@@ -19,7 +21,6 @@ struct alt_instr {
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
-void free_alternatives_memory(void);
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
@@ -64,6 +65,8 @@ void free_alternatives_memory(void);
#else
+#include <asm/assembler.h>
+
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
.word \orig_offset - .
.word \alt_offset - .
@@ -137,6 +140,65 @@ void free_alternatives_memory(void);
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+/*
+ * Generate the assembly for UAO alternatives with exception table entries.
+ * This is complicated as there are no post-increment or pair versions of the
+ * unprivileged instructions, and USER() only works for single instructions.
+ */
+#ifdef CONFIG_ARM64_UAO
+ .macro uao_ldp l, reg1, reg2, addr, post_inc
+ alternative_if_not ARM64_HAS_UAO
+8888: ldp \reg1, \reg2, [\addr], \post_inc;
+8889: nop;
+ nop;
+ alternative_else
+ ldtr \reg1, [\addr];
+ ldtr \reg2, [\addr, #8];
+ add \addr, \addr, \post_inc;
+ alternative_endif
+
+ _asm_extable 8888b,\l;
+ _asm_extable 8889b,\l;
+ .endm
+
+ .macro uao_stp l, reg1, reg2, addr, post_inc
+ alternative_if_not ARM64_HAS_UAO
+8888: stp \reg1, \reg2, [\addr], \post_inc;
+8889: nop;
+ nop;
+ alternative_else
+ sttr \reg1, [\addr];
+ sttr \reg2, [\addr, #8];
+ add \addr, \addr, \post_inc;
+ alternative_endif
+
+ _asm_extable 8888b,\l;
+ _asm_extable 8889b,\l;
+ .endm
+
+ .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
+ alternative_if_not ARM64_HAS_UAO
+8888: \inst \reg, [\addr], \post_inc;
+ nop;
+ alternative_else
+ \alt_inst \reg, [\addr];
+ add \addr, \addr, \post_inc;
+ alternative_endif
+
+ _asm_extable 8888b,\l;
+ .endm
+#else
+ .macro uao_ldp l, reg1, reg2, addr, post_inc
+ USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
+ .endm
+ .macro uao_stp l, reg1, reg2, addr, post_inc
+ USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
+ .endm
+ .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
+ USER(\l, \inst \reg, [\addr], \post_inc)
+ .endm
+#endif
+
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 09f13a96941b..fcaf3cce639a 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -106,12 +106,19 @@
dmb \opt
.endm
+/*
+ * Emit an entry into the exception table
+ */
+ .macro _asm_extable, from, to
+ .pushsection __ex_table, "a"
+ .align 3
+ .long (\from - .), (\to - .)
+ .popsection
+ .endm
+
#define USER(l, x...) \
9999: x; \
- .section __ex_table,"a"; \
- .align 3; \
- .quad 9999b,l; \
- .previous
+ _asm_extable 9999b, l
/*
* Register aliases.
@@ -205,6 +212,17 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm
+ /*
+ * @sym: The name of the per-cpu variable
+ * @reg: Result of per_cpu(sym, smp_processor_id())
+ * @tmp: scratch register
+ */
+ .macro this_cpu_ptr, sym, reg, tmp
+ adr_l \reg, \sym
+ mrs \tmp, tpidr_el1
+ add \reg, \reg, \tmp
+ .endm
+
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
@@ -216,4 +234,15 @@ lr .req x30 // link register
.size __pi_##x, . - x; \
ENDPROC(x)
+ /*
+ * Emit a 64-bit absolute little endian symbol reference in a way that
+ * ensures that it will be resolved at build time, even when building a
+ * PIE binary. This requires cooperation from the linker script, which
+ * must emit the lo32/hi32 halves individually.
+ */
+ .macro le64sym, sym
+ .long \sym\()_lo32
+ .long \sym\()_hi32
+ .endm
+
#endif /* __ASM_ASSEMBLER_H */
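
The new _asm_extable macro above records each exception-table entry as two 32-bit offsets relative to the entry itself, where the old USER() definition emitted absolute .quad addresses. A small self-contained sketch of how such relative entries encode and decode; struct extable_entry and the helpers are illustrative names, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* One relative entry: each field holds "target address - address of the field". */
struct extable_entry {
        int32_t insn;   /* offset to the instruction that may fault */
        int32_t fixup;  /* offset to its fixup code */
};

static void encode(struct extable_entry *e, uintptr_t insn, uintptr_t fixup)
{
        e->insn  = (int32_t)(insn  - (uintptr_t)&e->insn);
        e->fixup = (int32_t)(fixup - (uintptr_t)&e->fixup);
}

static uintptr_t entry_insn(const struct extable_entry *e)
{
        return (uintptr_t)&e->insn + e->insn;
}

static uintptr_t entry_fixup(const struct extable_entry *e)
{
        return (uintptr_t)&e->fixup + e->fixup;
}

static char text[64];                  /* stand-in for a nearby code region */
static struct extable_entry entry;     /* static, so the +/-2GB offsets fit */

int main(void)
{
        encode(&entry, (uintptr_t)&text[8], (uintptr_t)&text[32]);
        printf("insn decodes back:  %d\n", entry_insn(&entry)  == (uintptr_t)&text[8]);
        printf("fixup decodes back: %d\n", entry_fixup(&entry) == (uintptr_t)&text[32]);
        return 0;
}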
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 197e06afbf71..39c1d340fec5 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -36,7 +36,7 @@ static inline void atomic_andnot(int i, atomic_t *v)
" stclr %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic_or(int i, atomic_t *v)
@@ -48,7 +48,7 @@ static inline void atomic_or(int i, atomic_t *v)
" stset %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic_xor(int i, atomic_t *v)
@@ -60,7 +60,7 @@ static inline void atomic_xor(int i, atomic_t *v)
" steor %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic_add(int i, atomic_t *v)
@@ -72,7 +72,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stadd %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
@@ -90,7 +90,7 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : "x30" , ##cl); \
+ : __LL_SC_CLOBBERS, ##cl); \
\
return w0; \
}
@@ -116,7 +116,7 @@ static inline void atomic_and(int i, atomic_t *v)
" stclr %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic_sub(int i, atomic_t *v)
@@ -133,7 +133,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stadd %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
@@ -153,7 +153,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : "x30" , ##cl); \
+ : __LL_SC_CLOBBERS , ##cl); \
\
return w0; \
}
@@ -177,7 +177,7 @@ static inline void atomic64_andnot(long i, atomic64_t *v)
" stclr %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic64_or(long i, atomic64_t *v)
@@ -189,7 +189,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
" stset %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic64_xor(long i, atomic64_t *v)
@@ -201,7 +201,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
" steor %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic64_add(long i, atomic64_t *v)
@@ -213,7 +213,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
" stadd %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
@@ -231,7 +231,7 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : "x30" , ##cl); \
+ : __LL_SC_CLOBBERS, ##cl); \
\
return x0; \
}
@@ -257,7 +257,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
" stclr %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
static inline void atomic64_sub(long i, atomic64_t *v)
@@ -274,7 +274,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
" stadd %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : "x30");
+ : __LL_SC_CLOBBERS);
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
@@ -294,7 +294,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : "x30" , ##cl); \
+ : __LL_SC_CLOBBERS, ##cl); \
\
return x0; \
}
@@ -330,7 +330,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
"2:")
: [ret] "+&r" (x0), [v] "+Q" (v->counter)
:
- : "x30", "cc", "memory");
+ : __LL_SC_CLOBBERS, "cc", "memory");
return x0;
}
@@ -359,7 +359,7 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
" mov %" #w "[ret], " #w "30") \
: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
: [old] "r" (x1), [new] "r" (x2) \
- : "x30" , ##cl); \
+ : __LL_SC_CLOBBERS, ##cl); \
\
return x0; \
}
@@ -416,7 +416,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
[v] "+Q" (*(unsigned long *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
- : "x30" , ##cl); \
+ : __LL_SC_CLOBBERS, ##cl); \
\
return x0; \
}
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index 81151b67b26b..ebf2481889c3 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -11,4 +11,10 @@
#define MIN_FDT_ALIGN 8
#define MAX_FDT_SIZE SZ_2M
+/*
+ * arm64 requires the kernel image to be placed
+ * TEXT_OFFSET bytes beyond a 2 MB aligned base
+ */
+#define MIN_KIMG_ALIGN SZ_2M
+
#endif
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
new file mode 100644
index 000000000000..ed693c5bcec0
--- /dev/null
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BRK_IMM_H
+#define __ASM_BRK_IMM_H
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
+ */
+#define FAULT_BRK_IMM 0x100
+#define KGDB_DYN_DBG_BRK_IMM 0x400
+#define KGDB_COMPILED_DBG_BRK_IMM 0x401
+#define BUG_BRK_IMM 0x800
+
+#endif
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 4a748ce9ba1a..561190d15881 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -18,7 +18,7 @@
#ifndef _ARCH_ARM64_ASM_BUG_H
#define _ARCH_ARM64_ASM_BUG_H
-#include <asm/debug-monitors.h>
+#include <asm/brk-imm.h>
#ifdef CONFIG_GENERIC_BUG
#define HAVE_ARCH_BUG
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index a7d7d360a514..df06d37374cc 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -73,6 +73,7 @@ extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
+extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
static inline void flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
new file mode 100644
index 000000000000..09f65339d66d
--- /dev/null
+++ b/arch/arm64/include/asm/checksum.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CHECKSUM_H
+#define __ASM_CHECKSUM_H
+
+#include <linux/types.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum += (sum >> 16) | (sum << 16);
+ return ~(__force __sum16)(sum >> 16);
+}
+#define csum_fold csum_fold
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ __uint128_t tmp;
+ u64 sum;
+
+ tmp = *(const __uint128_t *)iph;
+ iph += 16;
+ ihl -= 4;
+ tmp += ((tmp >> 64) | (tmp << 64));
+ sum = tmp >> 64;
+ do {
+ sum += *(const u32 *)iph;
+ iph += 4;
+ } while (--ihl);
+
+ sum += ((sum >> 32) | (sum << 32));
+ return csum_fold(sum >> 32);
+}
+#define ip_fast_csum ip_fast_csum
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CHECKSUM_H */
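
The new ip_fast_csum() accumulates the header through a __uint128_t and folds the result down with csum_fold(); the end result is the ordinary 16-bit one's-complement Internet checksum. A portable userspace sketch of the same arithmetic (simple byte-pair summing rather than the 128-bit trick, and glossing over on-wire byte order); ip_csum() and the sample header are for illustration only. A header that carries a correct checksum re-sums to zero, which the recheck below demonstrates.

#include <stdint.h>
#include <stdio.h>

/* One's-complement sum over 16-bit pairs, then complement (ihl in 32-bit words). */
static uint16_t ip_csum(const uint8_t *hdr, unsigned int ihl)
{
        uint32_t sum = 0;
        unsigned int i;

        for (i = 0; i < 4 * ihl; i += 2)
                sum += (uint32_t)hdr[i] | ((uint32_t)hdr[i + 1] << 8);
        while (sum >> 16)                       /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        /* Sample 20-byte IPv4 header, checksum field (bytes 10-11) still zero. */
        uint8_t hdr[20] = {
                0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
                0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
                0xac, 0x10, 0x0a, 0x0c,
        };
        uint16_t csum = ip_csum(hdr, 5);

        hdr[10] = csum & 0xff;                  /* store in the same byte order */
        hdr[11] = csum >> 8;                    /* the pairing above used */
        printf("checksum=0x%04x recheck=0x%04x (0 means consistent)\n",
               csum, ip_csum(hdr, 5));
        return 0;
}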
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 9ea611ea69df..510c7b404454 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,7 +19,6 @@
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
-#include <linux/mmdebug.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index b5e9cee4b5f8..13a6103130cd 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -36,6 +36,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64isar1;
u64 reg_id_aa64mmfr0;
u64 reg_id_aa64mmfr1;
+ u64 reg_id_aa64mmfr2;
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 2e73ef7b9239..44e8b3d76fb7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -30,8 +30,11 @@
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
+#define ARM64_HAS_NO_HW_PREFETCH 8
+#define ARM64_HAS_UAO 9
+#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_NCAPS 8
+#define ARM64_NCAPS 11
#ifndef __ASSEMBLY__
@@ -178,7 +181,7 @@ u64 read_system_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
- return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+ return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
}
static inline bool system_supports_mixed_endian_el0(void)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index a2014784cab2..1ddc9c930097 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -36,12 +36,6 @@
#define MMFR0_16KGRAN_SHFT 20
#define MMFR0_EL1_16KGRAN_MASK (MMFR0_16KGRAN_SIZE << MMFR0_16KGRAN_SHFT)
-#define read_cpuid(reg) ({ \
- u64 __val; \
- asm("mrs %0, " #reg : "=r" (__val)); \
- __val; \
-})
-
#define MIDR_REVISION_MASK 0xf
#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
#define MIDR_PARTNUM_SHIFT 4
@@ -61,11 +55,22 @@
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
-#define MIDR_CPU_PART(imp, partnum) \
+#define MIDR_CPU_MODEL(imp, partnum) \
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
+#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
+#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
+({ \
+ u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
+ u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
+ \
+ _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
+ })
+
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
@@ -83,8 +88,22 @@
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_KRYO2XX_SILVER \
+ MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
+
#ifndef __ASSEMBLY__
+#include <asm/sysreg.h>
+
+#define read_cpuid(reg) ({ \
+ u64 __val; \
+ asm("mrs_s %0, " __stringify(reg) : "=r" (__val)); \
+ __val; \
+})
+
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
@@ -92,12 +111,12 @@
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(MIDR_EL1);
+ return read_cpuid(SYS_MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(MPIDR_EL1);
+ return read_cpuid(SYS_MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -112,7 +131,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(CTR_EL0);
+ return read_cpuid(SYS_CTR_EL0);
}
#endif /* __ASSEMBLY__ */
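
MIDR_IS_CPU_MODEL_RANGE() above compares the implementer/architecture/part-number fields for an exact match and the combined variant/revision fields against a range, which is how revision-bounded errata checks are expressed. A standalone sketch of the same logic with the MIDR_EL1 field layout from the ARM ARM; the macro and helper names below are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* MIDR_EL1: implementer[31:24] variant[23:20] arch[19:16] partnum[15:4] rev[3:0] */
#define MIDR_MODEL_MASK 0xff0ffff0u     /* implementer + architecture + partnum */
#define MIDR_REV_MASK   0x00f0000fu     /* variant + revision */

#define MIDR_MODEL(imp, part) \
        (((uint32_t)(imp) << 24) | (0xfu << 16) | ((uint32_t)(part) << 4))

/* rN pM encoded as (N << 20) | M, matching the variant/revision fields. */
#define MIDR_RV(variant, rev) (((uint32_t)(variant) << 20) | (uint32_t)(rev))

static int midr_in_model_range(uint32_t midr, uint32_t model,
                               uint32_t rv_min, uint32_t rv_max)
{
        uint32_t rv = midr & MIDR_REV_MASK;

        return (midr & MIDR_MODEL_MASK) == model && rv >= rv_min && rv <= rv_max;
}

int main(void)
{
        uint32_t cortex_a53 = MIDR_MODEL(0x41, 0xd03);  /* ARM, Cortex-A53 */
        uint32_t midr = 0x410fd034;                     /* a Cortex-A53 r0p4 */

        printf("r0p0-r0p4 match: %d\n",
               midr_in_model_range(midr, cortex_a53, MIDR_RV(0, 0), MIDR_RV(0, 4)));
        printf("r1p0-r1p2 match: %d\n",
               midr_in_model_range(midr, cortex_a53, MIDR_RV(1, 0), MIDR_RV(1, 2)));
        return 0;
}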
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 279c85b5ec09..2fcb9b7c876c 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <asm/brk-imm.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/ptrace.h>
@@ -47,19 +48,6 @@
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
/*
- * #imm16 values used for BRK instruction generation
- * Allowed values for kgbd are 0x400 - 0x7ff
- * 0x100: for triggering a fault on purpose (reserved)
- * 0x400: for dynamic BRK instruction
- * 0x401: for compile time BRK instruction
- * 0x800: kernel-mode BUG() and WARN() traps
- */
-#define FAULT_BRK_IMM 0x100
-#define KGDB_DYN_DBG_BRK_IMM 0x400
-#define KGDB_COMPILED_DBG_BRK_IMM 0x401
-#define BUG_BRK_IMM 0x800
-
-/*
* BRK instruction encoding
* The #imm16 value should be placed at bits[20:5] within BRK ins
*/
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index faad6df49e5b..24ed037f09fd 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -24,15 +24,6 @@
#include <asm/ptrace.h>
#include <asm/user.h>
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
-#define ELF_CORE_COPY_REGS(dest, regs) \
- *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
-
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef struct user_fpsimd_state elf_fpregset_t;
-
/*
* AArch64 static relocation types.
*/
@@ -86,6 +77,8 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
+#define R_AARCH64_RELATIVE 1027
+
/*
* These are used to set parameters in the core dumps.
*/
@@ -127,6 +120,17 @@ typedef struct user_fpsimd_state elf_fpregset_t;
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#ifndef __ASSEMBLY__
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef struct user_fpsimd_state elf_fpregset_t;
+
/*
* When the program starts, a1 contains a pointer to a function to be
* registered with atexit, as per the SVR4 ABI. A value of 0 means we have no
@@ -186,4 +190,6 @@ extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
#endif /* CONFIG_COMPAT */
+#endif /* !__ASSEMBLY__ */
+
#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 309704544d22..1a617d46fce9 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -62,6 +62,16 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+ /*
+ * Used for kernel page table creation, so unmapped memory may be used
+ * for tables.
+ */
+ FIX_PTE,
+ FIX_PMD,
+ FIX_PUD,
+ FIX_PGD,
+
__end_of_fixed_addresses
};
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index c5534facf941..3c60f37e48ab 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -28,6 +28,8 @@ struct dyn_arch_ftrace {
extern unsigned long ftrace_graph_call;
+extern void return_to_handler(void);
+
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 007a69fc4f40..f2585cdd32c2 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -42,10 +42,8 @@
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
-" .pushsection __ex_table,\"a\"\n" \
-" .align 3\n" \
-" .quad 1b, 4b, 2b, 4b\n" \
-" .popsection\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
@@ -121,6 +119,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
@@ -133,10 +132,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"4: mov %w0, %w6\n"
" b 3b\n"
" .popsection\n"
-" .pushsection __ex_table,\"a\"\n"
-" .align 3\n"
-" .quad 1b, 4b, 2b, 4b\n"
-" .popsection\n"
+ _ASM_EXTABLE(1b, 4b)
+ _ASM_EXTABLE(2b, 4b)
+ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
: "memory");
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index bb4052e85dba..bbc1e35aa601 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -26,36 +26,7 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
-static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- set_pte_at(mm, addr, ptep, pte);
-}
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- ptep_clear_flush(vma, addr, ptep);
-}
-
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- ptep_set_wrprotect(mm, addr, ptep);
-}
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- return ptep_get_and_clear(mm, addr, ptep);
-}
-
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
@@ -97,4 +68,19 @@ static inline void arch_clear_hugepage_flags(struct page *page)
clear_bit(PG_dcache_clean, &page->flags);
}
+extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable);
+#define arch_make_huge_pte arch_make_huge_pte
+extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a8464c18ef88..3e1c0e7ef082 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,10 +1,45 @@
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
+#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START_SP THREAD_START_SP
+
+#ifndef __ASSEMBLER__
+
+#include <linux/percpu.h>
+
#include <asm-generic/irq.h>
+#include <asm/thread_info.h>
struct pt_regs;
+DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
+/*
+ * The highest address on the stack, and the first to be used. Used to
+ * find the dummy-stack frame put down by el?_irq() in entry.S, which
+ * is structured as follows:
+ *
+ * ------------
+ * | | <- irq_stack_ptr
+ * top ------------
+ * | x19 | <- irq_stack_ptr - 0x08
+ * ------------
+ * | x29 | <- irq_stack_ptr - 0x10
+ * ------------
+ *
+ * where x19 holds a copy of the task stack pointer where the struct pt_regs
+ * from kernel_entry can be found.
+ *
+ */
+#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
+
+/*
+ * The offset from irq_stack_ptr where entry.S will store the original
+ * stack pointer. Used by unwind_frame() and dump_backtrace().
+ */
+#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
+
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
static inline int nr_legacy_irqs(void)
@@ -15,4 +50,14 @@ static inline int nr_legacy_irqs(void)
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+static inline bool on_irq_stack(unsigned long sp, int cpu)
+{
+ /* variable names the same as kernel/stacktrace.c */
+ unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
+ unsigned long high = low + IRQ_STACK_START_SP;
+
+ return (low <= sp && sp <= high);
+}
+
+#endif /* !__ASSEMBLER__ */
#endif
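
on_irq_stack() and IRQ_STACK_TO_TASK_STACK() above only make sense together with the dummy frame described in the comment: the entry code saves the interrupted task's stack pointer at irq_stack_ptr - 0x08. A toy model of that arrangement; the sizes and the saved value are stand-ins, and this plain C deliberately ignores the per-cpu machinery the header uses.

#include <stdint.h>
#include <stdio.h>

#define IRQ_STACK_SIZE     (16 * 1024)           /* stand-in for THREAD_SIZE */
#define IRQ_STACK_START_SP (IRQ_STACK_SIZE - 16)

static unsigned long irq_stack[IRQ_STACK_SIZE / sizeof(long)];

static int on_irq_stack(unsigned long sp)
{
        unsigned long low  = (unsigned long)irq_stack;
        unsigned long high = low + IRQ_STACK_START_SP;

        return low <= sp && sp <= high;
}

int main(void)
{
        unsigned long irq_stack_ptr = (unsigned long)irq_stack + IRQ_STACK_START_SP;
        unsigned long task_sp = 0x12345678UL;    /* made-up saved task stack pointer */

        /* Mimic the dummy frame: the saved task sp lives at irq_stack_ptr - 0x08. */
        *(unsigned long *)(irq_stack_ptr - 0x08) = task_sp;

        printf("on_irq_stack: %d\n", on_irq_stack(irq_stack_ptr - 0x40));
        printf("task sp:      0x%lx\n", *(unsigned long *)(irq_stack_ptr - 0x08));
        return 0;
}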
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 2774fa384c47..71ad0f93eb71 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -7,13 +7,14 @@
#include <linux/linkage.h>
#include <asm/memory.h>
+#include <asm/pgtable-types.h>
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
*/
#define KASAN_SHADOW_START (VA_START)
-#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
/*
* This value is used to map an address to the corresponding shadow
@@ -28,10 +29,12 @@
#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))
void kasan_init(void);
+void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void);
#else
static inline void kasan_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif
#endif
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index a459714ee29e..5c6375d8528b 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -79,5 +79,17 @@
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#endif
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define ARM64_MEMSTART_ALIGN SZ_512M
+#else
+#define ARM64_MEMSTART_ALIGN SZ_1G
+#endif
#endif /* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 5e377101f919..419bc6661b5c 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -102,6 +102,8 @@
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+#define kvm_ksym_ref(sym) phys_to_virt((u64)&sym - kimage_voffset)
+
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a35ce7266aac..90c6368ad7c8 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -222,7 +222,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
-u64 kvm_call_hyp(void *hypfn, ...);
+u64 __kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@ -243,8 +243,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
* Call initialization code, and switch to the full blown
* HYP code.
*/
- kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
- hyp_stack_ptr, vector_ptr);
+ __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+ hyp_stack_ptr, vector_ptr);
}
static inline void kvm_arch_hardware_disable(void) {}
@@ -258,4 +258,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 3de42d68611d..23acc00be32d 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -26,6 +26,7 @@ __asm__(".arch_extension lse");
/* Macro for constructing calls to out-of-line ll/sc atomics */
#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+#define __LL_SC_CLOBBERS "x16", "x17", "x30"
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 853953cd1f08..12f8a00fb3f1 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -24,6 +24,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
+#include <asm/bug.h>
#include <asm/sizes.h>
/*
@@ -45,15 +46,15 @@
* VA_START - the first kernel virtual address.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define VA_START (UL(0xffffffffffffffff) << VA_BITS)
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define MODULES_END (PAGE_OFFSET)
-#define MODULES_VADDR (MODULES_END - SZ_64M)
-#define PCI_IO_END (MODULES_VADDR - SZ_2M)
+#define KIMAGE_VADDR (MODULES_END)
+#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
+#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VSIZE (SZ_128M)
+#define PCI_IO_END (PAGE_OFFSET - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
@@ -71,12 +72,27 @@
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
/*
+ * The size of the KASAN shadow region. This should be 1/8th of the
+ * size of the entire kernel virtual address space.
+ */
+#ifdef CONFIG_KASAN
+#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
+#else
+#define KASAN_SHADOW_SIZE (0)
+#endif
+
+/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
-#define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
-#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
+#define __virt_to_phys(x) ({ \
+ phys_addr_t __x = (phys_addr_t)(x); \
+ __x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET : \
+ (__x - kimage_voffset); })
+
+#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
/*
* Convert a page to/from a physical address
@@ -100,19 +116,40 @@
#define MT_S2_NORMAL 0xf
#define MT_S2_DEVICE_nGnRE 0x1
+#ifdef CONFIG_ARM64_4K_PAGES
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+#else
+#define IOREMAP_MAX_ORDER (PMD_SHIFT)
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define __early_init_dt_declare_initrd(__start, __end) \
+ do { \
+ initrd_start = (__start); \
+ initrd_end = (__end); \
+ } while (0)
+#endif
+
#ifndef __ASSEMBLY__
-extern phys_addr_t memstart_addr;
+#include <linux/bitops.h>
+#include <linux/mmdebug.h>
+
+extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
-#define PHYS_OFFSET ({ memstart_addr; })
+#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
+
+/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+extern u64 kimage_vaddr;
+
+/* the offset between the kernel virtual and physical mappings */
+extern u64 kimage_voffset;
/*
- * The maximum physical address that the linear direct mapping
- * of system RAM can cover. (PAGE_OFFSET can be interpreted as
- * a 2's complement signed quantity and negated to derive the
- * maximum size of the linear mapping.)
+ * Allow all memory at the discovery stage. We will clip it later.
*/
-#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
+#define MIN_MEMBLOCK_ADDR 0
+#define MAX_MEMBLOCK_ADDR U64_MAX
/*
* PFNs are used to describe any physical page; this means
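
The reworked __virt_to_phys() above dispatches on bit VA_BITS-1: linear-map addresses translate by masking off PAGE_OFFSET and adding PHYS_OFFSET, while kernel-image addresses subtract kimage_voffset. A toy model of that split; the PHYS_OFFSET, image base and kimage_voffset values are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define VA_BITS     48
#define PAGE_OFFSET (~0ULL << (VA_BITS - 1))     /* 0xffff800000000000 */

/* Made-up values standing in for the kernel's runtime variables. */
static uint64_t phys_offset    = 0x80000000ULL;  /* pretend start of RAM */
static uint64_t kimage_voffset = 0xffff000008000000ULL - 0x80080000ULL;

static uint64_t virt_to_phys(uint64_t va)
{
        if (va & (1ULL << (VA_BITS - 1)))        /* linear map */
                return (va & ~PAGE_OFFSET) + phys_offset;
        return va - kimage_voffset;              /* kernel image */
}

int main(void)
{
        uint64_t linear_va = PAGE_OFFSET + 0x1000;
        uint64_t kimg_va   = 0xffff000008000000ULL + 0x1000;

        printf("linear 0x%llx -> PA 0x%llx\n",
               (unsigned long long)linear_va,
               (unsigned long long)virt_to_phys(linear_va));
        printf("kimage 0x%llx -> PA 0x%llx\n",
               (unsigned long long)kimg_va,
               (unsigned long long)virt_to_phys(kimg_va));
        return 0;
}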
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 32441df2270e..21d5a6e45891 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -28,6 +28,7 @@
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <linux/msm_rtb.h>
+#include <asm/tlbflush.h>
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
@@ -53,7 +54,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = page_to_phys(empty_zero_page);
+ unsigned long ttbr = virt_to_phys(empty_zero_page);
asm(
" msr ttbr0_el1, %0 // set TTBR0\n"
@@ -78,7 +79,7 @@ static inline bool __cpu_uses_extended_idmap(void)
/*
* Set TCR.T0SZ to its default value (based on VA_BITS)
*/
-static inline void cpu_set_default_tcr_t0sz(void)
+static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
unsigned long tcr;
@@ -91,7 +92,62 @@ static inline void cpu_set_default_tcr_t0sz(void)
" msr tcr_el1, %0 ;"
" isb"
: "=&r" (tcr)
- : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+ : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
+
+/*
+ * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
+ *
+ * The idmap lives in the same VA range as userspace, but uses global entries
+ * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
+ * speculative TLB fetches, we must temporarily install the reserved page
+ * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
+ *
+ * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
+ * which should not be installed in TTBR0_EL1. In this case we can leave the
+ * reserved page tables in place.
+ */
+static inline void cpu_uninstall_idmap(void)
+{
+ struct mm_struct *mm = current->active_mm;
+
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
+
+ if (mm != &init_mm)
+ cpu_switch_mm(mm->pgd, mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_idmap_tcr_t0sz();
+
+ cpu_switch_mm(idmap_pg_dir, &init_mm);
+}
+
+/*
+ * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
+ * avoiding the possibility of conflicting TLB entries being allocated.
+ */
+static inline void cpu_replace_ttbr1(pgd_t *pgd)
+{
+ typedef void (ttbr_replace_func)(phys_addr_t);
+ extern ttbr_replace_func idmap_cpu_replace_ttbr1;
+ ttbr_replace_func *replace_phys;
+
+ phys_addr_t pgd_phys = virt_to_phys(pgd);
+
+ replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+
+ cpu_install_idmap();
+ replace_phys(pgd_phys);
+ cpu_uninstall_idmap();
}
/*
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e80e232b730e..e12af6754634 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -20,4 +20,21 @@
#define MODULE_ARCH_VERMAGIC "aarch64"
+#ifdef CONFIG_ARM64_MODULE_PLTS
+struct mod_arch_specific {
+ struct elf64_shdr *plt;
+ int plt_num_entries;
+ int plt_max_entries;
+};
+#endif
+
+u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+ Elf64_Sym *sym);
+
+#ifdef CONFIG_RANDOMIZE_BASE
+extern u64 module_alloc_base;
+#else
+#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
+#endif
+
#endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index c15053902942..ff98585d085a 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -42,11 +42,20 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
{
- set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ set_pud(pud, __pud(pmd | prot));
}
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+}
+#else
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+{
+ BUILD_BUG();
+}
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
@@ -62,11 +71,20 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
free_page((unsigned long)pud);
}
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
{
- set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
+ set_pgd(pgdp, __pgd(pud | prot));
}
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+}
+#else
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+{
+ BUILD_BUG();
+}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d6739e836f7b..5c25b831273d 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -90,7 +90,23 @@
/*
* Contiguous page definitions.
*/
-#define CONT_PTES (_AC(1, UL) << CONT_SHIFT)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define CONT_PTE_SHIFT 5
+#define CONT_PMD_SHIFT 5
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define CONT_PTE_SHIFT 7
+#define CONT_PMD_SHIFT 5
+#else
+#define CONT_PTE_SHIFT 4
+#define CONT_PMD_SHIFT 4
+#endif
+
+#define CONT_PTES (1 << CONT_PTE_SHIFT)
+#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
+#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
+#define CONT_PMDS (1 << CONT_PMD_SHIFT)
+#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
+#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index eaa9cabf4066..c3c2518eecfe 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -36,19 +36,13 @@
*
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_START: beginning of the kernel vmalloc space
* VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
* fixed mappings and modules
*/
#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
-#ifndef CONFIG_KASAN
-#define VMALLOC_START (VA_START)
-#else
-#include <asm/kasan.h>
-#define VMALLOC_START (KASAN_SHADOW_END + SZ_64K)
-#endif
-
+#define VMALLOC_START (MODULES_END)
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define VMEMMAP_START (VMALLOC_END + SZ_64K)
@@ -59,6 +53,7 @@
#ifndef __ASSEMBLY__
+#include <asm/fixmap.h>
#include <linux/mmdebug.h>
extern void __pte_error(const char *file, int line, unsigned long val);
@@ -69,11 +64,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -83,7 +78,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
@@ -123,8 +118,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr) (empty_zero_page)
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
@@ -136,16 +131,6 @@ extern struct page *empty_zero_page;
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-/* Find an entry in the third-level page table. */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
@@ -155,6 +140,7 @@ extern struct page *empty_zero_page;
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
+#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -165,10 +151,18 @@ extern struct page *empty_zero_page;
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+#define pte_valid_young(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+
+/*
+ * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+ * so that we don't erroneously return false for pages that have been
+ * remapped as PROT_NONE but are yet to be flushed from the TLB.
+ */
+#define pte_accessible(mm, pte) \
+ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
@@ -219,7 +213,8 @@ static inline pte_t pte_mkspecial(pte_t pte)
static inline pte_t pte_mkcont(pte_t pte)
{
- return set_pte_bit(pte, __pgprot(PTE_CONT));
+ pte = set_pte_bit(pte, __pgprot(PTE_CONT));
+ return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}
static inline pte_t pte_mknoncont(pte_t pte)
@@ -227,6 +222,11 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
+static inline pmd_t pmd_mkcont(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
+}
+
static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
@@ -264,13 +264,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_valid_user(pte)) {
- if (!pte_special(pte) && pte_exec(pte))
- __sync_icache_dcache(pte, addr);
+ if (pte_present(pte)) {
if (pte_sw_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
else
pte_val(pte) |= PTE_RDONLY;
+ if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+ __sync_icache_dcache(pte, addr);
}
/*
@@ -300,7 +300,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
/*
* Hugetlb definitions.
*/
-#define HUGE_MAX_HSTATE 2
+#define HUGE_MAX_HSTATE 4
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
@@ -354,6 +354,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
@@ -426,13 +427,31 @@ static inline void pmd_clear(pmd_t *pmdp)
set_pmd(pmdp, __pmd(0));
}
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
- return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+ return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}
+/* Find an entry in the third-level page table. */
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
+
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
+#define pte_clear_fixmap() clear_fixmap(FIX_PTE)
+
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+/* use ONLY for statically allocated translation tables */
+#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -459,21 +478,37 @@ static inline void pud_clear(pud_t *pudp)
set_pud(pudp, __pud(0));
}
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline phys_addr_t pud_page_paddr(pud_t pud)
{
- return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+ return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}
/* Find an entry in the second-level page table. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
+#define pmd_offset_phys(dir, addr) (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
+
+#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
+#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
+#define pmd_clear_fixmap() clear_fixmap(FIX_PMD)
#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
+/* use ONLY for statically allocated translation tables */
+#define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
+
+#else
+
+#define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
+
+/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
+#define pmd_set_fixmap(addr) NULL
+#define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp)
+#define pmd_clear_fixmap()
+
+#define pmd_offset_kimg(dir,addr) ((pmd_t *)dir)
+
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
@@ -495,21 +530,37 @@ static inline void pgd_clear(pgd_t *pgdp)
set_pgd(pgdp, __pgd(0));
}
-static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
- return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+ return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}
/* Find an entry in the first-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
-{
- return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
-}
+#define pud_offset_phys(dir, addr) (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))
+
+#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
+#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+/* use ONLY for statically allocated translation tables */
+#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
+
+#else
+
+#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})
+
+/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
+#define pud_set_fixmap(addr) NULL
+#define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp)
+#define pud_clear_fixmap()
+
+#define pud_offset_kimg(dir,addr) ((pud_t *)dir)
+
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -517,11 +568,16 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
/* to find an entry in a page-table-directory */
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
+#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
+
+#define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr)))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
+#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
+
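Taken together, the *_offset helpers above compose into a plain software walk. A minimal sketch (illustrative only; pgd_none()/pud_none()/pmd_none() error checks omitted):

static pte_t *walk_to_pte_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* pgd_offset_raw((mm)->pgd, addr) */
	pud_t *pud = pud_offset(pgd, addr);	/* __va(pud_offset_phys(...)) */
	pmd_t *pmd = pmd_offset(pud, addr);	/* __va(pmd_offset_phys(...)) */

	/* pte_offset_kernel() likewise maps pte_offset_phys() through __va(). */
	return pte_offset_kernel(pmd, addr);
}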
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -641,6 +697,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* bits 0-1: present (must be zero)
* bits 2-7: swap type
* bits 8-57: swap offset
+ * bit 58: PTE_PROT_NONE (must be zero)
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
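As an aside (hypothetical helper names, not from the patch), the bit layout documented above decodes roughly as follows:

#define SWP_SKETCH_TYPE_MASK	((1UL << __SWP_TYPE_BITS) - 1)		/* bits 2-7 */
#define SWP_SKETCH_OFFSET_SHIFT	(__SWP_TYPE_SHIFT + __SWP_TYPE_BITS)	/* bit 8    */

static inline unsigned long swp_type_sketch(unsigned long swp_val)
{
	return (swp_val >> __SWP_TYPE_SHIFT) & SWP_SKETCH_TYPE_MASK;
}

static inline unsigned long swp_offset_sketch(unsigned long swp_val)
{
	/* bits 8-57; bit 58 (PTE_PROT_NONE) must stay clear for swap entries */
	return swp_val >> SWP_SKETCH_OFFSET_SHIFT;
}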
@@ -666,7 +723,8 @@ extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
-#define pgtable_cache_init() do { } while (0)
+void pgd_cache_init(void);
+#define pgtable_cache_init pgd_cache_init
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index c8c7c4d38171..c97d01eb219f 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -29,8 +29,10 @@
#include <linux/string.h>
+#include <asm/alternative.h>
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
+#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -180,9 +182,11 @@ static inline void prefetchw(const void *ptr)
}
#define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *x)
+static inline void spin_lock_prefetch(const void *ptr)
{
- prefetchw(x);
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ "prfm pstl1strm, %a0",
+ "nop") : : "p" (ptr));
}
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -190,5 +194,6 @@ static inline void spin_lock_prefetch(const void *x)
#endif
void cpu_enable_pan(void *__unused);
+void cpu_enable_uao(void *__unused);
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h
index 4df608a8459e..e368a55ebd22 100644
--- a/arch/arm64/include/asm/shmparam.h
+++ b/arch/arm64/include/asm/shmparam.h
@@ -21,7 +21,7 @@
* alignment value. Since we don't have aliasing D-caches, the rest of
* the time we can safely use PAGE_SIZE.
*/
-#define COMPAT_SHMLBA 0x4000
+#define COMPAT_SHMLBA (4 * PAGE_SIZE)
#include <asm-generic/shmparam.h>
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index c63c432f1a82..2013a4dc5124 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -63,7 +63,15 @@ extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+#else
+static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+ BUILD_BUG();
+}
+#endif
extern int __cpu_disable(void);
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index be4d2f9b7e8c..7b369923b2eb 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -26,9 +26,28 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ unsigned int tmp;
+ arch_spinlock_t lockval;
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+ asm volatile(
+" sevl\n"
+"1: wfe\n"
+"2: ldaxr %w0, %2\n"
+" eor %w1, %w0, %w0, ror #16\n"
+" cbnz %w1, 1b\n"
+ ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
+ /* LSE atomics */
+" nop\n"
+" nop\n")
+ : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+ :
+ : "memory");
+}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
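The "eor %w0, %w0, ror #16" trick above is the ticket-lock test in disguise; a rough C equivalent of just that test (assuming the usual owner/next halfword layout, not taken from this patch):

#include <linux/bitops.h>

static inline bool ticket_is_locked_sketch(u32 lockval)
{
	/* owner (low 16 bits) != next (high 16 bits) means somebody holds it */
	return (lockval ^ ror32(lockval, 16)) != 0;
}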
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 7318f6d54aa9..801a16dbbdf6 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -16,14 +16,19 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
+struct task_struct;
+
struct stackframe {
unsigned long fp;
unsigned long sp;
unsigned long pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ unsigned int graph;
+#endif
};
-extern int unwind_frame(struct stackframe *frame);
-extern void walk_stackframe(struct stackframe *frame,
+extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
+extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d48ab5b41f52..b9fd8ec79033 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -70,15 +70,19 @@
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
+#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
+#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
(!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
+ (!!x)<<8 | 0x1f)
/* SCTLR_EL1 */
#define SCTLR_EL1_CP15BEN (0x1 << 5)
@@ -135,6 +139,9 @@
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
#define ID_AA64MMFR1_HADBS_SHIFT 0
+/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_UAO_SHIFT 4
+
/* id_aa64dfr0 */
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
@@ -194,32 +201,32 @@
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
- .equ __reg_num_x\num, \num
+ .equ .L__reg_num_x\num, \num
.endr
- .equ __reg_num_xzr, 31
+ .equ .L__reg_num_xzr, 31
.macro mrs_s, rt, sreg
- .inst 0xd5200000|(\sreg)|(__reg_num_\rt)
+ .inst 0xd5200000|(\sreg)|(.L__reg_num_\rt)
.endm
.macro msr_s, sreg, rt
- .inst 0xd5000000|(\sreg)|(__reg_num_\rt)
+ .inst 0xd5000000|(\sreg)|(.L__reg_num_\rt)
.endm
#else
asm(
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-" .equ __reg_num_x\\num, \\num\n"
+" .equ .L__reg_num_x\\num, \\num\n"
" .endr\n"
-" .equ __reg_num_xzr, 31\n"
+" .equ .L__reg_num_xzr, 31\n"
"\n"
" .macro mrs_s, rt, sreg\n"
-" .inst 0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
+" .inst 0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
" .endm\n"
"\n"
" .macro msr_s, sreg, rt\n"
-" .inst 0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
+" .inst 0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
" .endm\n"
);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index bfef76e14e2d..4a9e82e4f724 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -73,10 +73,16 @@ register unsigned long current_stack_pointer asm ("sp");
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
+/*
+ * struct thread_info can be accessed directly via sp_el0.
+ */
static inline struct thread_info *current_thread_info(void)
{
- return (struct thread_info *)
- (current_stack_pointer & ~(THREAD_SIZE - 1));
+ unsigned long sp_el0;
+
+ asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+ return (struct thread_info *)sp_el0;
}
#define thread_saved_pc(tsk) \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index b2ede967fe7d..0685d74572af 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -36,11 +36,11 @@
#define VERIFY_WRITE 1
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
@@ -50,9 +50,11 @@
struct exception_table_entry
{
- unsigned long insn, fixup;
+ int insn, fixup;
};
+#define ARCH_HAS_RELATIVE_EXTABLE
+
extern int fixup_exception(struct pt_regs *regs);
#define KERNEL_DS (-1UL)
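To make the new relative format concrete: a consumer recovers the absolute addresses by adding each stored offset to the address of the field holding it. A minimal sketch (not the actual fixup_exception() implementation):

static inline unsigned long ex_insn_addr_sketch(const struct exception_table_entry *e)
{
	return (unsigned long)&e->insn + e->insn;
}

static inline unsigned long ex_fixup_addr_sketch(const struct exception_table_entry *e)
{
	return (unsigned long)&e->fixup + e->fixup;
}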
@@ -64,6 +66,16 @@ extern int fixup_exception(struct pt_regs *regs);
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+
+ /*
+ * Enable/disable UAO so that copy_to_user() etc can access
+ * kernel memory with the unprivileged instructions.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
+ asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
+ else
+ asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
+ CONFIG_ARM64_UAO));
}
#define segment_eq(a, b) ((a) == (b))
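A hypothetical call site, showing why set_fs() has to toggle UAO: once the segment is KERNEL_DS, the unprivileged ldtr/sttr forms emitted by the accessors below must be allowed to reach kernel memory (sketch only; error handling trimmed):

static int read_kernel_via_uaccess_sketch(void *dst, const void *src, size_t n)
{
	mm_segment_t old_fs = get_fs();
	unsigned long left;

	set_fs(KERNEL_DS);		/* sets PSTATE.UAO on ARM64_HAS_UAO CPUs */
	left = __copy_from_user(dst, (__force const void __user *)src, n);
	set_fs(old_fs);			/* clears UAO again */

	return left ? -EFAULT : 0;
}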
@@ -105,6 +117,12 @@ static inline void set_fs(mm_segment_t fs)
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
+#define _ASM_EXTABLE(from, to) \
+ " .pushsection __ex_table, \"a\"\n" \
+ " .align 3\n" \
+ " .long (" #from " - .), (" #to " - .)\n" \
+ " .popsection\n"
+
/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
@@ -113,9 +131,10 @@ static inline void set_fs(mm_segment_t fs)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_user_asm(instr, reg, x, addr, err) \
+#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
asm volatile( \
- "1: " instr " " reg "1, [%2]\n" \
+ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
+ alt_instr " " reg "1, [%2]\n", feature) \
"2:\n" \
" .section .fixup, \"ax\"\n" \
" .align 2\n" \
@@ -123,10 +142,7 @@ static inline void set_fs(mm_segment_t fs)
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .quad 1b, 3b\n" \
- " .previous" \
+ _ASM_EXTABLE(1b, 3b) \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
@@ -134,26 +150,30 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
+ __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
case 2: \
- __get_user_asm("ldrh", "%w", __gu_val, (ptr), (err)); \
+ __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
case 4: \
- __get_user_asm("ldr", "%w", __gu_val, (ptr), (err)); \
+ __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
case 8: \
- __get_user_asm("ldr", "%", __gu_val, (ptr), (err)); \
+ __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
default: \
BUILD_BUG(); \
} \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
CONFIG_ARM64_PAN)); \
} while (0)
@@ -181,19 +201,17 @@ do { \
((x) = 0, -EFAULT); \
})
-#define __put_user_asm(instr, reg, x, addr, err) \
+#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
asm volatile( \
- "1: " instr " " reg "1, [%2]\n" \
+ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
+ alt_instr " " reg "1, [%2]\n", feature) \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %w0, %3\n" \
" b 2b\n" \
" .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .quad 1b, 3b\n" \
- " .previous" \
+ _ASM_EXTABLE(1b, 3b) \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
@@ -201,25 +219,29 @@ do { \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
+ __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
case 2: \
- __put_user_asm("strh", "%w", __pu_val, (ptr), (err)); \
+ __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
case 4: \
- __put_user_asm("str", "%w", __pu_val, (ptr), (err)); \
+ __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
case 8: \
- __put_user_asm("str", "%", __pu_val, (ptr), (err)); \
+ __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
+ (err), ARM64_HAS_UAO); \
break; \
default: \
BUILD_BUG(); \
} \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
CONFIG_ARM64_PAN)); \
} while (0)
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index aab5bf09e9d9..2b79b8a89457 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -16,6 +16,8 @@
#ifndef __ASM_WORD_AT_A_TIME_H
#define __ASM_WORD_AT_A_TIME_H
+#include <asm/uaccess.h>
+
#ifndef __AARCH64EB__
#include <linux/kernel.h>
@@ -81,10 +83,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
#endif
" b 2b\n"
" .popsection\n"
- " .pushsection __ex_table,\"a\"\n"
- " .align 3\n"
- " .quad 1b, 3b\n"
- " .popsection"
+ _ASM_EXTABLE(1b, 3b)
: "=&r" (ret), "=&r" (offset)
: "r" (addr), "Q" (*(unsigned long *)addr));
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 208db3df135a..b5c3933ed441 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -45,6 +45,7 @@
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
#define PSR_PAN_BIT 0x00400000
+#define PSR_UAO_BIT 0x00800000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 9f7794c5743f..f6e0269de4c7 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -30,6 +30,7 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
+arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_debug.o \
perf_trace_counters.o \
@@ -43,6 +44,8 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
arm64-obj-$(CONFIG_ACPI) += acpi.o
+arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
+arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
new file mode 100644
index 000000000000..4b1e5a7a98da
--- /dev/null
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -0,0 +1,153 @@
+/*
+ * ARM64 ACPI Parking Protocol implementation
+ *
+ * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+#include <asm/cpu_ops.h>
+
+struct cpu_mailbox_entry {
+ phys_addr_t mailbox_addr;
+ u8 version;
+ u8 gic_cpu_id;
+};
+
+static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS];
+
+void __init acpi_set_mailbox_entry(int cpu,
+ struct acpi_madt_generic_interrupt *p)
+{
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+ cpu_entry->mailbox_addr = p->parked_address;
+ cpu_entry->version = p->parking_version;
+ cpu_entry->gic_cpu_id = p->cpu_interface_number;
+}
+
+bool acpi_parking_protocol_valid(int cpu)
+{
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+ return cpu_entry->mailbox_addr && cpu_entry->version;
+}
+
+static int acpi_parking_protocol_cpu_init(unsigned int cpu)
+{
+ pr_debug("%s: ACPI parked addr=%llx\n", __func__,
+ cpu_mailbox_entries[cpu].mailbox_addr);
+
+ return 0;
+}
+
+static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
+{
+ return 0;
+}
+
+struct parking_protocol_mailbox {
+ __le32 cpu_id;
+ __le32 reserved;
+ __le64 entry_point;
+};
+
+static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
+{
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+ struct parking_protocol_mailbox __iomem *mailbox;
+ __le32 cpu_id;
+
+ /*
+ * Map mailbox memory with attribute device nGnRE (ie ioremap -
+ * this deviates from the parking protocol specifications since
+ * the mailboxes are required to be mapped nGnRnE; the attribute
+ * discrepancy is harmless insofar as the protocol specification
+ * is concerned).
+ * If the mailbox is mistakenly allocated in the linear mapping
+ * by FW ioremap will fail since the mapping will be prevented
+ * by the kernel (it clashes with the linear mapping attributes
+ * specifications).
+ */
+ mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+ if (!mailbox)
+ return -EIO;
+
+ cpu_id = readl_relaxed(&mailbox->cpu_id);
+ /*
+ * Check if firmware has set-up the mailbox entry properly
+ * before kickstarting the respective cpu.
+ */
+ if (cpu_id != ~0U) {
+ iounmap(mailbox);
+ return -ENXIO;
+ }
+
+ /*
+ * We write the entry point and cpu id as LE regardless of the
+ * native endianness of the kernel. Therefore, any boot-loaders
+ * that read this address need to convert this address to the
+ * Boot-Loader's endianness before jumping.
+ */
+ writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+ writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
+
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ iounmap(mailbox);
+
+ return 0;
+}
+
+static void acpi_parking_protocol_cpu_postboot(void)
+{
+ int cpu = smp_processor_id();
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+ struct parking_protocol_mailbox __iomem *mailbox;
+ __le64 entry_point;
+
+ /*
+ * Map mailbox memory with attribute device nGnRE (ie ioremap -
+ * this deviates from the parking protocol specifications since
+ * the mailboxes are required to be mapped nGnRnE; the attribute
+ * discrepancy is harmless insofar as the protocol specification
+ * is concerned).
+ * If the mailbox is mistakenly allocated in the linear mapping
+ * by FW ioremap will fail since the mapping will be prevented
+ * by the kernel (it clashes with the linear mapping attributes
+ * specifications).
+ */
+ mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+ if (!mailbox)
+ return;
+
+ entry_point = readl_relaxed(&mailbox->entry_point);
+ /*
+ * Check if firmware has cleared the entry_point as expected
+ * by the protocol specification.
+ */
+ WARN_ON(entry_point);
+
+ iounmap(mailbox);
+}
+
+const struct cpu_operations acpi_parking_protocol_ops = {
+ .name = "parking-protocol",
+ .cpu_init = acpi_parking_protocol_cpu_init,
+ .cpu_prepare = acpi_parking_protocol_cpu_prepare,
+ .cpu_boot = acpi_parking_protocol_cpu_boot,
+ .cpu_postboot = acpi_parking_protocol_cpu_postboot
+};
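For context (a sketch, not part of the patch): the methods registered here are reached through the per-cpu cpu_ops[] table, roughly the way the SMP code brings a secondary online:

static int boot_secondary_sketch(unsigned int cpu)
{
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_boot)
		return -EOPNOTSUPP;

	/* For this backend: write the mailbox and send the wake-up IPI. */
	return cpu_ops[cpu]->cpu_boot(cpu);
}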
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 47a8caf28bcc..e6cb1dc63a2a 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -158,9 +158,3 @@ void apply_alternatives(void *start, size_t length)
__apply_alternatives(&region);
}
-
-void free_alternatives_memory(void)
-{
- free_reserved_area(__alt_instructions, __alt_instructions_end,
- 0, "alternatives");
-}
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 937f5e58a4d3..c37202c0c838 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -62,7 +62,7 @@ struct insn_emulation {
};
static LIST_HEAD(insn_emulation);
-static int nr_insn_emulated;
+static int nr_insn_emulated __initdata;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
static void register_emulation_hooks(struct insn_emulation_ops *ops)
@@ -173,7 +173,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
return ret;
}
-static void register_insn_emulation(struct insn_emulation_ops *ops)
+static void __init register_insn_emulation(struct insn_emulation_ops *ops)
{
unsigned long flags;
struct insn_emulation *insn;
@@ -237,7 +237,7 @@ static struct ctl_table ctl_abi[] = {
{ }
};
-static void register_insn_emulation_sysctl(struct ctl_table *table)
+static void __init register_insn_emulation_sysctl(struct ctl_table *table)
{
unsigned long flags;
int i = 0;
@@ -297,11 +297,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .quad 0b, 4b\n" \
- " .quad 1b, 4b\n" \
- " .popsection\n" \
+ _ASM_EXTABLE(0b, 4b) \
+ _ASM_EXTABLE(1b, 4b) \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 34aa4b3b47e9..1555520bbb74 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -21,26 +21,12 @@
#include <asm/cputype.h>
#include <asm/cpufeature.h>
-#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
-#define MIDR_KRYO2XX_SILVER \
- MIDR_CPU_PART(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
-
-#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
- MIDR_ARCHITECTURE_MASK)
-
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{
- u32 midr = read_cpuid_id();
-
- if ((midr & CPU_MODEL_MASK) != entry->midr_model)
- return false;
-
- midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-
- return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
+ return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
+ entry->midr_range_min,
+ entry->midr_range_max);
}
#define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index b6bd7d447768..c7cfb8fe06f9 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -25,19 +25,30 @@
#include <asm/smp_plat.h>
extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations acpi_parking_protocol_ops;
extern const struct cpu_operations cpu_psci_ops;
const struct cpu_operations *cpu_ops[NR_CPUS];
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
&smp_spin_table_ops,
&cpu_psci_ops,
NULL,
};
+static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+ &acpi_parking_protocol_ops,
+#endif
+ &cpu_psci_ops,
+ NULL,
+};
+
static const struct cpu_operations * __init cpu_get_ops(const char *name)
{
- const struct cpu_operations **ops = supported_cpu_ops;
+ const struct cpu_operations **ops;
+
+ ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
while (*ops) {
if (!strcmp(name, (*ops)->name))
@@ -75,8 +86,16 @@ static const char *__init cpu_read_enable_method(int cpu)
}
} else {
enable_method = acpi_get_enable_method(cpu);
- if (!enable_method)
- pr_err("Unsupported ACPI enable-method\n");
+ if (!enable_method) {
+ /*
+ * In ACPI systems the boot CPU does not require
+ * checking the enable method since for some
+ * boot protocol (ie parking protocol) it need not
+ * be initialized. Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("Unsupported ACPI enable-method\n");
+ }
}
return enable_method;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0669c63281ea..7566cad9fa1d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -67,6 +67,10 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
.width = 0, \
}
+/* meta feature for alternatives */
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+
static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
@@ -123,6 +127,11 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_END,
};
+static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static struct arm64_ftr_bits ftr_ctr[] = {
U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
@@ -284,6 +293,7 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = {
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+ ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
/* Op1 = 3, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
@@ -408,6 +418,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -517,6 +528,8 @@ void update_cpu_features(int cpu,
info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
/*
* EL3 is not our concern.
@@ -621,6 +634,18 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
return has_sre;
}
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+{
+ u32 midr = read_cpuid_id();
+ u32 rv_min, rv_max;
+
+ /* Cavium ThunderX pass 1.x and 2.x */
+ rv_min = 0;
+ rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
+
+ return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+}
+
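A small aside on how the variant/revision bounds above are packed (sketch with an assumed helper name):

static inline u32 midr_rv_sketch(u32 variant, u32 revision)
{
	/* variant sits at MIDR_VARIANT_SHIFT, revision in the low nibble */
	return (variant << MIDR_VARIANT_SHIFT) | revision;
}

/* rv_max above is midr_rv_sketch(1, 0xf), i.e. anything up to pass 2.x. */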
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -651,6 +676,28 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = 2,
},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+ {
+ .desc = "Software prefetching using PRFM",
+ .capability = ARM64_HAS_NO_HW_PREFETCH,
+ .matches = has_no_hw_prefetch,
+ },
+#ifdef CONFIG_ARM64_UAO
+ {
+ .desc = "User Access Override",
+ .capability = ARM64_HAS_UAO,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .field_pos = ID_AA64MMFR2_UAO_SHIFT,
+ .min_field_value = 1,
+ .enable = cpu_enable_uao,
+ },
+#endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+ {
+ .capability = ARM64_ALT_PAN_NOT_UAO,
+ .matches = cpufeature_pan_not_uao,
+ },
+#endif /* CONFIG_ARM64_PAN */
{},
};
@@ -684,7 +731,7 @@ static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
{},
};
-static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
@@ -729,12 +776,12 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *
return rc;
}
-static void setup_cpu_hwcaps(void)
+static void __init setup_cpu_hwcaps(void)
{
int i;
const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
- for (i = 0; hwcaps[i].desc; i++)
+ for (i = 0; hwcaps[i].matches; i++)
if (hwcaps[i].matches(&hwcaps[i]))
cap_set_hwcap(&hwcaps[i]);
}
@@ -744,11 +791,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
{
int i;
- for (i = 0; caps[i].desc; i++) {
+ for (i = 0; caps[i].matches; i++) {
if (!caps[i].matches(&caps[i]))
continue;
- if (!cpus_have_cap(caps[i].capability))
+ if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
pr_info("%s %s\n", info, caps[i].desc);
cpus_set_cap(caps[i].capability);
}
@@ -758,11 +805,12 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+static void __init
+enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
int i;
- for (i = 0; caps[i].desc; i++)
+ for (i = 0; caps[i].matches; i++)
if (caps[i].enable && cpus_have_cap(caps[i].capability))
on_each_cpu(caps[i].enable, NULL, true);
}
@@ -790,35 +838,36 @@ static inline void set_sys_caps_initialised(void)
static u64 __raw_read_system_reg(u32 sys_id)
{
switch (sys_id) {
- case SYS_ID_PFR0_EL1: return (u64)read_cpuid(ID_PFR0_EL1);
- case SYS_ID_PFR1_EL1: return (u64)read_cpuid(ID_PFR1_EL1);
- case SYS_ID_DFR0_EL1: return (u64)read_cpuid(ID_DFR0_EL1);
- case SYS_ID_MMFR0_EL1: return (u64)read_cpuid(ID_MMFR0_EL1);
- case SYS_ID_MMFR1_EL1: return (u64)read_cpuid(ID_MMFR1_EL1);
- case SYS_ID_MMFR2_EL1: return (u64)read_cpuid(ID_MMFR2_EL1);
- case SYS_ID_MMFR3_EL1: return (u64)read_cpuid(ID_MMFR3_EL1);
- case SYS_ID_ISAR0_EL1: return (u64)read_cpuid(ID_ISAR0_EL1);
- case SYS_ID_ISAR1_EL1: return (u64)read_cpuid(ID_ISAR1_EL1);
- case SYS_ID_ISAR2_EL1: return (u64)read_cpuid(ID_ISAR2_EL1);
- case SYS_ID_ISAR3_EL1: return (u64)read_cpuid(ID_ISAR3_EL1);
- case SYS_ID_ISAR4_EL1: return (u64)read_cpuid(ID_ISAR4_EL1);
- case SYS_ID_ISAR5_EL1: return (u64)read_cpuid(ID_ISAR4_EL1);
- case SYS_MVFR0_EL1: return (u64)read_cpuid(MVFR0_EL1);
- case SYS_MVFR1_EL1: return (u64)read_cpuid(MVFR1_EL1);
- case SYS_MVFR2_EL1: return (u64)read_cpuid(MVFR2_EL1);
-
- case SYS_ID_AA64PFR0_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1);
- case SYS_ID_AA64PFR1_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1);
- case SYS_ID_AA64DFR0_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1);
- case SYS_ID_AA64DFR1_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1);
- case SYS_ID_AA64MMFR0_EL1: return (u64)read_cpuid(ID_AA64MMFR0_EL1);
- case SYS_ID_AA64MMFR1_EL1: return (u64)read_cpuid(ID_AA64MMFR1_EL1);
- case SYS_ID_AA64ISAR0_EL1: return (u64)read_cpuid(ID_AA64ISAR0_EL1);
- case SYS_ID_AA64ISAR1_EL1: return (u64)read_cpuid(ID_AA64ISAR1_EL1);
-
- case SYS_CNTFRQ_EL0: return (u64)read_cpuid(CNTFRQ_EL0);
- case SYS_CTR_EL0: return (u64)read_cpuid(CTR_EL0);
- case SYS_DCZID_EL0: return (u64)read_cpuid(DCZID_EL0);
+ case SYS_ID_PFR0_EL1: return read_cpuid(SYS_ID_PFR0_EL1);
+ case SYS_ID_PFR1_EL1: return read_cpuid(SYS_ID_PFR1_EL1);
+ case SYS_ID_DFR0_EL1: return read_cpuid(SYS_ID_DFR0_EL1);
+ case SYS_ID_MMFR0_EL1: return read_cpuid(SYS_ID_MMFR0_EL1);
+ case SYS_ID_MMFR1_EL1: return read_cpuid(SYS_ID_MMFR1_EL1);
+ case SYS_ID_MMFR2_EL1: return read_cpuid(SYS_ID_MMFR2_EL1);
+ case SYS_ID_MMFR3_EL1: return read_cpuid(SYS_ID_MMFR3_EL1);
+ case SYS_ID_ISAR0_EL1: return read_cpuid(SYS_ID_ISAR0_EL1);
+ case SYS_ID_ISAR1_EL1: return read_cpuid(SYS_ID_ISAR1_EL1);
+ case SYS_ID_ISAR2_EL1: return read_cpuid(SYS_ID_ISAR2_EL1);
+ case SYS_ID_ISAR3_EL1: return read_cpuid(SYS_ID_ISAR3_EL1);
+ case SYS_ID_ISAR4_EL1: return read_cpuid(SYS_ID_ISAR4_EL1);
+ case SYS_ID_ISAR5_EL1: return read_cpuid(SYS_ID_ISAR4_EL1);
+ case SYS_MVFR0_EL1: return read_cpuid(SYS_MVFR0_EL1);
+ case SYS_MVFR1_EL1: return read_cpuid(SYS_MVFR1_EL1);
+ case SYS_MVFR2_EL1: return read_cpuid(SYS_MVFR2_EL1);
+
+ case SYS_ID_AA64PFR0_EL1: return read_cpuid(SYS_ID_AA64PFR0_EL1);
+ case SYS_ID_AA64PFR1_EL1: return read_cpuid(SYS_ID_AA64PFR0_EL1);
+ case SYS_ID_AA64DFR0_EL1: return read_cpuid(SYS_ID_AA64DFR0_EL1);
+ case SYS_ID_AA64DFR1_EL1: return read_cpuid(SYS_ID_AA64DFR0_EL1);
+ case SYS_ID_AA64MMFR0_EL1: return read_cpuid(SYS_ID_AA64MMFR0_EL1);
+ case SYS_ID_AA64MMFR1_EL1: return read_cpuid(SYS_ID_AA64MMFR1_EL1);
+ case SYS_ID_AA64MMFR2_EL1: return read_cpuid(SYS_ID_AA64MMFR2_EL1);
+ case SYS_ID_AA64ISAR0_EL1: return read_cpuid(SYS_ID_AA64ISAR0_EL1);
+ case SYS_ID_AA64ISAR1_EL1: return read_cpuid(SYS_ID_AA64ISAR1_EL1);
+
+ case SYS_CNTFRQ_EL0: return read_cpuid(SYS_CNTFRQ_EL0);
+ case SYS_CTR_EL0: return read_cpuid(SYS_CTR_EL0);
+ case SYS_DCZID_EL0: return read_cpuid(SYS_DCZID_EL0);
default:
BUG();
return 0;
@@ -868,7 +917,7 @@ void verify_local_cpu_capabilities(void)
return;
caps = arm64_features;
- for (i = 0; caps[i].desc; i++) {
+ for (i = 0; caps[i].matches; i++) {
if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
continue;
/*
@@ -881,7 +930,7 @@ void verify_local_cpu_capabilities(void)
caps[i].enable(NULL);
}
- for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
+ for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
if (!cpus_have_hwcap(&caps[i]))
continue;
if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
@@ -897,7 +946,7 @@ static inline void set_sys_caps_initialised(void)
#endif /* CONFIG_HOTPLUG_CPU */
-static void setup_feature_capabilities(void)
+static void __init setup_feature_capabilities(void)
{
update_cpu_capabilities(arm64_features, "detected feature:");
enable_cpu_capabilities(arm64_features);
@@ -927,3 +976,9 @@ void __init setup_cpu_features(void)
pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
L1_CACHE_BYTES, cls);
}
+
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+{
+ return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index c3c6557eb083..17cf73e70752 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -214,40 +214,41 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
info->reg_ctr = read_cpuid_cachetype();
- info->reg_dczid = read_cpuid(DCZID_EL0);
+ info->reg_dczid = read_cpuid(SYS_DCZID_EL0);
info->reg_midr = read_cpuid_id();
- info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
- info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
- info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
- info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+ info->reg_id_aa64dfr0 = read_cpuid(SYS_ID_AA64DFR0_EL1);
+ info->reg_id_aa64dfr1 = read_cpuid(SYS_ID_AA64DFR1_EL1);
+ info->reg_id_aa64isar0 = read_cpuid(SYS_ID_AA64ISAR0_EL1);
+ info->reg_id_aa64isar1 = read_cpuid(SYS_ID_AA64ISAR1_EL1);
/*
* Explicitly mask out 16KB granule since we do not
* want to support it
*/
- info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1) &
+ info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1) &
(~MMFR0_EL1_16KGRAN_MASK);
- info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
- info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
- info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
-
- info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
- info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
- info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
- info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
- info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
- info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
- info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
- info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
- info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
- info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
- info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
- info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
- info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-
- info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
- info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
- info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+ info->reg_id_aa64mmfr1 = read_cpuid(SYS_ID_AA64MMFR1_EL1);
+ info->reg_id_aa64mmfr2 = read_cpuid(SYS_ID_AA64MMFR2_EL1);
+ info->reg_id_aa64pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
+ info->reg_id_aa64pfr1 = read_cpuid(SYS_ID_AA64PFR1_EL1);
+
+ info->reg_id_dfr0 = read_cpuid(SYS_ID_DFR0_EL1);
+ info->reg_id_isar0 = read_cpuid(SYS_ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(SYS_ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(SYS_ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(SYS_ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(SYS_ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(SYS_ID_ISAR5_EL1);
+ info->reg_id_mmfr0 = read_cpuid(SYS_ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(SYS_ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(SYS_ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(SYS_ID_MMFR3_EL1);
+ info->reg_id_pfr0 = read_cpuid(SYS_ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(SYS_ID_PFR1_EL1);
+
+ info->reg_mvfr0 = read_cpuid(SYS_MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(SYS_MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(SYS_MVFR2_EL1);
cpuinfo_detect_icache_policy(info);
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index a773db92908b..f82036e02485 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -61,7 +61,7 @@ ENTRY(entry)
*/
mov x20, x0 // DTB address
ldr x0, [sp, #16] // relocated _text address
- ldr x21, =stext_offset
+ movz x21, #:abs_g0:stext_offset
add x21, x0, x21
/*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e3131b39fbf2..b67e70c34888 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -27,6 +27,7 @@
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
+#include <asm/irq.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@@ -88,9 +89,12 @@
.if \el == 0
mrs x21, sp_el0
- get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
+ mov tsk, sp
+ and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
+
+ mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
.endif
@@ -108,6 +112,13 @@
.endif
/*
+ * Set sp_el0 to current thread_info.
+ */
+ .if \el == 0
+ msr sp_el0, tsk
+ .endif
+
+ /*
* Registers that may be useful after this macro is invoked:
*
* x21 - aborted SP
@@ -164,8 +175,44 @@ alternative_endif
.endm
.macro get_thread_info, rd
- mov \rd, sp
- and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
+ mrs \rd, sp_el0
+ .endm
+
+ .macro irq_stack_entry
+ mov x19, sp // preserve the original sp
+
+ /*
+ * Compare sp with the current thread_info, if the top
+ * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
+ * should switch to the irq stack.
+ */
+ and x25, x19, #~(THREAD_SIZE - 1)
+ cmp x25, tsk
+ b.ne 9998f
+
+ this_cpu_ptr irq_stack, x25, x26
+ mov x26, #IRQ_STACK_START_SP
+ add x26, x25, x26
+
+ /* switch to the irq stack */
+ mov sp, x26
+
+ /*
+ * Add a dummy stack frame, this non-standard format is fixed up
+ * by unwind_frame()
+ */
+ stp x29, x19, [sp, #-16]!
+ mov x29, sp
+
+9998:
+ .endm
+
+ /*
+ * x19 should be preserved between irq_stack_entry and
+ * irq_stack_exit.
+ */
+ .macro irq_stack_exit
+ mov sp, x19
.endm
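The test performed by irq_stack_entry above (mask sp down to the stack base and compare it with tsk) is easier to read in C; a rough equivalent, for illustration only:

static inline bool on_task_stack_sketch(unsigned long sp, struct thread_info *tsk)
{
	/* The task stack is THREAD_SIZE aligned, so masking yields its base. */
	return (sp & ~(THREAD_SIZE - 1)) == (unsigned long)tsk;
}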
/*
@@ -183,10 +230,11 @@ tsk .req x28 // current thread_info
* Interrupt handling.
*/
.macro irq_handler
- adrp x1, handle_arch_irq
- ldr x1, [x1, #:lo12:handle_arch_irq]
+ ldr_l x1, handle_arch_irq
mov x0, sp
+ irq_stack_entry
blr x1
+ irq_stack_exit
.endm
.text
@@ -358,10 +406,10 @@ el1_irq:
bl trace_hardirqs_off
#endif
+ get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
- get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
@@ -626,6 +674,8 @@ ENTRY(cpu_switch_to)
mov v15.16b, v15.16b
#endif
mov sp, x9
+ and x9, x9, #~(THREAD_SIZE - 1)
+ msr sp_el0, x9
ret
ENDPROC(cpu_switch_to)
@@ -653,14 +703,14 @@ ret_fast_syscall_trace:
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
- ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
- tst x2, #PSR_MODE_MASK // user mode regs?
- b.ne no_work_pending // returning to kernel
enable_irq // enable interrupts for do_notify_resume()
bl do_notify_resume
b ret_to_user
work_resched:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off // the IRQs are off here, inform the tracing code
+#endif
bl schedule
/*
@@ -672,7 +722,6 @@ ret_to_user:
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
enable_step_tsk x1, x2
-no_work_pending:
kernel_exit 0
ENDPROC(ret_to_user)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 4c46c54a3ad7..acc1afd5c749 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -289,7 +289,7 @@ static struct notifier_block fpsimd_cpu_pm_notifier_block = {
.notifier_call = fpsimd_cpu_pm_notifier,
};
-static void fpsimd_pm_init(void)
+static void __init fpsimd_pm_init(void)
{
cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index c851be795080..ebecf9aa33d1 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -29,12 +29,11 @@ static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
/*
* Note:
- * Due to modules and __init, code can disappear and change,
- * we need to protect against faulting as well as code changing.
- * We do this by aarch64_insn_*() which use the probe_kernel_*().
- *
- * No lock is held here because all the modifications are run
- * through stop_machine().
+ * We are paranoid about modifying text, as if a bug were to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with aarch64_insn_*() which uses
+ * probe_kernel_*(), and make sure what we read is what we expected it
+ * to be before modifying it.
*/
if (validate) {
if (aarch64_insn_read((void *)pc, &replaced))
@@ -93,6 +92,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ftrace_modify_code(pc, old, new, true);
}
+void arch_ftrace_update_code(int command)
+{
+ ftrace_modify_all_code(command);
+}
+
int __init ftrace_dyn_arch_init(void)
{
return 0;
@@ -125,23 +129,20 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
* on other archs. It's unlikely on AArch64.
*/
old = *parent;
- *parent = return_hooker;
trace.func = self_addr;
trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- *parent = old;
+ if (!ftrace_graph_entry(&trace))
return;
- }
err = ftrace_push_return_trace(old, self_addr, &trace.depth,
frame_pointer);
- if (err == -EBUSY) {
- *parent = old;
+ if (err == -EBUSY)
return;
- }
+ else
+ *parent = return_hooker;
}
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b685257926f0..a88a15447c3b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -29,6 +29,7 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
+#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -67,12 +68,11 @@
* in the entry routines.
*/
__HEAD
-
+_head:
/*
* DO NOT MODIFY. Image header expected by Linux boot-loaders.
*/
#ifdef CONFIG_EFI
-efi_head:
/*
* This add instruction has no meaningful effect except that
* its opcode forms the magic "MZ" signature required by UEFI.
@@ -83,9 +83,9 @@ efi_head:
b stext // branch to kernel start, magic
.long 0 // reserved
#endif
- .quad _kernel_offset_le // Image load offset from start of RAM, little-endian
- .quad _kernel_size_le // Effective size of kernel image, little-endian
- .quad _kernel_flags_le // Informative flags, little-endian
+ le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian
+ le64sym _kernel_size_le // Effective size of kernel image, little-endian
+ le64sym _kernel_flags_le // Informative flags, little-endian
.quad 0 // reserved
.quad 0 // reserved
.quad 0 // reserved
@@ -94,14 +94,14 @@ efi_head:
.byte 0x4d
.byte 0x64
#ifdef CONFIG_EFI
- .long pe_header - efi_head // Offset to the PE header.
+ .long pe_header - _head // Offset to the PE header.
#else
.word 0 // reserved
#endif
#ifdef CONFIG_EFI
.globl __efistub_stext_offset
- .set __efistub_stext_offset, stext - efi_head
+ .set __efistub_stext_offset, stext - _head
.align 3
pe_header:
.ascii "PE"
@@ -124,7 +124,7 @@ optional_header:
.long _end - stext // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
- .long __efistub_entry - efi_head // AddressOfEntryPoint
+ .long __efistub_entry - _head // AddressOfEntryPoint
.long __efistub_stext_offset // BaseOfCode
extra_header_fields:
@@ -139,7 +139,7 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
- .long _end - efi_head // SizeOfImage
+ .long _end - _head // SizeOfImage
// Everything before the kernel image is considered part of the header
.long __efistub_stext_offset // SizeOfHeaders
@@ -210,6 +210,7 @@ section_table:
ENTRY(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
+ mov x23, xzr // KASLR offset, defaults to 0
adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag
bl __create_page_tables // x25=TTBR0, x26=TTBR1
@@ -219,11 +220,13 @@ ENTRY(stext)
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
- ldr x27, =__mmap_switched // address to jump to after
+ ldr x27, 0f // address to jump to after
// MMU has been enabled
adr_l lr, __enable_mmu // return (PIC) address
b __cpu_setup // initialise processor
ENDPROC(stext)
+ .align 3
+0: .quad __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
@@ -311,7 +314,7 @@ ENDPROC(preserve_boot_args)
__create_page_tables:
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
- mov x27, lr
+ mov x28, lr
/*
* Invalidate the idmap and swapper page tables to avoid potential
@@ -389,9 +392,11 @@ __create_page_tables:
* Map the kernel image (starting with PHYS_OFFSET).
*/
mov x0, x26 // swapper_pg_dir
- mov x5, #PAGE_OFFSET
+ ldr x5, =KIMAGE_VADDR
+ add x5, x5, x23 // add KASLR displacement
create_pgd_entry x0, x5, x3, x6
- ldr x6, =KERNEL_END // __va(KERNEL_END)
+ ldr w6, kernel_img_size
+ add x6, x6, x5
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -405,9 +410,11 @@ __create_page_tables:
dmb sy
bl __inval_cache_range
- mov lr, x27
- ret
+ ret x28
ENDPROC(__create_page_tables)
+
+kernel_img_size:
+ .long _end - (_head - TEXT_OFFSET)
.ltorg
/*
@@ -415,21 +422,81 @@ ENDPROC(__create_page_tables)
*/
.set initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
- adr_l x6, __bss_start
- adr_l x7, __bss_stop
+ mov x28, lr // preserve LR
+ adr_l x8, vectors // load VBAR_EL1 with virtual
+ msr vbar_el1, x8 // vector table address
+ isb
-1: cmp x6, x7
+ // Clear BSS
+ adr_l x0, __bss_start
+ mov x1, xzr
+ adr_l x2, __bss_stop
+ sub x2, x2, x0
+ bl __pi_memset
+ dsb ishst // Make zero page visible to PTW
+
+#ifdef CONFIG_RELOCATABLE
+
+ /*
+ * Iterate over each entry in the relocation table, and apply the
+ * relocations in place.
+ */
+ adr_l x8, __dynsym_start // start of symbol table
+ adr_l x9, __reloc_start // start of reloc table
+ adr_l x10, __reloc_end // end of reloc table
+
+0: cmp x9, x10
b.hs 2f
- str xzr, [x6], #8 // Clear BSS
- b 1b
-2:
+ ldp x11, x12, [x9], #24
+ ldr x13, [x9, #-8]
+ cmp w12, #R_AARCH64_RELATIVE
+ b.ne 1f
+ add x13, x13, x23 // relocate
+ str x13, [x11, x23]
+ b 0b
+
+1: cmp w12, #R_AARCH64_ABS64
+ b.ne 0b
+ add x12, x12, x12, lsl #1 // symtab offset: 24x top word
+ add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
+ ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
+ ldr x15, [x12, #8] // Elf64_Sym::st_value
+ cmp w14, #-0xf // SHN_ABS (0xfff1) ?
+ add x14, x15, x23 // relocate
+ csel x15, x14, x15, ne
+ add x15, x13, x15
+ str x15, [x11, x23]
+ b 0b
+
+2: adr_l x8, kimage_vaddr // make relocated kimage_vaddr
+ dc cvac, x8 // value visible to secondaries
+ dsb sy // with MMU off
+#endif
+
adr_l sp, initial_sp, x4
+ mov x4, sp
+ and x4, x4, #~(THREAD_SIZE - 1)
+ msr sp_el0, x4 // Save thread_info
str_l x21, __fdt_pointer, x5 // Save FDT pointer
- str_l x24, memstart_addr, x6 // Save PHYS_OFFSET
+
+ ldr_l x4, kimage_vaddr // Save the offset between
+ sub x4, x4, x24 // the kernel virtual and
+ str_l x4, kimage_voffset, x5 // physical mappings
+
mov x29, #0
#ifdef CONFIG_KASAN
bl kasan_early_init
#endif
+#ifdef CONFIG_RANDOMIZE_BASE
+ cbnz x23, 0f // already running randomized?
+ mov x0, x21 // pass FDT address in x0
+ bl kaslr_early_init // parse FDT for KASLR options
+ cbz x0, 0f // KASLR disabled? just proceed
+ mov x23, x0 // record KASLR offset
+ ret x28 // we must enable KASLR, return
+ // to __enable_mmu()
+0:
+#endif
b start_kernel
ENDPROC(__mmap_switched)
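The relocation loop added to __mmap_switched above is, in C terms, a pass over the .rela entries; a sketch assuming the standard Elf64_Rela layout from <linux/elf.h> (only the R_AARCH64_RELATIVE half shown, the ABS64/symbol-table case omitted):

#include <linux/elf.h>

static void apply_relative_relocs_sketch(Elf64_Rela *rela, Elf64_Rela *end,
					 u64 kaslr_offset)
{
	for (; rela < end; rela++) {
		if (ELF64_R_TYPE(rela->r_info) != R_AARCH64_RELATIVE)
			continue;
		/* Patch the (relocated) place with addend + displacement. */
		*(u64 *)(rela->r_offset + kaslr_offset) =
				rela->r_addend + kaslr_offset;
	}
}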
@@ -438,6 +505,10 @@ ENDPROC(__mmap_switched)
* hotplug and needs to have the same protections as the text region
*/
.section ".text","ax"
+
+ENTRY(kimage_vaddr)
+ .quad _text - TEXT_OFFSET
+
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
@@ -603,14 +674,22 @@ ENTRY(secondary_startup)
adrp x26, swapper_pg_dir
bl __cpu_setup // initialise processor
- ldr x21, =secondary_data
- ldr x27, =__secondary_switched // address to jump to after enabling the MMU
+ ldr x8, kimage_vaddr
+ ldr w9, 0f
+ sub x27, x8, w9, sxtw // address to jump to after enabling the MMU
b __enable_mmu
ENDPROC(secondary_startup)
+0: .long (_text - TEXT_OFFSET) - __secondary_switched
ENTRY(__secondary_switched)
- ldr x0, [x21] // get secondary_data.stack
+ adr_l x5, vectors
+ msr vbar_el1, x5
+ isb
+
+ ldr_l x0, secondary_data // get secondary_data.stack
mov sp, x0
+ and x0, x0, #~(THREAD_SIZE - 1)
+ msr sp_el0, x0 // save thread_info
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
@@ -628,12 +707,11 @@ ENDPROC(__secondary_switched)
*/
.section ".idmap.text", "ax"
__enable_mmu:
+ mrs x18, sctlr_el1 // preserve old SCTLR_EL1 value
mrs x1, ID_AA64MMFR0_EL1
ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
- ldr x5, =vectors
- msr vbar_el1, x5
msr ttbr0_el1, x25 // load TTBR0
msr ttbr1_el1, x26 // load TTBR1
isb
@@ -647,6 +725,26 @@ __enable_mmu:
ic iallu
dsb nsh
isb
+#ifdef CONFIG_RANDOMIZE_BASE
+ mov x19, x0 // preserve new SCTLR_EL1 value
+ blr x27
+
+ /*
+ * If we return here, we have a KASLR displacement in x23 which we need
+ * to take into account by discarding the current kernel mapping and
+ * creating a new one.
+ */
+ msr sctlr_el1, x18 // disable the MMU
+ isb
+ bl __create_page_tables // recreate kernel mapping
+
+ msr sctlr_el1, x19 // re-enable the MMU
+ isb
+ ic iallu // flush instructions fetched
+ dsb nsh // via old mapping
+ isb
+ add x27, x27, x23 // relocated __mmap_switched
+#endif
br x27
ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index bc2abb8b1599..db1bf57948f1 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -26,31 +26,40 @@
* There aren't any ELF relocations we can use to endian-swap values known only
* at link time (e.g. the subtraction of two symbol addresses), so we must get
* the linker to endian-swap certain values before emitting them.
+ *
+ * Note that, in order for this to work when building the ELF64 PIE executable
+ * (for KASLR), these values should not be referenced via R_AARCH64_ABS64
+ * relocations, since these are fixed up at runtime rather than at build time
+ * when PIE is in effect. So we need to split them up in 32-bit high and low
+ * words.
*/
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define DATA_LE64(data) \
- ((((data) & 0x00000000000000ff) << 56) | \
- (((data) & 0x000000000000ff00) << 40) | \
- (((data) & 0x0000000000ff0000) << 24) | \
- (((data) & 0x00000000ff000000) << 8) | \
- (((data) & 0x000000ff00000000) >> 8) | \
- (((data) & 0x0000ff0000000000) >> 24) | \
- (((data) & 0x00ff000000000000) >> 40) | \
- (((data) & 0xff00000000000000) >> 56))
+#define DATA_LE32(data) \
+ ((((data) & 0x000000ff) << 24) | \
+ (((data) & 0x0000ff00) << 8) | \
+ (((data) & 0x00ff0000) >> 8) | \
+ (((data) & 0xff000000) >> 24))
#else
-#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
+#define DATA_LE32(data) ((data) & 0xffffffff)
#endif
+#define DEFINE_IMAGE_LE64(sym, data) \
+ sym##_lo32 = DATA_LE32((data) & 0xffffffff); \
+ sym##_hi32 = DATA_LE32((data) >> 32)
+
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE 1
#else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE 0
#endif
-#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
+#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
+
+#define __HEAD_FLAG_PHYS_BASE 1
-#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
- (__HEAD_FLAG_PAGE_SIZE << 1))
+#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
+ (__HEAD_FLAG_PAGE_SIZE << 1) | \
+ (__HEAD_FLAG_PHYS_BASE << 3))
/*
* These will output as part of the Image header, which should be little-endian
@@ -58,13 +67,23 @@
* endian swapped in head.S, all are done here for consistency.
*/
#define HEAD_SYMBOLS \
- _kernel_size_le = DATA_LE64(_end - _text); \
- _kernel_offset_le = DATA_LE64(TEXT_OFFSET); \
- _kernel_flags_le = DATA_LE64(__HEAD_FLAGS);
+ DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \
+ DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
+ DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
#ifdef CONFIG_EFI
/*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below) so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
+
+/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
* isolate it from the kernel proper. The following symbols are legally
* accessed by the stub, so provide some aliases to make them accessible.
@@ -73,25 +92,25 @@
* linked at. The routines below are all implemented in assembler in a
* position independent manner
*/
-__efistub_memcmp = __pi_memcmp;
-__efistub_memchr = __pi_memchr;
-__efistub_memcpy = __pi_memcpy;
-__efistub_memmove = __pi_memmove;
-__efistub_memset = __pi_memset;
-__efistub_strlen = __pi_strlen;
-__efistub_strcmp = __pi_strcmp;
-__efistub_strncmp = __pi_strncmp;
-__efistub___flush_dcache_area = __pi___flush_dcache_area;
+__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset = KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
#ifdef CONFIG_KASAN
-__efistub___memcpy = __pi_memcpy;
-__efistub___memmove = __pi_memmove;
-__efistub___memset = __pi_memset;
+__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset = KALLSYMS_HIDE(__pi_memset);
#endif
-__efistub__text = _text;
-__efistub__end = _end;
-__efistub__edata = _edata;
+__efistub__text = KALLSYMS_HIDE(_text);
+__efistub__end = KALLSYMS_HIDE(_end);
+__efistub__edata = KALLSYMS_HIDE(_edata);
#endif
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 9f17ec071ee0..2386b26c0712 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -30,6 +30,9 @@
unsigned long irq_err_count;
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
+
int arch_show_interrupts(struct seq_file *p, int prec)
{
show_ipi_list(p, prec);
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
new file mode 100644
index 000000000000..582983920054
--- /dev/null
+++ b/arch/arm64/kernel/kaslr.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/libfdt.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+u64 __read_mostly module_alloc_base;
+u16 __initdata memstart_offset_seed;
+
+static __init u64 get_kaslr_seed(void *fdt)
+{
+ int node, len;
+ u64 *prop;
+ u64 ret;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return 0;
+
+ prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
+ if (!prop || len != sizeof(u64))
+ return 0;
+
+ ret = fdt64_to_cpu(*prop);
+ *prop = 0;
+ return ret;
+}
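
get_kaslr_seed() consumes (and then wipes) a 64-bit /chosen/kaslr-seed property that the boot environment is expected to have placed in the device tree. As a hypothetical loader-side counterpart, using only standard libfdt calls — how the seed itself is generated is left entirely to the loader and is not specified here:

#include <stdint.h>
#include <libfdt.h>

/*
 * Hypothetical helper for a bootloader or stub: write a random seed into
 * /chosen/kaslr-seed before handing the DTB to the kernel.
 */
static int set_kaslr_seed(void *fdt, uint64_t seed)
{
	int node = fdt_path_offset(fdt, "/chosen");

	if (node < 0)			/* create /chosen if it is missing */
		node = fdt_add_subnode(fdt, 0, "chosen");
	if (node < 0)
		return node;

	return fdt_setprop_u64(fdt, node, "kaslr-seed", seed);
}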
+
+static __init const u8 *get_cmdline(void *fdt)
+{
+ static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ int node;
+ const u8 *prop;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ goto out;
+
+ prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ if (!prop)
+ goto out;
+ return prop;
+ }
+out:
+ return default_cmdline;
+}
+
+extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
+ pgprot_t prot);
+
+/*
+ * This routine will be executed with the kernel mapped at its default virtual
+ * address, and if it returns successfully, the kernel will be remapped, and
+ * start_kernel() will be executed from a randomized virtual offset. The
+ * relocation will result in all absolute references (e.g., static variables
+ * containing function pointers) to be reinitialized, and zero-initialized
+ * .bss variables will be reset to 0.
+ */
+u64 __init kaslr_early_init(u64 dt_phys)
+{
+ void *fdt;
+ u64 seed, offset, mask, module_range;
+ const u8 *cmdline, *str;
+ int size;
+
+ /*
+ * Set a reasonable default for module_alloc_base in case
+ * we end up running with module randomization disabled.
+ */
+ module_alloc_base = (u64)_etext - MODULES_VSIZE;
+
+ /*
+ * Try to map the FDT early. If this fails, we simply bail,
+ * and proceed with KASLR disabled. We will make another
+ * attempt at mapping the FDT in setup_machine()
+ */
+ early_fixmap_init();
+ fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ if (!fdt)
+ return 0;
+
+ /*
+ * Retrieve (and wipe) the seed from the FDT
+ */
+ seed = get_kaslr_seed(fdt);
+ if (!seed)
+ return 0;
+
+ /*
+ * Check if 'nokaslr' appears on the command line, and
+ * return 0 if that is the case.
+ */
+ cmdline = get_cmdline(fdt);
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ return 0;
+
+ /*
+ * OK, so we are proceeding with KASLR enabled. Calculate a suitable
+ * kernel image offset from the seed. Let's place the kernel in the
+ * lower half of the VMALLOC area (VA_BITS - 2).
+ * Even if we could randomize at page granularity for 16k and 64k pages,
+ * let's always round to 2 MB so we don't interfere with the ability to
+ * map using contiguous PTEs
+ */
+ mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
+ offset = seed & mask;
+
+ /* use the top 16 bits to randomize the linear region */
+ memstart_offset_seed = seed >> 48;
+
+ /*
+ * The kernel Image should not extend across a 1GB/32MB/512MB alignment
+ * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
+ * happens, increase the KASLR offset by the size of the kernel image.
+ */
+ if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+ (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+ offset = (offset + (u64)(_end - _text)) & mask;
+
+ if (IS_ENABLED(CONFIG_KASAN))
+ /*
+ * KASAN does not expect the module region to intersect the
+ * vmalloc region, since shadow memory is allocated for each
+ * module at load time, whereas the vmalloc region is shadowed
+ * by KASAN zero pages. So keep modules out of the vmalloc
+ * region if KASAN is enabled.
+ */
+ return offset;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ /*
+ * Randomize the module region independently from the core
+ * kernel. This prevents modules from leaking any information
+ * about the address of the kernel itself, but results in
+ * branches between modules and the core kernel that are
+ * resolved via PLTs. (Branches between modules will be
+ * resolved normally.)
+ */
+ module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
+ module_alloc_base = VMALLOC_START;
+ } else {
+ /*
+ * Randomize the module region by setting module_alloc_base to
+ * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
+ * _stext) . This guarantees that the resulting region still
+ * covers [_stext, _etext], and that all relative branches can
+ * be resolved without veneers.
+ */
+ module_range = MODULES_VSIZE - (u64)(_etext - _stext);
+ module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
+ }
+
+ /* use the lower 21 bits to randomize the base of the module region */
+ module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+ module_alloc_base &= PAGE_MASK;
+
+ return offset;
+}
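
To make the arithmetic in kaslr_early_init() concrete, here is a small stand-alone program that reproduces the mask/offset computation and the SWAPPER_TABLE_SHIFT wrap check for one assumed configuration (4 KB granule, VA_BITS == 48, so 2 MB granularity within the lower half of the vmalloc area and a 1 GB table boundary); the seed and image addresses are made-up example values:

#include <stdint.h>
#include <stdio.h>

#define VA_BITS			48
#define SZ_2M			0x200000ULL
#define SWAPPER_TABLE_SHIFT	30	/* 1 GB for a 4 KB granule */

int main(void)
{
	uint64_t seed    = 0x123456789abcdef0ULL;	/* example seed */
	uint64_t text_va = 0xffff000008080000ULL;	/* example _text link address */
	uint64_t end_va  = text_va + 0x1200000;		/* example _end */
	uint64_t mask, offset;

	/* 2 MB-aligned offset within the lower half of the vmalloc area */
	mask   = ((1ULL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	offset = seed & mask;

	/* don't let the displaced image straddle a 1 GB boundary */
	if (((text_va + offset) >> SWAPPER_TABLE_SHIFT) !=
	    ((end_va  + offset) >> SWAPPER_TABLE_SHIFT))
		offset = (offset + (end_va - text_va)) & mask;

	printf("mask   = 0x%016llx\n", (unsigned long long)mask);
	printf("offset = 0x%016llx\n", (unsigned long long)offset);
	return 0;
}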
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
new file mode 100644
index 000000000000..1ce90d8450ae
--- /dev/null
+++ b/arch/arm64/kernel/module-plts.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sort.h>
+
+struct plt_entry {
+ /*
+ * A program that conforms to the AArch64 Procedure Call Standard
+ * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+ * IP1 (x17) may be inserted at any branch instruction that is
+ * exposed to a relocation that supports long branches. Since that
+ * is exactly what we are dealing with here, we are free to use x16
+ * as a scratch register in the PLT veneers.
+ */
+ __le32 mov0; /* movn x16, #0x.... */
+ __le32 mov1; /* movk x16, #0x...., lsl #16 */
+ __le32 mov2; /* movk x16, #0x...., lsl #32 */
+ __le32 br; /* br x16 */
+};
+
+u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+ Elf64_Sym *sym)
+{
+ struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr;
+ int i = mod->arch.plt_num_entries;
+ u64 val = sym->st_value + rela->r_addend;
+
+ /*
+ * We only emit PLT entries against undefined (SHN_UNDEF) symbols,
+ * which are listed in the ELF symtab section, but without a type
+ * or a size.
+ * So, similar to how the module loader uses the Elf64_Sym::st_value
+ * field to store the resolved addresses of undefined symbols, let's
+ * borrow the Elf64_Sym::st_size field (whose value is never used by
+ * the module loader, even for symbols that are defined) to record
+ * the address of a symbol's associated PLT entry as we emit it for a
+ * zero addend relocation (which is the only kind we have to deal with
+ * in practice). This allows us to find duplicates without having to
+ * go through the table every time.
+ */
+ if (rela->r_addend == 0 && sym->st_size != 0) {
+ BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]);
+ return sym->st_size;
+ }
+
+ mod->arch.plt_num_entries++;
+ BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries);
+
+ /*
+ * MOVK/MOVN/MOVZ opcode:
+ * +--------+------------+--------+-----------+-------------+---------+
+ * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+ * +--------+------------+--------+-----------+-------------+---------+
+ *
+ * Rd := 0x10 (x16)
+ * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+ * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+ * sf := 1 (64-bit variant)
+ */
+ plt[i] = (struct plt_entry){
+ cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
+ cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+ cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+ cpu_to_le32(0xd61f0200)
+ };
+
+ if (rela->r_addend == 0)
+ sym->st_size = (u64)&plt[i];
+
+ return (u64)&plt[i];
+}
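
The struct initializer above encodes the veneer directly from the opcode table in the comment: a movn of the complemented low 16 bits (which conveniently leaves bits 63:48 all-ones, as a kernel virtual address requires), two movk's for bits 31:16 and 47:32, and a br x16. A stand-alone sketch that builds the same four words for an example target address, so they can be checked against a disassembler:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xffff000008123456ULL;	/* example branch target */
	uint32_t insn[4] = {
		0x92800010u | (uint32_t)((( ~val       ) & 0xffff) << 5), /* movn x16, #0x.... */
		0xf2a00010u | (uint32_t)(((  val >> 16 ) & 0xffff) << 5), /* movk x16, #0x...., lsl #16 */
		0xf2c00010u | (uint32_t)(((  val >> 32 ) & 0xffff) << 5), /* movk x16, #0x...., lsl #32 */
		0xd61f0200u,						  /* br   x16 */
	};

	for (int i = 0; i < 4; i++)
		printf("0x%08x\n", insn[i]);
	return 0;
}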
+
+#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rela(const void *a, const void *b)
+{
+ const Elf64_Rela *x = a, *y = b;
+ int i;
+
+ /* sort by type, symbol index and addend */
+ i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
+ if (i == 0)
+ i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
+ if (i == 0)
+ i = cmp_3way(x->r_addend, y->r_addend);
+ return i;
+}
+
+static bool duplicate_rel(const Elf64_Rela *rela, int num)
+{
+ /*
+ * Entries are sorted by type, symbol index and addend. That means
+ * that, if a duplicate entry exists, it must be in the preceding
+ * slot.
+ */
+ return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
+}
+
+static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
+{
+ unsigned int ret = 0;
+ Elf64_Sym *s;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ switch (ELF64_R_TYPE(rela[i].r_info)) {
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+ /*
+ * We only have to consider branch targets that resolve
+ * to undefined symbols. This is not simply a heuristic,
+ * it is a fundamental limitation, since the PLT itself
+ * is part of the module, and needs to be within 128 MB
+ * as well, so modules can never grow beyond that limit.
+ */
+ s = syms + ELF64_R_SYM(rela[i].r_info);
+ if (s->st_shndx != SHN_UNDEF)
+ break;
+
+ /*
+ * Jump relocations with non-zero addends against
+ * undefined symbols are supported by the ELF spec, but
+ * do not occur in practice (e.g., 'jump n bytes past
+ * the entry point of undefined function symbol f').
+ * So we need to support them, but there is no need to
+ * take them into consideration when trying to optimize
+ * this code. So let's only check for duplicates when
+ * the addend is zero: this allows us to record the PLT
+ * entry address in the symbol table itself, rather than
+ * having to search the list for duplicates each time we
+ * emit one.
+ */
+ if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
+ ret++;
+ break;
+ }
+ }
+ return ret;
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned long plt_max_entries = 0;
+ Elf64_Sym *syms = NULL;
+ int i;
+
+ /*
+ * Find the empty .plt section so we can expand it to store the PLT
+ * entries. Record the symtab address as well.
+ */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0)
+ mod->arch.plt = sechdrs + i;
+ else if (sechdrs[i].sh_type == SHT_SYMTAB)
+ syms = (Elf64_Sym *)sechdrs[i].sh_addr;
+ }
+
+ if (!mod->arch.plt) {
+ pr_err("%s: module PLT section missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!syms) {
+ pr_err("%s: module symtab section missing\n", mod->name);
+ return -ENOEXEC;
+ }
+
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
+ int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
+ Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dstsec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ /* sort by type, symbol index and addend */
+ sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
+
+ plt_max_entries += count_plts(syms, rels, numrels);
+ }
+
+ mod->arch.plt->sh_type = SHT_NOBITS;
+ mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry);
+ mod->arch.plt_num_entries = 0;
+ mod->arch.plt_max_entries = plt_max_entries;
+ return 0;
+}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f4bc779e62e8..7f316982ce00 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -30,17 +30,30 @@
#include <asm/insn.h>
#include <asm/sections.h>
-#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
-#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
-
void *module_alloc(unsigned long size)
{
void *p;
- p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+ module_alloc_base + MODULES_VSIZE,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
+ if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+ !IS_ENABLED(CONFIG_KASAN))
+ /*
+ * KASAN can only deal with module allocations being served
+ * from the reserved module region, since the remainder of
+ * the vmalloc region is already backed by zero shadow pages,
+ * and punching holes into it is non-trivial. Since the module
+ * region is not randomized when KASAN is enabled, it is even
+ * less likely that the module region gets exhausted, so we
+ * can simply omit this fallback in that case.
+ */
+ p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
+ VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ NUMA_NO_NODE, __builtin_return_address(0));
+
if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p);
return NULL;
@@ -75,15 +88,18 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
- u64 imm_mask = (1 << len) - 1;
s64 sval = do_reloc(op, place, val);
switch (len) {
case 16:
*(s16 *)place = sval;
+ if (sval < S16_MIN || sval > U16_MAX)
+ return -ERANGE;
break;
case 32:
*(s32 *)place = sval;
+ if (sval < S32_MIN || sval > U32_MAX)
+ return -ERANGE;
break;
case 64:
*(s64 *)place = sval;
@@ -92,34 +108,23 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
pr_err("Invalid length (%d) for data relocation\n", len);
return 0;
}
-
- /*
- * Extract the upper value bits (including the sign bit) and
- * shift them to bit 0.
- */
- sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
-
- /*
- * Overflow has occurred if the value is not representable in
- * len bits (i.e the bottom len bits are not sign-extended and
- * the top bits are not all zero).
- */
- if ((u64)(sval + 1) > 2)
- return -ERANGE;
-
return 0;
}
+enum aarch64_insn_movw_imm_type {
+ AARCH64_INSN_IMM_MOVNZ,
+ AARCH64_INSN_IMM_MOVKZ,
+};
+
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_insn_imm_type imm_type)
+ int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
- u64 imm, limit = 0;
+ u64 imm;
s64 sval;
u32 insn = le32_to_cpu(*(u32 *)place);
sval = do_reloc(op, place, val);
- sval >>= lsb;
- imm = sval & 0xffff;
+ imm = sval >> lsb;
if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
@@ -128,7 +133,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
* immediate is less than zero.
*/
insn &= ~(3 << 29);
- if ((s64)imm >= 0) {
+ if (sval >= 0) {
/* >=0: Set the instruction to MOVZ (opcode 10b). */
insn |= 2 << 29;
} else {
@@ -140,29 +145,13 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
*/
imm = ~imm;
}
- imm_type = AARCH64_INSN_IMM_MOVK;
}
/* Update the instruction with the new encoding. */
- insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
*(u32 *)place = cpu_to_le32(insn);
- /* Shift out the immediate field. */
- sval >>= 16;
-
- /*
- * For unsigned immediates, the overflow check is straightforward.
- * For signed immediates, the sign bit is actually the bit past the
- * most significant bit of the field.
- * The AARCH64_INSN_IMM_16 immediate type is unsigned.
- */
- if (imm_type != AARCH64_INSN_IMM_16) {
- sval++;
- limit++;
- }
-
- /* Check the upper bits depending on the sign of the immediate. */
- if ((u64)sval > limit)
+ if (imm > U16_MAX)
return -ERANGE;
return 0;
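
With the rewrite above, reloc_insn_movw() no longer tracks a signed/unsigned limit pair: the immediate is complemented when the instruction is flipped to MOVN, and the single `imm > U16_MAX` test at the end covers both cases. A small worked example for a signed G0 relocation against a negative value (made-up number, lsb == 0):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t  sval = -0x1234;		/* example do_reloc() result */
	uint64_t imm  = (uint64_t)sval >> 0;	/* imm = sval >> lsb */

	if (sval < 0)
		imm = ~imm;			/* MOVZ becomes MOVN */

	/*
	 * imm is now 0x1233 <= U16_MAX, so no -ERANGE is reported, and
	 * movn x?, #0x1233 yields ~0x1233 == 0xffffffffffffedcc, i.e.
	 * (u64)-0x1234 again.
	 */
	printf("imm = 0x%llx, fits16 = %d\n",
	       (unsigned long long)imm, imm <= 0xffff);
	return 0;
}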
@@ -267,25 +256,25 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- AARCH64_INSN_IMM_16);
+ AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- AARCH64_INSN_IMM_16);
+ AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- AARCH64_INSN_IMM_16);
+ AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- AARCH64_INSN_IMM_16);
+ AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
@@ -302,7 +291,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- AARCH64_INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
@@ -311,7 +300,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- AARCH64_INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
@@ -320,7 +309,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- AARCH64_INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
@@ -388,6 +377,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
AARCH64_INSN_IMM_26);
+
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+ ovf == -ERANGE) {
+ val = module_emit_plt_entry(me, &rel[i], sym);
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
+ 26, AARCH64_INSN_IMM_26);
+ }
break;
default:
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
new file mode 100644
index 000000000000..8949f6c6f729
--- /dev/null
+++ b/arch/arm64/kernel/module.lds
@@ -0,0 +1,3 @@
+SECTIONS {
+ .plt (NOLOAD) : { BYTE(0) }
+}
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 3aa74830cc69..ff4665462a02 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -164,8 +164,11 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame.graph = current->curr_ret_stack;
+#endif
- walk_stackframe(&frame, callchain_trace, entry);
+ walk_stackframe(current, &frame, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7c4563cbccf3..8202bd87f3f8 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -46,6 +46,7 @@
#include <linux/notifier.h>
#include <trace/events/power.h>
+#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/fpsimd.h>
@@ -350,6 +351,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
+ if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+ cpus_have_cap(ARM64_HAS_UAO))
+ childregs->pstate |= PSR_UAO_BIT;
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
@@ -378,6 +382,17 @@ static void tls_thread_switch(struct task_struct *next)
: : "r" (tpidr), "r" (tpidrro));
}
+/* Restore the UAO state depending on next's addr_limit */
+static void uao_thread_switch(struct task_struct *next)
+{
+ if (IS_ENABLED(CONFIG_ARM64_UAO)) {
+ if (task_thread_info(next)->addr_limit == KERNEL_DS)
+ asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
+ else
+ asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
+ }
+}
+
/*
* Thread switching.
*/
@@ -390,6 +405,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
+ uao_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -414,11 +430,14 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame.graph = p->curr_ret_stack;
+#endif
stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
- unwind_frame(&frame))
+ unwind_frame(p, &frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 6c4fd2810ecb..1718706fde83 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -43,8 +43,11 @@ void *return_address(unsigned int level)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame.graph = current->curr_ret_stack;
+#endif
- walk_stackframe(&frame, save_return_addr, &data);
+ walk_stackframe(current, &frame, save_return_addr, &data);
if (!data.level)
return data.addr;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index cd4eb7ee618c..d1e01e6498bb 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -63,6 +63,7 @@
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
unsigned int boot_reason;
EXPORT_SYMBOL(boot_reason);
@@ -327,6 +328,12 @@ void __init setup_arch(char **cmdline_p)
*/
local_async_enable();
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
+ */
+ cpu_uninstall_idmap();
+
efi_init();
arm64_memblock_init();
@@ -402,3 +409,32 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
pdev->archdata.dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
}
+
+/*
+ * Dump out kernel offset information on panic.
+ */
+static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
+ void *p)
+{
+ u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
+ pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
+ kaslr_offset, KIMAGE_VADDR);
+ } else {
+ pr_emerg("Kernel Offset: disabled\n");
+ }
+ return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+ .notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_offset_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index f586f7c875e2..e33fe33876ab 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -173,6 +173,9 @@ ENTRY(cpu_resume)
/* load physical address of identity map page table in x1 */
adrp x1, idmap_pg_dir
mov sp, x2
+ /* save thread_info */
+ and x2, x2, #~(THREAD_SIZE - 1)
+ msr sp_el0, x2
/*
* cpu_do_resume expects x0 to contain context physical address
* pointer and x1 to contain physical address of 1:1 page tables
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 08e78e47be95..b2f5631c3785 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -154,9 +154,7 @@ asmlinkage void secondary_start_kernel(void)
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
*/
- cpu_set_reserved_ttbr0();
- local_flush_tlb_all();
- cpu_set_default_tcr_t0sz();
+ cpu_uninstall_idmap();
preempt_disable();
trace_hardirqs_off();
@@ -459,6 +457,17 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(cpu_count) = hwid;
+ /*
+ * Set-up the ACPI parking protocol cpu entries
+ * while initializing the cpu_logical_map to
+ * avoid parsing MADT entries multiple times for
+ * nothing (ie a valid cpu_logical_map entry should
+ * contain a valid parking protocol data set to
+ * initialize the cpu if the parking protocol is
+ * the only available enable method).
+ */
+ acpi_set_mailbox_entry(cpu_count, processor);
+
cpu_count++;
}
@@ -701,10 +710,12 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
- smp_cross_call_common(mask, IPI_WAKEUP);
+ smp_cross_call(mask, IPI_WAKEUP);
}
+#endif
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
@@ -855,13 +866,18 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
#endif
- case IPI_WAKEUP:
- break;
-
case IPI_CPU_BACKTRACE:
ipi_cpu_backtrace(cpu, regs);
break;
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+ case IPI_WAKEUP:
+ WARN_ONCE(!acpi_parking_protocol_valid(cpu),
+ "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
+ cpu);
+ break;
+#endif
+
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 665c7fedc65b..85aea381fbf6 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,9 +18,11 @@
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
+#include <asm/irq.h>
#include <asm/stacktrace.h>
/*
@@ -36,15 +38,29 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
-int notrace unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
+ unsigned long irq_stack_ptr;
+
+ /*
+ * Switching between stacks is valid when tracing current and in
+ * non-preemptible context.
+ */
+ if (tsk == current && !preemptible())
+ irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+ else
+ irq_stack_ptr = 0;
low = frame->sp;
- high = ALIGN(low, THREAD_SIZE);
+ /* irq stacks are not THREAD_SIZE aligned */
+ if (on_irq_stack(frame->sp, raw_smp_processor_id()))
+ high = irq_stack_ptr;
+ else
+ high = ALIGN(low, THREAD_SIZE) - 0x20;
- if (fp < low || fp > high - 0x18 || fp & 0xf)
+ if (fp < low || fp > high || fp & 0xf)
return -EINVAL;
kasan_disable_current();
@@ -53,12 +69,55 @@ int notrace unwind_frame(struct stackframe *frame)
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 8);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (tsk && tsk->ret_stack &&
+ (frame->pc == (unsigned long)return_to_handler)) {
+ /*
+ * This is a case where function graph tracer has
+ * modified a return address (LR) in a stack frame
+ * to hook a function return.
+ * So replace it to an original value.
+ */
+ frame->pc = tsk->ret_stack[frame->graph--].ret;
+ }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ /*
+ * Check whether we are going to walk through from interrupt stack
+ * to task stack.
+ * If we reach the end of the stack - and its an interrupt stack,
+ * unpack the dummy frame to find the original elr.
+ *
+ * Check the frame->fp we read from the bottom of the irq_stack,
+ * and the original task stack pointer are both in current->stack.
+ */
+ if (frame->sp == irq_stack_ptr) {
+ struct pt_regs *irq_args;
+ unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
+ if (object_is_on_stack((void *)orig_sp) &&
+ object_is_on_stack((void *)frame->fp)) {
+ frame->sp = orig_sp;
+
+ /* orig_sp is the saved pt_regs, find the elr */
+ irq_args = (struct pt_regs *)orig_sp;
+ frame->pc = irq_args->pc;
+ } else {
+ /*
+ * This frame has a non-standard format, and we
+ * didn't fix it, because the data looked wrong.
+ * Refuse to output this frame.
+ */
+ return -EINVAL;
+ }
+ }
+
kasan_enable_current();
return 0;
}
-void notrace walk_stackframe(struct stackframe *frame,
+void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
{
while (1) {
@@ -66,7 +125,7 @@ void notrace walk_stackframe(struct stackframe *frame,
if (fn(frame, data))
break;
- ret = unwind_frame(frame);
+ ret = unwind_frame(tsk, frame);
if (ret < 0)
break;
}
@@ -117,8 +176,11 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame.graph = tsk->curr_ret_stack;
+#endif
- walk_stackframe(&frame, save_trace, &data);
+ walk_stackframe(tsk, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 1095aa483a1c..66055392f445 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -60,7 +60,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
*/
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
- struct mm_struct *mm = current->active_mm;
int ret;
unsigned long flags;
@@ -87,22 +86,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
/*
- * We are resuming from reset with TTBR0_EL1 set to the
- * idmap to enable the MMU; set the TTBR0 to the reserved
- * page tables to prevent speculative TLB allocations, flush
- * the local tlb and set the default tcr_el1.t0sz so that
- * the TTBR0 address space set-up is properly restored.
- * If the current active_mm != &init_mm we entered cpu_suspend
- * with mappings in TTBR0 that must be restored, so we switch
- * them back to complete the address space configuration
- * restoration before returning.
+ * We are resuming from reset with the idmap active in TTBR0_EL1.
+ * We must uninstall the idmap and restore the expected MMU
+ * state before we can possibly return to userspace.
*/
- cpu_set_reserved_ttbr0();
- local_flush_tlb_all();
- cpu_set_default_tcr_t0sz();
-
- if (mm != &init_mm)
- cpu_switch_mm(mm->pgd, mm);
+ cpu_uninstall_idmap();
/*
* Restore per-cpu offset before any kernel
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 13339b6ffc1a..59779699a1a4 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -52,8 +52,11 @@ unsigned long profile_pc(struct pt_regs *regs)
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame.graph = -1; /* no task info */
+#endif
do {
- int ret = unwind_frame(&frame);
+ int ret = unwind_frame(NULL, &frame);
if (ret < 0)
return 0;
} while (in_lock_functions(frame.pc));
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 7d54c168d19c..48b75ece4c17 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -150,17 +150,24 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
+ unsigned long irq_stack_ptr;
+ int skip;
+
+ /*
+ * Switching between stacks is valid when tracing current and in
+ * non-preemptible context.
+ */
+ if (tsk == current && !preemptible())
+ irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+ else
+ irq_stack_ptr = 0;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
tsk = current;
- if (regs) {
- frame.fp = regs->regs[29];
- frame.sp = regs->sp;
- frame.pc = regs->pc;
- } else if (tsk == current) {
+ if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
@@ -172,21 +179,49 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame.graph = tsk->curr_ret_stack;
+#endif
- pr_emerg("Call trace:\n");
+ skip = !!regs;
+ printk("Call trace:\n");
while (1) {
unsigned long where = frame.pc;
unsigned long stack;
int ret;
- dump_backtrace_entry(where);
- ret = unwind_frame(&frame);
+ /* skip until specified stack frame */
+ if (!skip) {
+ dump_backtrace_entry(where);
+ } else if (frame.fp == regs->regs[29]) {
+ skip = 0;
+ /*
+ * Mostly, this is the case where this function is
+ * called in panic/abort. As exception handler's
+ * stack frame does not contain the corresponding pc
+ * at which an exception has taken place, use regs->pc
+ * instead.
+ */
+ dump_backtrace_entry(regs->pc);
+ }
+ ret = unwind_frame(tsk, &frame);
if (ret < 0)
break;
stack = frame.sp;
- if (in_exception_text(where))
+ if (in_exception_text(where)) {
+ /*
+ * If we switched to the irq_stack before calling this
+ * exception handler, then the pt_regs will be on the
+ * task stack. The easiest way to tell is if the large
+ * pt_regs would overlap with the end of the irq_stack.
+ */
+ if (stack < irq_stack_ptr &&
+ (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
+ stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
dump_mem("", "Exception stack", stack,
stack + sizeof(struct pt_regs), false);
+ }
}
}
@@ -502,22 +537,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __pte_error(const char *file, int line, unsigned long val)
{
- pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
+ pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}
void __pmd_error(const char *file, int line, unsigned long val)
{
- pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
+ pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}
void __pud_error(const char *file, int line, unsigned long val)
{
- pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
+ pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}
void __pgd_error(const char *file, int line, unsigned long val)
{
- pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
+ pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}
/* GENERIC_BUG traps */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index b3f48316f888..6b562a318f84 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -88,15 +88,16 @@ SECTIONS
EXIT_CALL
*(.discard)
*(.discard.*)
+ *(.interp .dynamic)
}
- . = PAGE_OFFSET + TEXT_OFFSET;
+ . = KIMAGE_VADDR + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT
}
- ALIGN_DEBUG_RO
+ ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -114,14 +115,12 @@ SECTIONS
*(.got) /* Global offset table */
}
- ALIGN_DEBUG_RO
RO_DATA(PAGE_SIZE)
EXCEPTION_TABLE(8)
NOTES
- ALIGN_DEBUG_RO
- _etext = .; /* End of text and rodata section */
ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+ _etext = .; /* End of text and rodata section */
__init_begin = .;
INIT_TEXT_SECTION(8)
@@ -129,7 +128,6 @@ SECTIONS
ARM_EXIT_KEEP(EXIT_TEXT)
}
- ALIGN_DEBUG_RO_MIN(16)
.init.data : {
INIT_DATA
INIT_SETUP(16)
@@ -144,9 +142,6 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
@@ -156,8 +151,25 @@ SECTIONS
.altinstr_replacement : {
*(.altinstr_replacement)
}
+ .rela : ALIGN(8) {
+ __reloc_start = .;
+ *(.rela .rela*)
+ __reloc_end = .;
+ }
+ .dynsym : ALIGN(8) {
+ __dynsym_start = .;
+ *(.dynsym)
+ }
+ .dynstr : {
+ *(.dynstr)
+ }
+ .hash : {
+ *(.hash)
+ }
. = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
_data = .;
_sdata = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
@@ -191,4 +203,4 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
-ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 86c289832272..309e3479dc2c 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -923,7 +923,7 @@ __hyp_panic_str:
.align 2
/*
- * u64 kvm_call_hyp(void *hypfn, ...);
+ * u64 __kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C-way and care must
* be taken when calling this to ensure parameters are passed in registers
@@ -940,10 +940,10 @@ __hyp_panic_str:
* used to implement __hyp_get_vectors in the same way as in
* arch/arm64/kernel/hyp_stub.S.
*/
-ENTRY(kvm_call_hyp)
+ENTRY(__kvm_call_hyp)
hvc #0
ret
-ENDPROC(kvm_call_hyp)
+ENDPROC(__kvm_call_hyp)
.macro invalid_vector label, target
.align 2
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 1a811ecf71da..c86b7909ef31 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -4,15 +4,16 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o
-# Tell the compiler to treat all general purpose registers as
-# callee-saved, which allows for efficient runtime patching of the bl
-# instruction in the caller with an atomic instruction when supported by
-# the CPU. Result and argument registers are handled correctly, based on
-# the function prototype.
+# Tell the compiler to treat all general purpose registers (with the
+# exception of the IP registers, which are already handled by the caller
+# in case of a PLT) as callee-saved, which allows for efficient runtime
+# patching of the bl instruction in the caller with an atomic instruction
+# when supported by the CPU. Result and argument registers are handled
+# correctly, based on the function prototype.
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
-ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
- -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
+ -fcall-saved-x18
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index a9723c71c52b..5d1cad3ce6d6 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -33,28 +33,28 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
1:
-USER(9f, str xzr, [x0], #8 )
+uao_user_alternative 9f, str, sttr, xzr, x0, 8
subs x1, x1, #8
b.pl 1b
2: adds x1, x1, #4
b.mi 3f
-USER(9f, str wzr, [x0], #4 )
+uao_user_alternative 9f, str, sttr, wzr, x0, 4
sub x1, x1, #4
3: adds x1, x1, #2
b.mi 4f
-USER(9f, strh wzr, [x0], #2 )
+uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
-USER(9f, strb wzr, [x0] )
+uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4699cd74f87e..17e8306dca29 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -34,7 +34,7 @@
*/
.macro ldrb1 ptr, regB, val
- USER(9998f, ldrb \ptr, [\regB], \val)
+ uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
.endm
.macro strb1 ptr, regB, val
@@ -42,7 +42,7 @@
.endm
.macro ldrh1 ptr, regB, val
- USER(9998f, ldrh \ptr, [\regB], \val)
+ uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
.endm
.macro strh1 ptr, regB, val
@@ -50,7 +50,7 @@
.endm
.macro ldr1 ptr, regB, val
- USER(9998f, ldr \ptr, [\regB], \val)
+ uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
.endm
.macro str1 ptr, regB, val
@@ -58,7 +58,7 @@
.endm
.macro ldp1 ptr, regB, regC, val
- USER(9998f, ldp \ptr, \regB, [\regC], \val)
+ uao_ldp 9998f, \ptr, \regB, \regC, \val
.endm
.macro stp1 ptr, regB, regC, val
@@ -67,11 +67,11 @@
end .req x5
ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x0, #0 // Nothing to copy
ret
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 81c8fc93c100..f7292dd08c84 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -35,44 +35,44 @@
* x0 - bytes not copied
*/
.macro ldrb1 ptr, regB, val
- USER(9998f, ldrb \ptr, [\regB], \val)
+ uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
.endm
.macro strb1 ptr, regB, val
- USER(9998f, strb \ptr, [\regB], \val)
+ uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
.endm
.macro ldrh1 ptr, regB, val
- USER(9998f, ldrh \ptr, [\regB], \val)
+ uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
.endm
.macro strh1 ptr, regB, val
- USER(9998f, strh \ptr, [\regB], \val)
+ uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
.endm
.macro ldr1 ptr, regB, val
- USER(9998f, ldr \ptr, [\regB], \val)
+ uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
.endm
.macro str1 ptr, regB, val
- USER(9998f, str \ptr, [\regB], \val)
+ uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
.endm
.macro ldp1 ptr, regB, regC, val
- USER(9998f, ldp \ptr, \regB, [\regC], \val)
+ uao_ldp 9998f, \ptr, \regB, \regC, \val
.endm
.macro stp1 ptr, regB, regC, val
- USER(9998f, stp \ptr, \regB, [\regC], \val)
+ uao_stp 9998f, \ptr, \regB, \regC, \val
.endm
end .req x5
ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index 512b9a7b980e..4c1e700840b6 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -18,6 +18,8 @@
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
/*
* Copy a page from src to dest (both are page aligned)
@@ -27,20 +29,65 @@
* x1 - src
*/
ENTRY(copy_page)
- /* Assume cache line size is 64 bytes. */
- prfm pldl1strm, [x1, #64]
-1: ldp x2, x3, [x1]
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+ nop
+ nop
+alternative_else
+ # Prefetch two cache lines ahead.
+ prfm pldl1strm, [x1, #128]
+ prfm pldl1strm, [x1, #256]
+alternative_endif
+
+ ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
ldp x6, x7, [x1, #32]
ldp x8, x9, [x1, #48]
- add x1, x1, #64
- prfm pldl1strm, [x1, #64]
+ ldp x10, x11, [x1, #64]
+ ldp x12, x13, [x1, #80]
+ ldp x14, x15, [x1, #96]
+ ldp x16, x17, [x1, #112]
+
+ mov x18, #(PAGE_SIZE - 128)
+ add x1, x1, #128
+1:
+ subs x18, x18, #128
+
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+ nop
+alternative_else
+ prfm pldl1strm, [x1, #384]
+alternative_endif
+
stnp x2, x3, [x0]
+ ldp x2, x3, [x1]
stnp x4, x5, [x0, #16]
+ ldp x4, x5, [x1, #16]
stnp x6, x7, [x0, #32]
+ ldp x6, x7, [x1, #32]
stnp x8, x9, [x0, #48]
- add x0, x0, #64
- tst x1, #(PAGE_SIZE - 1)
- b.ne 1b
+ ldp x8, x9, [x1, #48]
+ stnp x10, x11, [x0, #64]
+ ldp x10, x11, [x1, #64]
+ stnp x12, x13, [x0, #80]
+ ldp x12, x13, [x1, #80]
+ stnp x14, x15, [x0, #96]
+ ldp x14, x15, [x1, #96]
+ stnp x16, x17, [x0, #112]
+ ldp x16, x17, [x1, #112]
+
+ add x0, x0, #128
+ add x1, x1, #128
+
+ b.gt 1b
+
+ stnp x2, x3, [x0]
+ stnp x4, x5, [x0, #16]
+ stnp x6, x7, [x0, #32]
+ stnp x8, x9, [x0, #48]
+ stnp x10, x11, [x0, #64]
+ stnp x12, x13, [x0, #80]
+ stnp x14, x15, [x0, #96]
+ stnp x16, x17, [x0, #112]
+
ret
ENDPROC(copy_page)
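
The rewritten copy_page above prefetches two cache lines ahead (only where hardware prefetch is absent), then runs a 128-byte software-pipelined loop in which the stores of one chunk are interleaved with the loads of the next, draining the last chunk after the loop. A C-level sketch of that structure, assuming 4 KB pages; the real routine additionally uses non-temporal stnp stores and the alternatives-patched prefetches:

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE	4096	/* assumed granule for this sketch */

void copy_page_sketch(void *dst, const void *src)
{
	uint64_t buf[16];		/* stands in for x2..x17 */
	const char *s = src;
	char *d = dst;
	long remaining = PAGE_SIZE - 128;

	memcpy(buf, s, 128);		/* prime the pipeline */
	s += 128;

	do {
		remaining -= 128;
		memcpy(d, buf, 128);	/* store the previous chunk */
		memcpy(buf, s, 128);	/* load the next chunk */
		d += 128;
		s += 128;
	} while (remaining > 0);

	memcpy(d, buf, 128);		/* drain the final chunk */
}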
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 7512bbbc07ac..21faae60f988 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -37,7 +37,7 @@
.endm
.macro strb1 ptr, regB, val
- USER(9998f, strb \ptr, [\regB], \val)
+ uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
.endm
.macro ldrh1 ptr, regB, val
@@ -45,7 +45,7 @@
.endm
.macro strh1 ptr, regB, val
- USER(9998f, strh \ptr, [\regB], \val)
+ uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
.endm
.macro ldr1 ptr, regB, val
@@ -53,7 +53,7 @@
.endm
.macro str1 ptr, regB, val
- USER(9998f, str \ptr, [\regB], \val)
+ uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
.endm
.macro ldp1 ptr, regB, regC, val
@@ -61,16 +61,16 @@
.endm
.macro stp1 ptr, regB, regC, val
- USER(9998f, stp \ptr, \regB, [\regC], \val)
+ uao_stp 9998f, \ptr, \regB, \regC, \val
.endm
end .req x5
ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index ce32a2229a9d..2f06997dcd59 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -154,26 +154,32 @@ ENDPROC(__flush_cache_user_range)
/*
* __flush_dcache_area(kaddr, size)
*
- * Ensure that the data held in the page kaddr is written back to the
- * page in question.
+ * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * are cleaned and invalidated to the PoC.
*
* - kaddr - kernel address
* - size - size in question
*/
ENTRY(__flush_dcache_area)
- dcache_line_size x2, x3
- add x1, x0, x1
- sub x3, x2, #1
- bic x0, x0, x3
-1: dc civac, x0 // clean & invalidate D line / unified line
- add x0, x0, x2
- cmp x0, x1
- b.lo 1b
- dsb sy
+ dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
ENDPIPROC(__flush_dcache_area)
/*
+ * __clean_dcache_area_pou(kaddr, size)
+ *
+ * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * are cleaned to the PoU.
+ *
+ * - kaddr - kernel address
+ * - size - size in question
+ */
+ENTRY(__clean_dcache_area_pou)
+ dcache_by_line_op cvau, ish, x0, x1, x2, x3
+ ret
+ENDPROC(__clean_dcache_area_pou)
+
+/*
* __inval_cache_range(start, end)
* - start - start address of region
* - end - end address of region
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index e87f53ff5f58..7275628ba59f 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -187,7 +187,7 @@ switch_mm_fastpath:
static int asids_init(void)
{
- int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
+ int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4);
switch (fld) {
default:
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 13bbc3be6f5a..22e4cb4d6f53 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -24,8 +24,9 @@
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
+ struct page *page = virt_to_page(kto);
copy_page(kto, kfrom);
- __flush_dcache_area(kto, PAGE_SIZE);
+ flush_dcache_page(page);
}
EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index eff70892dada..07c67348b815 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -63,7 +63,7 @@ static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot,
static struct gen_pool *atomic_pool;
#define NO_KERNEL_MAPPING_DUMMY 0x2222
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
@@ -1026,7 +1026,7 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
return 0;
}
-static int register_iommu_dma_ops_notifier(struct bus_type *bus)
+static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
int ret;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 5a22a119a74c..6be918478f85 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -35,7 +35,9 @@ struct addr_marker {
};
enum address_markers_idx {
- VMALLOC_START_NR = 0,
+ MODULES_START_NR = 0,
+ MODULES_END_NR,
+ VMALLOC_START_NR,
VMALLOC_END_NR,
#ifdef CONFIG_SPARSEMEM_VMEMMAP
VMEMMAP_START_NR,
@@ -45,12 +47,12 @@ enum address_markers_idx {
FIXADDR_END_NR,
PCI_START_NR,
PCI_END_NR,
- MODULES_START_NR,
- MODUELS_END_NR,
KERNEL_SPACE_NR,
};
static struct addr_marker address_markers[] = {
+ { MODULES_VADDR, "Modules start" },
+ { MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() Area" },
{ VMALLOC_END, "vmalloc() End" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -61,9 +63,7 @@ static struct addr_marker address_markers[] = {
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
{ PCI_IO_END, "PCI I/O end" },
- { MODULES_VADDR, "Modules start" },
- { MODULES_END, "Modules end" },
- { PAGE_OFFSET, "Kernel Mapping" },
+ { PAGE_OFFSET, "Linear Mapping" },
{ -1, NULL },
};
@@ -90,6 +90,11 @@ struct prot_bits {
static const struct prot_bits pte_bits[] = {
{
+ .mask = PTE_VALID,
+ .val = PTE_VALID,
+ .set = " ",
+ .clear = "F",
+ }, {
.mask = PTE_USER,
.val = PTE_USER,
.set = "USR",
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 79444279ba8c..81acd4706878 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -11,7 +11,7 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup)
- regs->pc = fixup->fixup;
+ regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
return fixup != NULL;
}
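
This hunk (and the parisc hunks later in the series) stores exception-table entries as 32-bit offsets relative to the entry itself rather than as absolute addresses. A minimal C sketch of how such a relative fixup resolves back to an absolute address; the struct and helper names here are hypothetical:

/*
 * Illustrative only: resolving a relative exception-table entry, mirroring
 * the fixup_exception() change above.
 */
struct rel_extable_entry {
	int insn;	/* offset of the faulting insn, relative to &insn */
	int fixup;	/* offset of the fixup code, relative to &fixup */
};

static unsigned long rel_fixup_address(const struct rel_extable_entry *e)
{
	/* base address of the 'fixup' member plus its signed offset */
	return (unsigned long)&e->fixup + e->fixup;
}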
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 88ada16a1301..69079e5bfc84 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -197,6 +197,14 @@ out:
return fault;
}
+static inline int permission_fault(unsigned int esr)
+{
+ unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
+ unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
+
+ return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+}
+
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -230,12 +238,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- /*
- * PAN bit set implies the fault happened in kernel space, but not
- * in the arch's user access functions.
- */
- if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
- goto no_context;
+ if (permission_fault(esr) && (addr < USER_DS)) {
+ if (get_fs() == KERNEL_DS)
+ die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
+
+ if (!search_exception_tables(regs->pc))
+ die("Accessing user space memory outside uaccess.h routines", regs, esr);
+ }
/*
* As per x86, we may deadlock here. However, since the kernel only
@@ -567,3 +576,16 @@ void cpu_enable_pan(void *__unused)
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
}
#endif /* CONFIG_ARM64_PAN */
+
+#ifdef CONFIG_ARM64_UAO
+/*
+ * Kernel threads have fs=KERNEL_DS by default, and don't need to call
+ * set_fs(), devtmpfs in particular relies on this behaviour.
+ * We need to enable the feature at runtime (instead of adding it to
+ * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
+ */
+void cpu_enable_uao(void *__unused)
+{
+ asm(SET_PSTATE_UAO(1));
+}
+#endif /* CONFIG_ARM64_UAO */
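
The new permission_fault() helper classifies the fault purely from the ESR syndrome. A hedged sketch of that decode follows; the numeric values are assumptions based on the ARMv8 ESR_ELx layout, whereas the kernel uses the ESR_ELx_* constants from asm/esr.h:

/*
 * Sketch of the ESR decode performed by permission_fault() above.
 */
#define EC_SHIFT	26
#define EC_MASK		(0x3fU << EC_SHIFT)
#define EC_DABT_CUR	0x25U		/* data abort from the current EL */
#define FSC_TYPE_MASK	0x3cU		/* fault status code, level bits masked */
#define FSC_PERM	0x0cU		/* permission fault */

static int is_kernel_permission_fault(unsigned int esr)
{
	unsigned int ec = (esr & EC_MASK) >> EC_SHIFT;

	return ec == EC_DABT_CUR && (esr & FSC_TYPE_MASK) == FSC_PERM;
}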
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 07844184975b..61b0911aa475 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -34,19 +34,24 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
__flush_icache_all();
}
+static void sync_icache_aliases(void *kaddr, unsigned long len)
+{
+ unsigned long addr = (unsigned long)kaddr;
+
+ if (icache_is_aliasing()) {
+ __clean_dcache_area_pou(kaddr, len);
+ __flush_icache_all();
+ } else {
+ flush_icache_range(addr, addr + len);
+ }
+}
+
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len)
{
- if (vma->vm_flags & VM_EXEC) {
- unsigned long addr = (unsigned long)kaddr;
- if (icache_is_aliasing()) {
- __flush_dcache_area(kaddr, len);
- __flush_icache_all();
- } else {
- flush_icache_range(addr, addr + len);
- }
- }
+ if (vma->vm_flags & VM_EXEC)
+ sync_icache_aliases(kaddr, len);
}
/*
@@ -74,13 +79,11 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
if (!page_mapping(page))
return;
- if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
- __flush_dcache_area(page_address(page),
- PAGE_SIZE << compound_order(page));
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ sync_icache_aliases(page_address(page),
+ PAGE_SIZE << compound_order(page));
+ else if (icache_is_aivivt())
__flush_icache_all();
- } else if (icache_is_aivivt()) {
- __flush_icache_all();
- }
}
/*
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 383b03ff38f8..da30529bb1f6 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -41,15 +41,273 @@ int pud_huge(pud_t pud)
#endif
}
+static int find_num_contig(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, size_t *pgsize)
+{
+ pgd_t *pgd = pgd_offset(mm, addr);
+ pud_t *pud;
+ pmd_t *pmd;
+
+ *pgsize = PAGE_SIZE;
+ if (!pte_cont(pte))
+ return 1;
+ if (!pgd_present(*pgd)) {
+ VM_BUG_ON(!pgd_present(*pgd));
+ return 1;
+ }
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud)) {
+ VM_BUG_ON(!pud_present(*pud));
+ return 1;
+ }
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd)) {
+ VM_BUG_ON(!pmd_present(*pmd));
+ return 1;
+ }
+ if ((pte_t *)pmd == ptep) {
+ *pgsize = PMD_SIZE;
+ return CONT_PMDS;
+ }
+ return CONT_PTES;
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ size_t pgsize;
+ int i;
+ int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
+ unsigned long pfn;
+ pgprot_t hugeprot;
+
+ if (ncontig == 1) {
+ set_pte_at(mm, addr, ptep, pte);
+ return;
+ }
+
+ pfn = pte_pfn(pte);
+ hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+ for (i = 0; i < ncontig; i++) {
+ pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
+ pte_val(pfn_pte(pfn, hugeprot)));
+ set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+ ptep++;
+ pfn += pgsize >> PAGE_SHIFT;
+ addr += pgsize;
+ }
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (!pud)
+ return NULL;
+
+ if (sz == PUD_SIZE) {
+ pte = (pte_t *)pud;
+ } else if (sz == (PAGE_SIZE * CONT_PTES)) {
+ pmd_t *pmd = pmd_alloc(mm, pud, addr);
+
+ WARN_ON(addr & (sz - 1));
+ /*
+ * Note that if this code were ever ported to the
+ * 32-bit arm platform then it will cause trouble in
+ * the case where CONFIG_HIGHPTE is set, since there
+ * will be no pte_unmap() to correspond with this
+ * pte_alloc_map().
+ */
+ pte = pte_alloc_map(mm, NULL, pmd, addr);
+ } else if (sz == PMD_SIZE) {
+ if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
+ pud_none(*pud))
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ } else if (sz == (PMD_SIZE * CONT_PMDS)) {
+ pmd_t *pmd;
+
+ pmd = pmd_alloc(mm, pud, addr);
+ WARN_ON(addr & (sz - 1));
+ return (pte_t *)pmd;
+ }
+
+ pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
+ sz, pte, pte_val(*pte));
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
+ if (!pgd_present(*pgd))
+ return NULL;
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ return NULL;
+
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ return NULL;
+
+ if (pte_cont(pmd_pte(*pmd))) {
+ pmd = pmd_offset(
+ pud, (addr & CONT_PMD_MASK));
+ return (pte_t *)pmd;
+ }
+ if (pmd_huge(*pmd))
+ return (pte_t *)pmd;
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_present(*pte) && pte_cont(*pte)) {
+ pte = pte_offset_kernel(
+ pmd, (addr & CONT_PTE_MASK));
+ return pte;
+ }
+ return NULL;
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ size_t pagesize = huge_page_size(hstate_vma(vma));
+
+ if (pagesize == CONT_PTE_SIZE) {
+ entry = pte_mkcont(entry);
+ } else if (pagesize == CONT_PMD_SIZE) {
+ entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
+ } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
+ pr_warn("%s: unrecognized huge page size 0x%lx\n",
+ __func__, pagesize);
+ }
+ return entry;
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte;
+
+ if (pte_cont(*ptep)) {
+ int ncontig, i;
+ size_t pgsize;
+ pte_t *cpte;
+ bool is_dirty = false;
+
+ cpte = huge_pte_offset(mm, addr);
+ ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+ /* save the 1st pte to return */
+ pte = ptep_get_and_clear(mm, addr, cpte);
+ for (i = 1; i < ncontig; ++i) {
+ /*
+ * If HW_AFDBM is enabled, then the HW could
+ * turn on the dirty bit for any of the page
+ * in the set, so check them all.
+ */
+ ++cpte;
+ if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
+ is_dirty = true;
+ }
+ if (is_dirty)
+ return pte_mkdirty(pte);
+ else
+ return pte;
+ } else {
+ return ptep_get_and_clear(mm, addr, ptep);
+ }
+}
+
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ pte_t *cpte;
+
+ if (pte_cont(pte)) {
+ int ncontig, i, changed = 0;
+ size_t pgsize = 0;
+ unsigned long pfn = pte_pfn(pte);
+ /* Select all bits except the pfn */
+ pgprot_t hugeprot =
+ __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
+ pte_val(pte));
+
+ cpte = huge_pte_offset(vma->vm_mm, addr);
+ pfn = pte_pfn(*cpte);
+ ncontig = find_num_contig(vma->vm_mm, addr, cpte,
+ *cpte, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++cpte) {
+ changed = ptep_set_access_flags(vma, addr, cpte,
+ pfn_pte(pfn,
+ hugeprot),
+ dirty);
+ pfn += pgsize >> PAGE_SHIFT;
+ }
+ return changed;
+ } else {
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ }
+}
+
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ if (pte_cont(*ptep)) {
+ int ncontig, i;
+ pte_t *cpte;
+ size_t pgsize = 0;
+
+ cpte = huge_pte_offset(mm, addr);
+ ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++cpte)
+ ptep_set_wrprotect(mm, addr, cpte);
+ } else {
+ ptep_set_wrprotect(mm, addr, ptep);
+ }
+}
+
+void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ if (pte_cont(*ptep)) {
+ int ncontig, i;
+ pte_t *cpte;
+ size_t pgsize = 0;
+
+ cpte = huge_pte_offset(vma->vm_mm, addr);
+ ncontig = find_num_contig(vma->vm_mm, addr, cpte,
+ *cpte, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++cpte)
+ ptep_clear_flush(vma, addr, cpte);
+ } else {
+ ptep_clear_flush(vma, addr, ptep);
+ }
+}
+
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
+
if (ps == PMD_SIZE) {
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
- pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
+ pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
return 0;
}
return 1;
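
set_huge_pte_at() above writes one entry per base page of a contiguous hugepage, stepping the pfn and the address by the base page size each time. A small sketch of that stepping, assuming a 4K granule where CONT_PTES is 16 (a 64K contiguous hugepage); the names are illustrative:

/*
 * Sketch of the pfn/addr stepping for a contiguous-PTE hugepage.
 */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define CONT_PTES	16

static void fill_contig_range(unsigned long addr, unsigned long pfn,
			      void (*set_one)(unsigned long addr, unsigned long pfn))
{
	unsigned long pgsize = PAGE_SIZE;	/* 16 x 4K == one 64K hugepage */
	int i;

	for (i = 0; i < CONT_PTES; i++) {
		set_one(addr, pfn);		/* each entry carries the CONT attribute */
		pfn += pgsize >> PAGE_SHIFT;	/* advance one base page per entry */
		addr += pgsize;
	}
}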
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 52406a2a39ad..0d5b0d0578b3 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,7 +35,10 @@
#include <linux/efi.h>
#include <linux/swiotlb.h>
+#include <asm/boot.h>
#include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -45,7 +48,13 @@
#include "mm.h"
-phys_addr_t memstart_addr __read_mostly = 0;
+/*
+ * We need to be able to catch inadvertent references to memstart_addr
+ * that occur (potentially in generic code) before arm64_memblock_init()
+ * executes, which assigns it its actual value. So use a default value
+ * that cannot be mistaken for a real physical address.
+ */
+s64 memstart_addr __read_mostly = -1;
phys_addr_t arm64_dma_phys_limit __read_mostly;
#ifdef CONFIG_BLK_DEV_INITRD
@@ -58,8 +67,8 @@ static int __init early_initrd(char *p)
if (*endp == ',') {
size = memparse(endp + 1, NULL);
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(start + size);
+ initrd_start = start;
+ initrd_end = start + size;
}
return 0;
}
@@ -71,7 +80,7 @@ early_param("initrd", early_initrd);
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
-static phys_addr_t max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
@@ -128,11 +137,11 @@ EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
{
}
#else
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
{
struct memblock_region *reg;
@@ -161,7 +170,57 @@ early_param("mem", early_mem);
void __init arm64_memblock_init(void)
{
- memblock_enforce_memory_limit(memory_limit);
+ const s64 linear_region_size = -(s64)PAGE_OFFSET;
+
+ /*
+ * Ensure that the linear region takes up exactly half of the kernel
+ * virtual address space. This way, we can distinguish a linear address
+ * from a kernel/module/vmalloc address by testing a single bit.
+ */
+ BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
+
+ /*
+ * Select a suitable value for the base of physical memory.
+ */
+ memstart_addr = round_down(memblock_start_of_DRAM(),
+ ARM64_MEMSTART_ALIGN);
+
+ /*
+ * Remove the memory that we will not be able to cover with the
+ * linear mapping. Take care not to clip the kernel which may be
+ * high in memory.
+ */
+ memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
+ ULLONG_MAX);
+ if (memblock_end_of_DRAM() > linear_region_size)
+ memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
+
+ /*
+ * Apply the memory limit if it was set. Since the kernel may be loaded
+ * high up in memory, add back the kernel region that must be accessible
+ * via the linear mapping.
+ */
+ if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+ memblock_enforce_memory_limit(memory_limit);
+ memblock_add(__pa(_text), (u64)(_end - _text));
+ }
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ extern u16 memstart_offset_seed;
+ u64 range = linear_region_size -
+ (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+
+ /*
+ * If the size of the linear region exceeds, by a sufficient
+ * margin, the size of the region that the available physical
+ * memory spans, randomize the linear region as well.
+ */
+ if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+ range = range / ARM64_MEMSTART_ALIGN + 1;
+ memstart_addr -= ARM64_MEMSTART_ALIGN *
+ ((range * memstart_offset_seed) >> 16);
+ }
+ }
/*
* Register the kernel text, kernel data, initrd, and initial
@@ -169,8 +228,13 @@ void __init arm64_memblock_init(void)
*/
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
+ if (initrd_start) {
+ memblock_reserve(initrd_start, initrd_end - initrd_start);
+
+ /* the generic initrd code expects virtual addresses */
+ initrd_start = __phys_to_virt(initrd_start);
+ initrd_end = __phys_to_virt(initrd_end);
+ }
#endif
early_init_fdt_scan_reserved_mem();
@@ -304,35 +368,36 @@ void __init mem_init(void)
#ifdef CONFIG_KASAN
" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n"
#endif
+ " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n"
+ " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
+ " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
+ " .data : 0x%p" " - 0x%p" " (%6ld KB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n"
" 0x%16lx - 0x%16lx (%6ld MB actual)\n"
#endif
" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n"
" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
- " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
- " memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
- " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
- " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
- " .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ " memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
#ifdef CONFIG_KASAN
MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
#endif
+ MLM(MODULES_VADDR, MODULES_END),
MLG(VMALLOC_START, VMALLOC_END),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(_sdata, _edata),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
MLG(VMEMMAP_START,
VMEMMAP_START + VMEMMAP_SIZE),
- MLM((unsigned long)virt_to_page(PAGE_OFFSET),
+ MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
(unsigned long)virt_to_page(high_memory)),
#endif
MLK(FIXADDR_START, FIXADDR_TOP),
MLM(PCI_IO_START, PCI_IO_END),
- MLM(MODULES_VADDR, MODULES_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(_sdata, _edata));
+ MLM(__phys_to_virt(memblock_start_of_DRAM()),
+ (unsigned long)high_memory));
#undef MLK
#undef MLM
@@ -365,9 +430,8 @@ static inline void poison_init_mem(void *s, size_t count)
void free_initmem(void)
{
- fixup_init();
free_initmem_default(0);
- free_alternatives_memory();
+ fixup_init();
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -401,3 +465,27 @@ void set_kernel_text_ro(void)
set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}
#endif
+/*
+ * Dump out memory limit information on panic.
+ */
+static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+{
+ if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+ pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
+ } else {
+ pr_emerg("Memory Limit: none\n");
+ }
+ return 0;
+}
+
+static struct notifier_block mem_limit_notifier = {
+ .notifier_call = dump_mem_limit,
+};
+
+static int __init register_mem_limit_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &mem_limit_notifier);
+ return 0;
+}
+__initcall(register_mem_limit_dumper);
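
With CONFIG_RANDOMIZE_BASE enabled, arm64_memblock_init() above slides memstart_addr down by a seed-scaled multiple of ARM64_MEMSTART_ALIGN, using whatever slack the linear region has over the physical span. A worked example with made-up sizes; the alignment value is a stand-in, not the real constant:

/*
 * Illustrative recomputation of the linear-map randomisation above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long linear_region_size = 1ULL << 38;	/* e.g. half of a 39-bit VA space */
	unsigned long long dram_span          = 4ULL << 30;	/* 4 GB of physical memory */
	unsigned long long align              = 1ULL << 30;	/* stand-in for ARM64_MEMSTART_ALIGN */
	unsigned int seed                     = 0x8000;		/* 16-bit seed, roughly 0.5 */

	unsigned long long range = (linear_region_size - dram_span) / align + 1;
	unsigned long long slide = align * ((range * seed) >> 16);

	printf("memstart_addr is lowered by %llu bytes\n", slide);
	return 0;
}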
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index cf038c7d9fa9..757009daa9ed 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -16,9 +16,12 @@
#include <linux/memblock.h>
#include <linux/start_kernel.h>
+#include <asm/mmu_context.h>
+#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
+#include <asm/sections.h>
#include <asm/tlbflush.h>
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -32,7 +35,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
if (pmd_none(*pmd))
pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
- pte = pte_offset_kernel(pmd, addr);
+ pte = pte_offset_kimg(pmd, addr);
do {
next = addr + PAGE_SIZE;
set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -50,7 +53,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
if (pud_none(*pud))
pud_populate(&init_mm, pud, kasan_zero_pmd);
- pmd = pmd_offset(pud, addr);
+ pmd = pmd_offset_kimg(pud, addr);
do {
next = pmd_addr_end(addr, end);
kasan_early_pte_populate(pmd, addr, next);
@@ -67,7 +70,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
if (pgd_none(*pgd))
pgd_populate(&init_mm, pgd, kasan_zero_pud);
- pud = pud_offset(pgd, addr);
+ pud = pud_offset_kimg(pgd, addr);
do {
next = pud_addr_end(addr, end);
kasan_early_pmd_populate(pud, addr, next);
@@ -96,6 +99,21 @@ asmlinkage void __init kasan_early_init(void)
kasan_map_early_shadow();
}
+/*
+ * Copy the current shadow region into a new pgdir.
+ */
+void __init kasan_copy_shadow(pgd_t *pgdir)
+{
+ pgd_t *pgd, *pgd_new, *pgd_end;
+
+ pgd = pgd_offset_k(KASAN_SHADOW_START);
+ pgd_end = pgd_offset_k(KASAN_SHADOW_END);
+ pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+ do {
+ set_pgd(pgd_new, *pgd);
+ } while (pgd++, pgd_new++, pgd != pgd_end);
+}
+
static void __init clear_pgds(unsigned long start,
unsigned long end)
{
@@ -108,18 +126,18 @@ static void __init clear_pgds(unsigned long start,
set_pgd(pgd_offset_k(start), __pgd(0));
}
-static void __init cpu_set_ttbr1(unsigned long ttbr1)
-{
- asm(
- " msr ttbr1_el1, %0\n"
- " isb"
- :
- : "r" (ttbr1));
-}
-
void __init kasan_init(void)
{
+ u64 kimg_shadow_start, kimg_shadow_end;
+ u64 mod_shadow_start, mod_shadow_end;
struct memblock_region *reg;
+ int i;
+
+ kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
+ kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
+
+ mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
+ mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
/*
* We are going to perform proper setup of shadow memory.
@@ -129,13 +147,33 @@ void __init kasan_init(void)
* setup will be finished.
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
- cpu_set_ttbr1(__pa(tmp_pg_dir));
- flush_tlb_all();
+ dsb(ishst);
+ cpu_replace_ttbr1(tmp_pg_dir);
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
+ pfn_to_nid(virt_to_pfn(_text)));
+
+ /*
+ * vmemmap_populate() has populated the shadow region that covers the
+ * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
+ * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
+ * kasan_populate_zero_shadow() from replacing the page table entries
+ * (PMD or PTE) at the edges of the shadow region for the kernel
+ * image.
+ */
+ kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
+ kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
- kasan_mem_to_shadow((void *)MODULES_VADDR));
+ (void *)mod_shadow_start);
+ kasan_populate_zero_shadow((void *)kimg_shadow_end,
+ kasan_mem_to_shadow((void *)PAGE_OFFSET));
+
+ if (kimg_shadow_start > mod_shadow_end)
+ kasan_populate_zero_shadow((void *)mod_shadow_end,
+ (void *)kimg_shadow_start);
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
@@ -155,9 +193,16 @@ void __init kasan_init(void)
pfn_to_nid(virt_to_pfn(start)));
}
+ /*
+ * KAsan may reuse the contents of kasan_zero_pte directly, so we
+ * should make sure that it maps the zero page read-only.
+ */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(&kasan_zero_pte[i],
+ pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+
memset(kasan_zero_page, 0, PAGE_SIZE);
- cpu_set_ttbr1(__pa(swapper_pg_dir));
- flush_tlb_all();
+ cpu_replace_ttbr1(swapper_pg_dir);
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
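
kasan_init() above locates the shadow of the kernel image and of the module area via kasan_mem_to_shadow(). For reference, a sketch of that address arithmetic; the shadow offset is configuration-dependent and the value below is only a placeholder:

/*
 * Hedged sketch of kasan_mem_to_shadow(): each byte of shadow covers
 * eight bytes of kernel address space.
 */
#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_OFFSET		0xdfff200000000000UL	/* placeholder value */

static inline void *mem_to_shadow(const void *addr)
{
	return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET);
}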
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e82aabb3c5e2..62096a7e047a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,8 +32,10 @@
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
+#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
+#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -46,14 +48,21 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 kimage_voffset __read_mostly;
+EXPORT_SYMBOL(kimage_voffset);
+
/*
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
*/
-struct page *empty_zero_page;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-static bool __init dma_overlap(phys_addr_t start, phys_addr_t end);
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
+static bool dma_overlap(phys_addr_t start, phys_addr_t end);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
@@ -66,16 +75,30 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);
-static void __init *early_alloc(unsigned long sz)
+static phys_addr_t __init early_pgtable_alloc(void)
{
phys_addr_t phys;
void *ptr;
- phys = memblock_alloc(sz, sz);
+ phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
BUG_ON(!phys);
- ptr = __va(phys);
- memset(ptr, 0, sz);
- return ptr;
+
+ /*
+ * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
+ * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
+ * any level of table.
+ */
+ ptr = pte_set_fixmap(phys);
+
+ memset(ptr, 0, PAGE_SIZE);
+
+ /*
+ * Implicit barriers also ensure the zeroed page is visible to the page
+ * table walker
+ */
+ pte_clear_fixmap();
+
+ return phys;
}
/*
@@ -99,24 +122,30 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
- void *(*alloc)(unsigned long size))
+ phys_addr_t (*pgtable_alloc)(void))
{
pte_t *pte;
if (pmd_none(*pmd) || pmd_sect(*pmd)) {
- pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+ phys_addr_t pte_phys;
+ BUG_ON(!pgtable_alloc);
+ pte_phys = pgtable_alloc();
+ pte = pte_set_fixmap(pte_phys);
if (pmd_sect(*pmd))
split_pmd(pmd, pte);
- __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+ __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
flush_tlb_all();
+ pte_clear_fixmap();
}
BUG_ON(pmd_bad(*pmd));
- pte = pte_offset_kernel(pmd, addr);
+ pte = pte_set_fixmap_offset(pmd, addr);
do {
set_pte(pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
+
+ pte_clear_fixmap();
}
static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -131,10 +160,29 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
} while (pmd++, i++, i < PTRS_PER_PMD);
}
-static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end,
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+
+ /*
+ * If debug_page_alloc is enabled we must map the linear map
+ * using pages. However, other mappings created by
+ * create_mapping_noalloc must use sections in some cases. Allow
+ * sections to be used in those cases, where no pgtable_alloc
+ * function is provided.
+ */
+ return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+ return true;
+}
+#endif
+
+static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- void *(*alloc)(unsigned long size), bool pages)
+ phys_addr_t (*pgtable_alloc)(void))
{
pmd_t *pmd;
unsigned long next;
@@ -143,7 +191,10 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
* Check for initial section mappings in the pgd/pud and remove them.
*/
if (pud_none(*pud) || pud_sect(*pud)) {
- pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+ phys_addr_t pmd_phys;
+ BUG_ON(!pgtable_alloc);
+ pmd_phys = pgtable_alloc();
+ pmd = pmd_set_fixmap(pmd_phys);
if (pud_sect(*pud)) {
/*
* need to have the 1G of mappings continue to be
@@ -151,19 +202,21 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
*/
split_pud(pud, pmd);
}
- pud_populate(mm, pud, pmd);
+ __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
flush_tlb_all();
+ pmd_clear_fixmap();
}
BUG_ON(pud_bad(*pud));
- pmd = pmd_offset(pud, addr);
+ pmd = pmd_set_fixmap_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (!pages && ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ block_mappings_allowed(pgtable_alloc) &&
+ !dma_overlap(phys, phys + next - addr)) {
pmd_t old_pmd =*pmd;
- set_pmd(pmd, __pmd(phys |
- pgprot_val(mk_sect_prot(prot))));
+ pmd_set_huge(pmd, phys, prot);
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
@@ -171,17 +224,19 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
if (!pmd_none(old_pmd)) {
flush_tlb_all();
if (pmd_table(old_pmd)) {
- phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+ phys_addr_t table = pmd_page_paddr(old_pmd);
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot, alloc);
+ prot, pgtable_alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
+
+ pmd_clear_fixmap();
}
static inline bool use_1G_block(unsigned long addr, unsigned long next,
@@ -196,21 +251,22 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
return true;
}
-static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end,
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- void *(*alloc)(unsigned long size), bool force_pages)
+ phys_addr_t (*pgtable_alloc)(void))
{
pud_t *pud;
unsigned long next;
if (pgd_none(*pgd)) {
- pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
- pgd_populate(mm, pgd, pud);
+ phys_addr_t pud_phys;
+ BUG_ON(!pgtable_alloc);
+ pud_phys = pgtable_alloc();
+ __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
}
BUG_ON(pgd_bad(*pgd));
- pud = pud_offset(pgd, addr);
+ pud = pud_set_fixmap_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
@@ -218,12 +274,10 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
* For 4K granule only, attempt to put down a 1GB block
*/
if (use_1G_block(addr, next, phys) &&
- !force_pages &&
- !dma_overlap(phys, phys + next - addr) &&
- !IS_ENABLED(CONFIG_FORCE_PAGES)) {
+ block_mappings_allowed(pgtable_alloc) &&
+ !dma_overlap(phys, phys + next - addr)) {
pud_t old_pud = *pud;
- set_pud(pud, __pud(phys |
- pgprot_val(mk_sect_prot(prot))));
+ pud_set_huge(pud, phys, prot);
/*
* If we have an old value for a pud, it will
@@ -235,359 +289,274 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
if (!pud_none(old_pud)) {
flush_tlb_all();
if (pud_table(old_pud)) {
- phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+ phys_addr_t table = pud_page_paddr(old_pud);
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
- alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc, force_pages);
+ alloc_init_pmd(pud, addr, next, phys, prot,
+ pgtable_alloc);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
+
+ pud_clear_fixmap();
}
/*
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
-static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
- phys_addr_t phys, unsigned long virt,
+static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
- void *(*alloc)(unsigned long size), bool force_pages)
+ phys_addr_t (*pgtable_alloc)(void))
{
unsigned long addr, length, end, next;
+ /*
+ * If the virtual and physical address don't have the same offset
+ * within a page, we cannot map the region as the caller expects.
+ */
+ if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
+ return;
+
+ phys &= PAGE_MASK;
addr = virt & PAGE_MASK;
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc, force_pages);
+ alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-static void *late_alloc(unsigned long size)
+static phys_addr_t late_pgtable_alloc(void)
{
- void *ptr;
-
- BUG_ON(size > PAGE_SIZE);
- ptr = (void *)__get_free_page(PGALLOC_GFP);
+ void *ptr = (void *)__get_free_page(PGALLOC_GFP);
BUG_ON(!ptr);
- return ptr;
+
+ /* Ensure the zeroed page is visible to the page table walker */
+ dsb(ishst);
+ return __pa(ptr);
+}
+
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*alloc)(void))
+{
+ init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot, bool force_pages)
+/*
+ * This function can only be used to modify existing table entries,
+ * without allocating new levels of table. Note that this permits the
+ * creation of new section or page entries.
+ */
+static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
- __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
- size, prot, early_alloc, force_pages);
+ __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+ NULL);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot)
{
- __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
- late_alloc, false);
+ __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
+ late_pgtable_alloc);
}
-static inline pmd_t *pmd_off_k(unsigned long virt)
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
- return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+ if (virt < VMALLOC_START) {
+ pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ &phys, virt);
+ return;
+ }
+
+ __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+ late_pgtable_alloc);
}
-void __init remap_as_pages(unsigned long start, unsigned long size)
+static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
- unsigned long addr;
- unsigned long end = start + size;
+ unsigned long kernel_start = __pa(_stext);
+ unsigned long kernel_end = __pa(_etext);
/*
- * Make start and end PMD_SIZE aligned, observing memory
- * boundaries
+ * Take care not to create a writable alias for the
+ * read-only text and rodata sections of the kernel image.
*/
- if (memblock_is_memory(start & PMD_MASK))
- start = start & PMD_MASK;
- if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
- end = ALIGN(end, PMD_SIZE);
- size = end - start;
+ /* No overlap with the kernel text */
+ if (end < kernel_start || start >= kernel_end) {
+ __create_pgd_mapping(pgd, start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL,
+ early_pgtable_alloc);
+ return;
+ }
/*
- * Clear previous low-memory mapping
+ * This block overlaps the kernel text mapping.
+ * Map the portion(s) which don't overlap.
*/
- for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
- addr += PMD_SIZE) {
- pmd_t *pmd;
- pmd = pmd_off_k(addr);
- if (pmd_bad(*pmd) || pmd_sect(*pmd))
- pmd_clear(pmd);
- }
+ if (start < kernel_start)
+ __create_pgd_mapping(pgd, start,
+ __phys_to_virt(start),
+ kernel_start - start, PAGE_KERNEL,
+ early_pgtable_alloc);
+ if (kernel_end < end)
+ __create_pgd_mapping(pgd, kernel_end,
+ __phys_to_virt(kernel_end),
+ end - kernel_end, PAGE_KERNEL,
+ early_pgtable_alloc);
- create_mapping(start, __phys_to_virt(start), size, PAGE_KERNEL, true);
+ /*
+ * Map the linear alias of the [_stext, _etext) interval as
+ * read-only/non-executable. This makes the contents of the
+ * region accessible to subsystems such as hibernate, but
+ * protects it from inadvertent modification or execution.
+ */
+ __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
+ kernel_end - kernel_start, PAGE_KERNEL_RO,
+ early_pgtable_alloc);
}
-struct dma_contig_early_reserve {
- phys_addr_t base;
- unsigned long size;
-};
-
-static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
-
-static int dma_mmu_remap_num __initdata;
-
-void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+static void __init map_mem(pgd_t *pgd)
{
- dma_mmu_remap[dma_mmu_remap_num].base = base;
- dma_mmu_remap[dma_mmu_remap_num].size = size;
- dma_mmu_remap_num++;
-}
+ struct memblock_region *reg;
-static bool __init dma_overlap(phys_addr_t start, phys_addr_t end)
-{
- int i;
+ /* map all the memory banks */
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
- for (i = 0; i < dma_mmu_remap_num; i++) {
- phys_addr_t dma_base = dma_mmu_remap[i].base;
- phys_addr_t dma_end = dma_mmu_remap[i].base +
- dma_mmu_remap[i].size;
+ if (start >= end)
+ break;
- if ((dma_base < end) && (dma_end > start))
- return true;
+ __map_memblock(pgd, start, end);
}
- return false;
-}
-
-static void __init dma_contiguous_remap(void)
-{
- int i;
- for (i = 0; i < dma_mmu_remap_num; i++)
- remap_as_pages(dma_mmu_remap[i].base,
- dma_mmu_remap[i].size);
}
-static void create_mapping_late(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+void mark_rodata_ro(void)
{
- if (virt < VMALLOC_START) {
- pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
- &phys, virt);
+ if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
return;
- }
- return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
- phys, virt, size, prot, late_alloc,
- IS_ENABLED(CONFIG_FORCE_PAGES));
+ create_mapping_late(__pa(_stext), (unsigned long)_stext,
+ (unsigned long)_etext - (unsigned long)_stext,
+ PAGE_KERNEL_ROX);
}
-#ifdef CONFIG_DEBUG_RODATA
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+void fixup_init(void)
{
/*
- * Set up the executable regions using the existing section mappings
- * for now. This will get more fine grained later once all memory
- * is mapped
+ * Unmap the __init region but leave the VM area in place. This
+ * prevents the region from being reused for kernel modules, which
+ * is not supported by kallsyms.
*/
- unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
- unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
-
- if (end < kernel_x_start) {
- create_mapping(start, __phys_to_virt(start),
- end - start, PAGE_KERNEL, false);
- } else if (start >= kernel_x_end) {
- create_mapping(start, __phys_to_virt(start),
- end - start, PAGE_KERNEL, false);
- } else {
- if (start < kernel_x_start)
- create_mapping(start, __phys_to_virt(start),
- kernel_x_start - start,
- PAGE_KERNEL, false);
- create_mapping(kernel_x_start,
- __phys_to_virt(kernel_x_start),
- kernel_x_end - kernel_x_start,
- PAGE_KERNEL_EXEC, false);
- if (kernel_x_end < end)
- create_mapping(kernel_x_end,
- __phys_to_virt(kernel_x_end),
- end - kernel_x_end,
- PAGE_KERNEL, false);
- }
+ unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}
-#else
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
-{
- create_mapping(start, __phys_to_virt(start), end - start,
- PAGE_KERNEL_EXEC, false);
-}
-#endif
-static void __init map_mem(void)
+static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
+ pgprot_t prot, struct vm_struct *vma)
{
- struct memblock_region *reg;
- phys_addr_t limit;
+ phys_addr_t pa_start = __pa(va_start);
+ unsigned long size = va_end - va_start;
- /*
- * Temporarily limit the memblock range. We need to do this as
- * create_mapping requires puds, pmds and ptes to be allocated from
- * memory addressable from the initial direct kernel mapping.
- *
- * The initial direct kernel mapping, located at swapper_pg_dir, gives
- * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps,
- * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
- * per Documentation/arm64/booting.txt).
- */
- limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
- memblock_set_current_limit(limit);
-
- /* map all the memory banks */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
+ BUG_ON(!PAGE_ALIGNED(pa_start));
+ BUG_ON(!PAGE_ALIGNED(size));
- if (start >= end)
- break;
+ __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+ early_pgtable_alloc);
- if (ARM64_SWAPPER_USES_SECTION_MAPS) {
- /*
- * For the first memory bank align the start address and
- * current memblock limit to prevent create_mapping() from
- * allocating pte page tables from unmapped memory. With
- * the section maps, if the first block doesn't end on section
- * size boundary, create_mapping() will try to allocate a pte
- * page, which may be returned from an unmapped area.
- * When section maps are not used, the pte page table for the
- * current limit is already present in swapper_pg_dir.
- */
- if (start < limit)
- start = ALIGN(start, SECTION_SIZE);
- if (end < limit) {
- limit = end & SECTION_MASK;
- memblock_set_current_limit(limit);
- }
- }
- __map_memblock(start, end);
- }
+ vma->addr = va_start;
+ vma->phys_addr = pa_start;
+ vma->size = size;
+ vma->flags = VM_MAP;
+ vma->caller = __builtin_return_address(0);
- /* Limit no longer required. */
- memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+ vm_area_add_early(vma);
}
-#ifdef CONFIG_FORCE_PAGES
-static noinline void __init split_and_set_pmd(pmd_t *pmd, unsigned long addr,
- unsigned long end, unsigned long pfn)
+
+/*
+ * Create fine-grained mappings for the kernel.
+ */
+static void __init map_kernel(pgd_t *pgd)
{
- pte_t *pte, *start_pte;
+ static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
- start_pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
- pte = start_pte;
+ map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+ map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+ &vmlinux_init);
+ map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
- do {
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+ /*
+ * The fixmap falls in a separate pgd to the kernel, and doesn't
+ * live in the carveout for the swapper_pg_dir. We can simply
+ * re-use the existing dir for the fixmap.
+ */
+ set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
+ *pgd_offset_k(FIXADDR_START));
+ } else if (CONFIG_PGTABLE_LEVELS > 3) {
+ /*
+ * The fixmap shares its top level pgd entry with the kernel
+ * mapping. This can really only occur when we are running
+ * with 16k/4 levels, so we can simply reuse the pud level
+ * entry instead.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
+ __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+ pud_clear_fixmap();
+ } else {
+ BUG();
+ }
- set_pmd(pmd, __pmd((__pa(start_pte)) | PMD_TYPE_TABLE));
+ kasan_copy_shadow(pgd);
}
-static noinline void __init remap_pages(void)
-{
- struct memblock_region *reg;
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
- for_each_memblock(memory, reg) {
- phys_addr_t phys_pgd = reg->base;
- phys_addr_t phys_end = reg->base + reg->size;
- unsigned long addr_pgd = (unsigned long)__va(phys_pgd);
- unsigned long end = (unsigned long)__va(phys_end);
- pmd_t *pmd = NULL;
- pud_t *pud = NULL;
- pgd_t *pgd = NULL;
- unsigned long next_pud, next_pmd, next_pgd;
- unsigned long addr_pmd, addr_pud;
- phys_addr_t phys_pud, phys_pmd;
-
- if (phys_pgd >= phys_end)
- break;
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
- pgd = pgd_offset(&init_mm, addr_pgd);
- do {
- next_pgd = pgd_addr_end(addr_pgd, end);
- pud = pud_offset(pgd, addr_pgd);
- addr_pud = addr_pgd;
- phys_pud = phys_pgd;
- do {
- next_pud = pud_addr_end(addr_pud, next_pgd);
- pmd = pmd_offset(pud, addr_pud);
- addr_pmd = addr_pud;
- phys_pmd = phys_pud;
- do {
- next_pmd = pmd_addr_end(addr_pmd,
- next_pud);
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- split_and_set_pmd(pmd, addr_pmd,
- next_pmd, __phys_to_pfn(phys_pmd));
- pmd++;
- phys_pmd += next_pmd - addr_pmd;
- } while (addr_pmd = next_pmd,
- addr_pmd < next_pud);
- phys_pud += next_pud - addr_pud;
- } while (pud++, addr_pud = next_pud,
- addr_pud < next_pgd);
- phys_pgd += next_pgd - addr_pgd;
- } while (pgd++, addr_pgd = next_pgd, addr_pgd < end);
- }
-}
+static int dma_mmu_remap_num;
-#else
-static void __init remap_pages(void)
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
-
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
}
-#endif
-static void __init fixup_executable(void)
+static bool dma_overlap(phys_addr_t start, phys_addr_t end)
{
-#ifdef CONFIG_DEBUG_RODATA
- /* now that we are actually fully mapped, make the start/end more fine grained */
- if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
- unsigned long aligned_start = round_down(__pa(_stext),
- SWAPPER_BLOCK_SIZE);
+ int i;
- create_mapping(aligned_start, __phys_to_virt(aligned_start),
- __pa(_stext) - aligned_start,
- PAGE_KERNEL, false);
- }
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t dma_base = dma_mmu_remap[i].base;
+ phys_addr_t dma_end = dma_mmu_remap[i].base +
+ dma_mmu_remap[i].size;
- if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
- unsigned long aligned_end = round_up(__pa(__init_end),
- SWAPPER_BLOCK_SIZE);
- create_mapping(__pa(__init_end), (unsigned long)__init_end,
- aligned_end - __pa(__init_end),
- PAGE_KERNEL, false);
+ if ((dma_base < end) && (dma_end > start))
+ return true;
}
-#endif
-}
-
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void)
-{
- create_mapping_late(__pa(_stext), (unsigned long)_stext,
- (unsigned long)_etext - (unsigned long)_stext,
- PAGE_KERNEL_ROX);
-
-}
-#endif
-
-void fixup_init(void)
-{
- create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
- (unsigned long)__init_end - (unsigned long)__init_begin,
- PAGE_KERNEL);
+ return false;
}
/*
@@ -596,39 +565,35 @@ void fixup_init(void)
*/
void __init paging_init(void)
{
- void *zero_page;
+ phys_addr_t pgd_phys = early_pgtable_alloc();
+ pgd_t *pgd = pgd_set_fixmap(pgd_phys);
- map_mem();
- fixup_executable();
- dma_contiguous_remap();
- remap_pages();
+ map_kernel(pgd);
+ map_mem(pgd);
/*
- * Finally flush the caches and tlb to ensure that we're in a
- * consistent state.
+ * We want to reuse the original swapper_pg_dir so we don't have to
+ * communicate the new address to non-coherent secondaries in
+ * secondary_entry, and so cpu_switch_mm can generate the address with
+ * adrp+add rather than a load from some global variable.
+ *
+ * To do this we need to go via a temporary pgd.
*/
- flush_tlb_all();
-
- /* allocate the zero page. */
- zero_page = early_alloc(PAGE_SIZE);
+ cpu_replace_ttbr1(__va(pgd_phys));
+ memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+ cpu_replace_ttbr1(swapper_pg_dir);
- bootmem_init();
-
- empty_zero_page = virt_to_page(zero_page);
-
- /* Ensure the zero page is visible to the page table walker */
- dsb(ishst);
+ pgd_clear_fixmap();
+ memblock_free(pgd_phys, PAGE_SIZE);
/*
- * TTBR0 is only used for the identity mapping at this stage. Make it
- * point to zero page to avoid speculatively fetching new entries.
+ * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
+ * allocated with it.
*/
- cpu_set_reserved_ttbr0();
- local_flush_tlb_all();
- cpu_set_default_tcr_t0sz();
- flush_tlb_all();
- set_kernel_text_ro();
- flush_tlb_all();
+ memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+ SWAPPER_DIR_SIZE - PAGE_SIZE);
+
+ bootmem_init();
}
/*
@@ -715,21 +680,13 @@ void vmemmap_free(unsigned long start, unsigned long end)
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
static inline pud_t * fixmap_pud(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
- return pud_offset(pgd, addr);
+ return pud_offset_kimg(pgd, addr);
}
static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -738,16 +695,12 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
BUG_ON(pud_none(*pud) || pud_bad(*pud));
- return pmd_offset(pud, addr);
+ return pmd_offset_kimg(pud, addr);
}
static inline pte_t * fixmap_pte(unsigned long addr)
{
- pmd_t *pmd = fixmap_pmd(addr);
-
- BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
-
- return pte_offset_kernel(pmd, addr);
+ return &bm_pte[pte_index(addr)];
}
void __init early_fixmap_init(void)
@@ -758,15 +711,26 @@ void __init early_fixmap_init(void)
unsigned long addr = FIXADDR_START;
pgd = pgd_offset_k(addr);
- pgd_populate(&init_mm, pgd, bm_pud);
- pud = pud_offset(pgd, addr);
+ if (CONFIG_PGTABLE_LEVELS > 3 &&
+ !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
+ /*
+ * We only end up here if the kernel mapping and the fixmap
+ * share the top level pgd entry, which should only happen on
+ * 16k/4 levels configurations.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ pud = pud_offset_kimg(pgd, addr);
+ } else {
+ pgd_populate(&init_mm, pgd, bm_pud);
+ pud = fixmap_pud(addr);
+ }
pud_populate(&init_mm, pud, bm_pmd);
- pmd = pmd_offset(pud, addr);
+ pmd = fixmap_pmd(addr);
pmd_populate_kernel(&init_mm, pmd, bm_pte);
/*
* The boot-ioremap range spans multiple pmds, for which
- * we are not preparted:
+ * we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
@@ -805,11 +769,10 @@ void __set_fixmap(enum fixed_addresses idx,
}
}
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
- pgprot_t prot = PAGE_KERNEL_RO;
- int size, offset;
+ int offset;
void *dt_virt;
/*
@@ -826,7 +789,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
/*
* Make sure that the FDT region can be mapped without the need to
* allocate additional translation table pages, so that it is safe
- * to call create_mapping() this early.
+ * to call create_mapping_noalloc() this early.
*
* On 64k pages, the FDT will be mapped using PTEs, so we need to
* be in the same PMD as the rest of the fixmap.
@@ -842,21 +805,73 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
dt_virt = (void *)dt_virt_base + offset;
/* map the first chunk so we can read the size from the header */
- create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- SWAPPER_BLOCK_SIZE, prot, false);
+ create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+ dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
if (fdt_check_header(dt_virt) != 0)
return NULL;
- size = fdt_totalsize(dt_virt);
- if (size > MAX_FDT_SIZE)
+ *size = fdt_totalsize(dt_virt);
+ if (*size > MAX_FDT_SIZE)
return NULL;
- if (offset + size > SWAPPER_BLOCK_SIZE)
- create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- round_up(offset + size, SWAPPER_BLOCK_SIZE), prot, false);
+ if (offset + *size > SWAPPER_BLOCK_SIZE)
+ create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
- memblock_reserve(dt_phys, size);
+ return dt_virt;
+}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+ void *dt_virt;
+ int size;
+
+ dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+ if (!dt_virt)
+ return NULL;
+ memblock_reserve(dt_phys, size);
return dt_virt;
}
+
+int __init arch_ioremap_pud_supported(void)
+{
+ /* only 4k granule supports level 1 block mappings */
+ return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+ return 1;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+ BUG_ON(phys & ~PUD_MASK);
+ set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+ BUG_ON(phys & ~PMD_MASK);
+ set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (!pud_sect(*pud))
+ return 0;
+ pud_clear(pud);
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (!pmd_sect(*pmd))
+ return 0;
+ pmd_clear(pmd);
+ return 1;
+}
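
Several hunks above gate block mappings on ((addr | next | phys) & ~SECTION_MASK) == 0, i.e. a section can only be used when the virtual start, the virtual end and the physical address are all section-aligned. A minimal illustration, assuming the 2M section size of a 4K granule:

/*
 * Alignment test for a section (block) mapping, as used in alloc_init_pmd().
 */
#define SECTION_SHIFT	21
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE - 1))

static int can_use_section(unsigned long addr, unsigned long next,
			   unsigned long phys)
{
	/* all three must share the same 2M alignment */
	return ((addr | next | phys) & ~SECTION_MASK) == 0;
}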
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0f35e8ba0f33..4754762bde49 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -36,14 +37,32 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
return 0;
}
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+ pgprot_t set_mask, pgprot_t clear_mask)
+{
+ struct page_change_data data;
+ int ret;
+
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, start + size);
+ return ret;
+}
+
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
- int ret;
- struct page_change_data data;
+ struct vm_struct *area;
if (!PAGE_ALIGNED(addr)) {
start &= PAGE_MASK;
@@ -58,18 +77,29 @@ static int change_memory_common(unsigned long addr, int numpages,
if (end < MODULES_VADDR || end >= MODULES_END)
return -EINVAL;
}
+ /*
+ * Kernel VA mappings are always live, and splitting live section
+ * mappings into page mappings may cause TLB conflicts. This means
+ * we have to ensure that changing the permission bits of the range
+ * we are operating on does not result in such splitting.
+ *
+ * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+ * Those are guaranteed to consist entirely of page mappings, and
+ * splitting is never needed.
+ *
+ * So check whether the [addr, addr + size) interval is entirely
+ * covered by precisely one VM area that has the VM_ALLOC flag set.
+ */
+ area = find_vm_area((void *)addr);
+ if (!area ||
+ end > (unsigned long)area->addr + area->size ||
+ !(area->flags & VM_ALLOC))
+ return -EINVAL;
if (!numpages)
return 0;
- data.set_mask = set_mask;
- data.clear_mask = clear_mask;
-
- ret = apply_to_page_range(&init_mm, start, size, change_page_range,
- &data);
-
- flush_tlb_kernel_range(start, end);
- return ret;
+ return __change_memory_common(start, size, set_mask, clear_mask);
}
int set_memory_ro(unsigned long addr, int numpages)
@@ -101,3 +131,19 @@ int set_memory_x(unsigned long addr, int numpages)
__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long addr = (unsigned long) page_address(page);
+
+ if (enable)
+ __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(PTE_VALID),
+ __pgprot(0));
+ else
+ __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(0),
+ __pgprot(PTE_VALID));
+}
+#endif
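
change_memory_common() above now refuses ranges that are not wholly contained in a single vmalloc/vmap (VM_ALLOC) area, since only those are guaranteed to be page-mapped and never need splitting. A hedged usage sketch under that rule; the header locations are assumptions and error handling is elided:

/*
 * Usage sketch: permission changes only on vmalloc'ed, page-mapped memory.
 */
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>	/* assumed home of set_memory_ro() on arm64 */

static void *alloc_ro_buffer(size_t size)
{
	void *buf = vmalloc(size);	/* page mappings, inside one VM_ALLOC area */

	if (buf)
		set_memory_ro((unsigned long)buf,
			      DIV_ROUND_UP(size, PAGE_SIZE));
	return buf;
}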
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index cb3ba1b812e7..ae11d4e03d0e 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -46,14 +46,14 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
{
+ if (PGD_SIZE == PAGE_SIZE)
+ return;
+
/*
* Naturally aligned pgds required by the architecture.
*/
- if (PGD_SIZE != PAGE_SIZE)
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
- SLAB_PANIC, NULL);
- return 0;
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ SLAB_PANIC, NULL);
}
-core_initcall(pgd_cache_init);
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index d69dffffaa89..984edcda1850 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -74,3 +74,25 @@
msr pmuserenr_el0, xzr // Disable PMU access from EL0
9000:
.endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ * op: operation passed to dc instruction
+ * domain: domain used in dsb instruction
+ * kaddr: starting virtual address of the region
+ * size: size of the region
+ * Corrupts: kaddr, size, tmp1, tmp2
+ */
+ .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ dcache_line_size \tmp1, \tmp2
+ add \size, \kaddr, \size
+ sub \tmp2, \tmp1, #1
+ bic \kaddr, \kaddr, \tmp2
+9998: dc \op, \kaddr
+ add \kaddr, \kaddr, \tmp1
+ cmp \kaddr, \size
+ b.lo 9998b
+ dsb \domain
+ .endm
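
For readers less familiar with the assembler, a rough C rendering of what the new dcache_by_line_op macro does; the 64-byte line size is an illustrative assumption (the macro reads the real value from CTR_EL0 via dcache_line_size):

/*
 * Rough C equivalent of dcache_by_line_op: round the start down to a
 * cache-line boundary, then issue one dc operation per line.
 */
static void dcache_by_line(void (*dc_op)(unsigned long line),
			   unsigned long kaddr, unsigned long size)
{
	unsigned long line_size = 64;		/* assumed; really from CTR_EL0 */
	unsigned long end = kaddr + size;

	kaddr &= ~(line_size - 1);
	for (; kaddr < end; kaddr += line_size)
		dc_op(kaddr);			/* e.g. "dc cvau" for __clean_dcache_area_pou */
	/* followed by a dsb in the requested domain */
}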
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cd8853e11f2a..dc22de0ce413 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -186,7 +186,33 @@ ENTRY(cpu_do_switch_mm)
ret
ENDPROC(cpu_do_switch_mm)
- .section ".text.init", #alloc, #execinstr
+ .pushsection ".idmap.text", "ax"
+/*
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ *
+ * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
+ * called by anything else. It can only be executed from a TTBR0 mapping.
+ */
+ENTRY(idmap_cpu_replace_ttbr1)
+ mrs x2, daif
+ msr daifset, #0xf
+
+ adrp x1, empty_zero_page
+ msr ttbr1_el1, x1
+ isb
+
+ tlbi vmalle1
+ dsb nsh
+ isb
+
+ msr ttbr1_el1, x0
+ isb
+
+ msr daif, x2
+
+ ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+ .popsection
/*
* __cpu_setup
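
idmap_cpu_replace_ttbr1 is only reached through a C wrapper that runs it from the identity mapping so TTBR1 can be swapped safely. A sketch of that caller side, assuming the shape of cpu_replace_ttbr1() in asm/mmu_context.h:

/*
 * Sketch of the C-side counterpart: install the idmap in TTBR0, call the
 * physical alias of idmap_cpu_replace_ttbr1, then drop the idmap again.
 */
static inline void replace_ttbr1_sketch(pgd_t *pgd)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();			/* make the identity map live in TTBR0 */
	replace_phys(virt_to_phys(pgd));
	cpu_uninstall_idmap();			/* back to the reserved TTBR0 */
}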
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 729f89163bc3..d2256fa97ea0 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,6 +11,7 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
+ select BUILDTIME_EXTABLE_SORT
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index b3069fd83468..60e6f07b7e32 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -523,7 +523,7 @@
*/
#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
.section __ex_table,"aw" ! \
- ASM_ULONG_INSN fault_addr, except_addr ! \
+ .word (fault_addr - .), (except_addr - .) ! \
.previous
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 1960b87c1c8b..6f893d29f1b2 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -60,14 +60,15 @@ static inline long access_ok(int type, const void __user * addr,
* use a 32bit (unsigned int) address here.
*/
+#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
- unsigned long insn; /* address of insn that is allowed to fault. */
- unsigned long fixup; /* fixup routine */
+ int insn; /* relative address of insn that is allowed to fault. */
+ int fixup; /* relative address of fixup routine */
};
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
".section __ex_table,\"aw\"\n" \
- ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
+ ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"
/*
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index f9064449908a..16dbe81c97c9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -140,12 +140,6 @@ int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fix;
- /* If we only stored 32bit addresses in the exception table we can drop
- * out if we faulted on a 64bit address. */
- if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn))
- && (regs->iaoq[0] >> 32))
- return 0;
-
fix = search_exception_tables(regs->iaoq[0]);
if (fix) {
struct exception_data *d;
@@ -155,7 +149,8 @@ int fixup_exception(struct pt_regs *regs)
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
- regs->iaoq[0] = ((fix->fixup) & ~3);
+ regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
+ regs->iaoq[0] &= ~3;
/*
* NOTE: In some cases the faulting instruction
* may be in the delay slot of a branch. We
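To illustrate the relative-offset scheme these parisc hunks introduce, a small self-contained C sketch; fixup_stub and the entry initialization are hypothetical, since the kernel emits the offsets at build time via the ".word (addr - .)" directives shown above:

#include <stdio.h>

/* Each field holds a signed 32-bit offset from the field's own address
 * to its target, and the fault handler rebuilds the absolute fixup
 * address with "&fix->fixup + fix->fixup". */
struct exception_table_entry {
    int insn;   /* relative address of the faulting insn */
    int fixup;  /* relative address of the fixup routine */
};

static char fixup_stub;                          /* hypothetical fixup target */
static struct exception_table_entry entry;

static unsigned long ex_fixup_addr(const struct exception_table_entry *fix)
{
    return (unsigned long)&fix->fixup + fix->fixup;
}

int main(void)
{
    entry.fixup = (int)((char *)&fixup_stub - (char *)&entry.fixup);
    printf("recovered %p, expected %p\n",
           (void *)ex_fixup_addr(&entry), (void *)&fixup_stub);
    return 0;
}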
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index e4396a7d0f7c..4afe66aa1400 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
"andc %1,%1,%2\n\t"
"popcntd %0,%1"
: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
- : "r" (bits));
+ : "b" (bits));
return leading_zero_bits;
}
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 43686043e297..2734c005da21 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
0x00000040
+/* Reserved - do not use 0x00000004 */
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 36795d1e7558..a7b91b54c813 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -569,24 +569,6 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
- /*
- * Use the current MSR TM suspended bit to track if we have
- * checkpointed state outstanding.
- * On signal delivery, we'd normally reclaim the checkpointed
- * state to obtain stack pointer (see:get_tm_stackpointer()).
- * This will then directly return to userspace without going
- * through __switch_to(). However, if the stack frame is bad,
- * we need to exit this thread which calls __switch_to() which
- * will again attempt to reclaim the already saved tm state.
- * Hence we need to check that we've not already reclaimed
- * this state.
- * We do this using the current MSR, rather tracking it in
- * some specific thread_struct bit, as it has the additional
- * benifit of checking for a potential TM bad thing exception.
- */
- if (!MSR_TM_SUSPENDED(mfmsr()))
- return;
-
tm_reclaim(thr, thr->regs->msr, cause);
/* Having done the reclaim, we now have the checkpointed
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b035905d..a15fe1d4e84a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
+ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
+ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
+ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
+ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
+ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
+ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
/*
- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
- * which is 0 if the kernel doesn't support TM.
+ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+ * we don't want to turn on TM here, so we use the *_COMP versions
+ * which are 0 if the kernel doesn't support TM.
*/
- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
+ {CPU_FTR_TM_COMP, 0, 0,
+ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d29ad9545b41..081b2ad99d73 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
spinlock_t list_lock;
struct list_head pgtable_list;
struct list_head gmap_list;
- unsigned long asce_bits;
+ unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index e485817f7b1a..22877c9440ea 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
- if (mm->context.asce_limit == 0) {
+ switch (mm->context.asce_limit) {
+ case 1UL << 42:
+ /*
+ * forked 3-level task, fall through to set new asce with new
+ * mm->pgd
+ */
+ case 0:
/* context created by exec, set asce limit to 4TB */
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
mm->context.asce_limit = STACK_TOP_MAX;
- } else if (mm->context.asce_limit == (1UL << 31)) {
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ break;
+ case 1UL << 53:
+ /* forked 4-level task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ break;
+ case 1UL << 31:
+ /* forked 2-level compat task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ /* pgd_alloc() did not increase mm->nr_pmds */
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void set_user_asce(struct mm_struct *mm)
{
- S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+ S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+ S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 2b2ced9dc00a..6dafabb6ae1a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -45,7 +45,8 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
-} __packed __aligned(64);
+ u64 pad[2];
+} __packed __aligned(128);
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d7cc79fb6191..5991cdcb5b40 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -56,8 +56,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
return _REGION2_ENTRY_EMPTY;
}
-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+int crst_table_upgrade(struct mm_struct *);
+void crst_table_downgrade(struct mm_struct *);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b16c3d0a1b9f..c1ea67db8404 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -163,7 +163,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
- crst_table_downgrade(current->mm, 1UL << 31); \
+ crst_table_downgrade(current->mm); \
execve_tail(); \
} while (0)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index ca148f7c3eaa..a2e6ef32e054 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte(init_mm.context.asce);
else
__tlb_flush_global();
}
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte_local(init_mm.context.asce);
else
__tlb_flush_local();
}
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, (unsigned long) mm->pgd |
- mm->context.asce_bits);
+ __tlb_flush_asce(mm, mm->context.asce);
else
__tlb_flush_full(mm);
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c722400c7697..feff9caf89b5 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
- S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ S390_lowcore.kernel_asce = init_mm.context.asce;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index ea01477b4aa6..f2b6b1d9c804 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
if (!(flags & MAP_FIXED))
addr = 0;
if ((addr + len) >= TASK_SIZE)
- return crst_table_upgrade(current->mm, 1UL << 53);
+ return crst_table_upgrade(current->mm);
return 0;
}
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, 1UL << 53);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, 1UL << 53);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 54ef3bc01b43..471a370a527b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -49,81 +49,52 @@ static void __crst_table_upgrade(void *arg)
__tlb_flush_local();
}
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
{
unsigned long *table, *pgd;
- unsigned long entry;
- int flush;
- BUG_ON(limit > (1UL << 53));
- flush = 0;
-repeat:
+ /* upgrade should only happen from 3 to 4 levels */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
+
spin_lock_bh(&mm->page_table_lock);
- if (mm->context.asce_limit < limit) {
- pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit <= (1UL << 31)) {
- entry = _REGION3_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- } else {
- entry = _REGION2_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 53;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION2;
- }
- crst_table_init(table, entry);
- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->task_size = mm->context.asce_limit;
- table = NULL;
- flush = 1;
- }
+ pgd = (unsigned long *) mm->pgd;
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+ mm->pgd = (pgd_t *) table;
+ mm->context.asce_limit = 1UL << 53;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm->task_size = mm->context.asce_limit;
spin_unlock_bh(&mm->page_table_lock);
- if (table)
- crst_table_free(mm, table);
- if (mm->context.asce_limit < limit)
- goto repeat;
- if (flush)
- on_each_cpu(__crst_table_upgrade, mm, 0);
+
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
{
pgd_t *pgd;
+ /* downgrade should only happen from 3 to 2 levels (compat only) */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
if (current->active_mm == mm) {
clear_user_asce();
__tlb_flush_mm(mm);
}
- while (mm->context.asce_limit > limit) {
- pgd = mm->pgd;
- switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
- case _REGION_ENTRY_TYPE_R2:
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- break;
- case _REGION_ENTRY_TYPE_R3:
- mm->context.asce_limit = 1UL << 31;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_SEGMENT;
- break;
- default:
- BUG();
- }
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->task_size = mm->context.asce_limit;
- crst_table_free(mm, (unsigned long *) pgd);
- }
+
+ pgd = mm->pgd;
+ mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->context.asce_limit = 1UL << 31;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+
if (current->active_mm == mm)
set_user_asce(mm);
}
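A hedged userspace sketch of what the simplified upgrade path now does, with illustrative types and constants in place of the s390 definitions:

#include <stdio.h>
#include <stdlib.h>

/* Going from 3 to 4 page-table levels means allocating one new top-level
 * table whose first entry points at the old top level, then republishing
 * mm->pgd and the address-space limit. */
struct mm_model {
    void **pgd;                /* current top-level table */
    unsigned long asce_limit;  /* 1UL << 42 for 3 levels, 1UL << 53 for 4 */
};

static int crst_table_upgrade_model(struct mm_model *mm)
{
    void **table;

    if (mm->asce_limit != (1UL << 42))   /* upgrade only from 3 levels */
        return -1;
    table = calloc(2048, sizeof(*table));
    if (!table)
        return -1;
    table[0] = mm->pgd;                  /* pgd_populate(): new top points at old */
    mm->pgd = table;
    mm->asce_limit = 1UL << 53;          /* address space now spans 4 levels */
    return 0;
}

int main(void)
{
    void *old[2048] = { 0 };
    struct mm_model mm = { .pgd = old, .asce_limit = 1UL << 42 };

    if (!crst_table_upgrade_model(&mm))
        printf("upgraded, new limit %#lx\n", mm.asce_limit);
    return 0;
}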
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a841e9765bd6..8381c09d2870 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 0010c78c4998..08b1f2f6ea50 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -25,6 +25,8 @@
#define EFI32_LOADER_SIGNATURE "EL32"
#define EFI64_LOADER_SIGNATURE "EL64"
+#define MAX_CMDLINE_ADDRESS UINT_MAX
+
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index f8a29d2c97b0..e6a8613fbfb0 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,7 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
+#define hugepages_supported() cpu_has_pse
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7af2505f20c2..df6b4eeac0bd 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -254,7 +254,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
struct irq_desc *desc;
int cpu, vector;
- BUG_ON(!data->cfg.vector);
+ if (!data->cfg.vector)
+ return;
vector = data->cfg.vector;
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 0a850100c594..2658e2af74ec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
void mce_gen_pool_process(void)
{
struct llist_node *head;
- struct mce_evt_llist *node;
+ struct mce_evt_llist *node, *tmp;
struct mce *mce;
head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
return;
head = llist_reverse_order(head);
- llist_for_each_entry(node, head, llnode) {
+ llist_for_each_entry_safe(node, tmp, head, llnode) {
mce = &node->mce;
atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 2c5aaf8c2e2f..05538582a809 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
{
__u64 msr_val;
+ if (static_cpu_has(X86_FEATURE_HWP))
+ wrmsrl_safe(MSR_HWP_STATUS, 0);
+
rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
/* Check for violation of core thermal thresholds*/
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index b285d4e8c68e..5da924bbf0a0 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
continue;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
resource_size_t start, end;
+ unsigned long flags;
+
+ flags = pci_resource_flags(dev, i);
+ if (!(flags & IORESOURCE_MEM))
+ continue;
+
+ if (flags & IORESOURCE_UNSET)
+ continue;
+
+ if (pci_resource_len(dev, i) == 0)
+ continue;
start = pci_resource_start(dev, i);
- if (start == 0)
- break;
end = pci_resource_end(dev, i);
if (screen_info.lfb_base >= start &&
screen_info.lfb_base < end) {
found_bar = 1;
+ break;
}
}
}
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 92ae6acac8a7..6aa0f4d9eea6 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
if (freq_desc_tables[cpu_index].msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
- ratio = (lo >> 8) & 0x1f;
+ ratio = (lo >> 8) & 0xff;
} else {
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
ratio = (hi >> 8) & 0x1f;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7eb4ebd3ebea..605cea75eb0d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
return 1;
}
- kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6495,8 +6494,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
- kvm_load_guest_xcr0(vcpu);
-
vcpu->mode = IN_GUEST_MODE;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6519,6 +6516,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}
+ kvm_load_guest_xcr0(vcpu);
+
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
@@ -6568,6 +6567,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
+ kvm_put_guest_xcr0(vcpu);
+
/* Interrupt is enabled by handle_external_intr() */
kvm_x86_ops->handle_external_intr(vcpu);
@@ -7215,7 +7216,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
* and assume host would use all available bits.
* Guest xcr0 would be loaded later.
*/
- kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7224,8 +7224,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
- kvm_put_guest_xcr0(vcpu);
-
if (!vcpu->guest_fpu_loaded) {
vcpu->fpu_counter = 0;
return;
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 637ab34ed632..ddb2244b06a1 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -33,7 +33,7 @@
struct kmmio_fault_page {
struct list_head list;
struct kmmio_fault_page *release_next;
- unsigned long page; /* location of the fault page */
+ unsigned long addr; /* the requested address */
pteval_t old_presence; /* page presence prior to arming */
bool armed;
@@ -70,9 +70,16 @@ unsigned int kmmio_count;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
}
/* You must be holding RCU read lock. */
-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
struct list_head *head;
struct kmmio_fault_page *f;
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
- page &= PAGE_MASK;
- head = kmmio_page_list(page);
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+ head = kmmio_page_list(addr);
list_for_each_entry_rcu(f, head, list) {
- if (f->page == page)
+ if (f->addr == addr)
return f;
}
return NULL;
@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
unsigned int level;
- pte_t *pte = lookup_address(f->page, &level);
+ pte_t *pte = lookup_address(f->addr, &level);
if (!pte) {
- pr_err("no pte for page 0x%08lx\n", f->page);
+ pr_err("no pte for addr 0x%08lx\n", f->addr);
return -1;
}
@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
return -1;
}
- __flush_tlb_one(f->page);
+ __flush_tlb_one(f->addr);
return 0;
}
@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
int ret;
WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
if (f->armed) {
- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
- f->page, f->count, !!f->old_presence);
+ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
+ f->addr, f->count, !!f->old_presence);
}
ret = clear_page_presence(f, true);
- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
- f->page);
+ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
+ f->addr);
f->armed = true;
return ret;
}
@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
int ret = clear_page_presence(f, false);
WARN_ONCE(ret < 0,
- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
f->armed = false;
}
@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
struct kmmio_context *ctx;
struct kmmio_fault_page *faultpage;
int ret = 0; /* default to fault not handled */
+ unsigned long page_base = addr;
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+ if (!pte)
+ return -EINVAL;
+ page_base &= page_level_mask(l);
/*
* Preemption is now disabled to prevent process switch during
@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
preempt_disable();
rcu_read_lock();
- faultpage = get_kmmio_fault_page(addr);
+ faultpage = get_kmmio_fault_page(page_base);
if (!faultpage) {
/*
* Either this page fault is not caused by kmmio, or
@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
ctx = &get_cpu_var(kmmio_ctx);
if (ctx->active) {
- if (addr == ctx->addr) {
+ if (page_base == ctx->addr) {
/*
* A second fault on the same page means some other
* condition needs handling by do_page_fault(), the
@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
ctx->active++;
ctx->fpage = faultpage;
- ctx->probe = get_kmmio_probe(addr);
+ ctx->probe = get_kmmio_probe(page_base);
ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
- ctx->addr = addr;
+ ctx->addr = page_base;
if (ctx->probe && ctx->probe->pre_handler)
ctx->probe->pre_handler(ctx->probe, regs, addr);
@@ -354,12 +371,11 @@ out:
}
/* You must be holding kmmio_lock. */
-static int add_kmmio_fault_page(unsigned long page)
+static int add_kmmio_fault_page(unsigned long addr)
{
struct kmmio_fault_page *f;
- page &= PAGE_MASK;
- f = get_kmmio_fault_page(page);
+ f = get_kmmio_fault_page(addr);
if (f) {
if (!f->count)
arm_kmmio_fault_page(f);
@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
return -1;
f->count = 1;
- f->page = page;
+ f->addr = addr;
if (arm_kmmio_fault_page(f)) {
kfree(f);
return -1;
}
- list_add_rcu(&f->list, kmmio_page_list(f->page));
+ list_add_rcu(&f->list, kmmio_page_list(f->addr));
return 0;
}
/* You must be holding kmmio_lock. */
-static void release_kmmio_fault_page(unsigned long page,
+static void release_kmmio_fault_page(unsigned long addr,
struct kmmio_fault_page **release_list)
{
struct kmmio_fault_page *f;
- page &= PAGE_MASK;
- f = get_kmmio_fault_page(page);
+ f = get_kmmio_fault_page(addr);
if (!f)
return;
@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
int ret = 0;
unsigned long size = 0;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ unsigned int l;
+ pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
if (get_kmmio_probe(p->addr)) {
ret = -EEXIST;
goto out;
}
+
+ pte = lookup_address(p->addr, &l);
+ if (!pte) {
+ ret = -EINVAL;
+ goto out;
+ }
+
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
if (add_kmmio_fault_page(p->addr + size))
pr_err("Unable to set page fault.\n");
- size += PAGE_SIZE;
+ size += page_level_size(l);
}
out:
spin_unlock_irqrestore(&kmmio_lock, flags);
@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
struct kmmio_fault_page *release_list = NULL;
struct kmmio_delayed_release *drelease;
+ unsigned int l;
+ pte_t *pte;
+
+ pte = lookup_address(p->addr, &l);
+ if (!pte)
+ return;
spin_lock_irqsave(&kmmio_lock, flags);
while (size < size_lim) {
release_kmmio_fault_page(p->addr + size, &release_list);
- size += PAGE_SIZE;
+ size += page_level_size(l);
}
list_del_rcu(&p->list);
kmmio_count--;
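A minimal userspace sketch of the idea behind these kmmio changes, with illustrative page_level_size()/page_level_mask() stand-ins rather than the real x86 helpers:

#include <stdio.h>

/* The probe area is aligned with the mask of the level it is actually
 * mapped with, and fault pages are added in steps of that level's size
 * instead of a fixed 4 KiB PAGE_SIZE. */
static unsigned long page_level_size(int level)
{
    return level == 2 ? 2UL << 20 : 4UL << 10;   /* PMD: 2 MiB, PTE: 4 KiB */
}

static unsigned long page_level_mask(int level)
{
    return ~(page_level_size(level) - 1);
}

int main(void)
{
    unsigned long addr = 0x40212345UL;                 /* probe start (unaligned) */
    unsigned long size_lim = (5UL << 20) + (addr & 0xfffUL);  /* p->len + sub-page offset */
    unsigned long size = 0;
    int level = 2;                                     /* region mapped with 2 MiB pages */

    while (size < size_lim) {
        printf("fault page at %#lx\n",
               (addr + size) & page_level_mask(level));
        size += page_level_size(level);                /* was: size += PAGE_SIZE */
    }
    return 0;
}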
diff --git a/block/partition-generic.c b/block/partition-generic.c
index ae95e963c5bb..91327dbfbb1d 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -360,15 +360,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}
+ err = hd_ref_init(p);
+ if (err) {
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ goto out_remove_file;
+ goto out_del;
+ }
+
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
- if (!hd_ref_init(p))
- return p;
+ return p;
out_free_info:
free_part_info(p);
@@ -377,6 +382,8 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
+out_remove_file:
+ device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index d19b52324cf5..dac1c24e9c3e 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
struct scatterlist *sg;
sg = walk->sg;
- walk->pg = sg_page(sg);
walk->offset = sg->offset;
+ walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
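The fix above boils down to splitting an over-sized scatterlist offset into a page index and an in-page offset before walking; a tiny illustrative sketch (names are not the crypto API):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* An sg offset may be larger than PAGE_SIZE, so derive "how many pages
 * past sg_page()" and "offset within that page" separately. */
static void normalize_sg_offset(unsigned int offset,
                                unsigned int *page_idx,
                                unsigned int *in_page)
{
    *page_idx = offset >> PAGE_SHIFT;        /* walk->pg += offset >> PAGE_SHIFT */
    *in_page  = offset & (PAGE_SIZE - 1);    /* offset_in_page(offset) */
}

int main(void)
{
    unsigned int idx, off;

    normalize_sg_offset(3 * PAGE_SIZE + 100, &idx, &off);
    printf("page %u, offset %u\n", idx, off);    /* page 3, offset 100 */
    return 0;
}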
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index ae8c57fd8bc7..d4944318ca1f 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1849,6 +1849,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
static int do_test_rsa(struct crypto_akcipher *tfm,
struct akcipher_testvec *vecs)
{
+ char *xbuf[XBUFSIZE];
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
@@ -1857,9 +1858,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
+ if (testmgr_alloc_buf(xbuf))
+ return err;
+
req = akcipher_request_alloc(tfm, GFP_KERNEL);
if (!req)
- return err;
+ goto free_xbuf;
init_completion(&result.completion);
@@ -1877,9 +1881,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
if (!outbuf_enc)
goto free_req;
+ if (WARN_ON(vecs->m_size > PAGE_SIZE))
+ goto free_all;
+
+ memcpy(xbuf[0], vecs->m, vecs->m_size);
+
sg_init_table(src_tab, 2);
- sg_set_buf(&src_tab[0], vecs->m, 8);
- sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
+ sg_set_buf(&src_tab[0], xbuf[0], 8);
+ sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
sg_init_one(&dst, outbuf_enc, out_len_max);
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
@@ -1898,7 +1907,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
goto free_all;
}
/* verify that encrypted message is equal to expected */
- if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
+ if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
pr_err("alg: rsa: encrypt test failed. Invalid output\n");
err = -EINVAL;
goto free_all;
@@ -1913,7 +1922,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
err = -ENOMEM;
goto free_all;
}
- sg_init_one(&src, vecs->c, vecs->c_size);
+
+ if (WARN_ON(vecs->c_size > PAGE_SIZE))
+ goto free_all;
+
+ memcpy(xbuf[0], vecs->c, vecs->c_size);
+
+ sg_init_one(&src, xbuf[0], vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
init_completion(&result.completion);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
@@ -1940,6 +1955,8 @@ free_all:
kfree(outbuf_enc);
free_req:
akcipher_request_free(req);
+free_xbuf:
+ testmgr_free_buf(xbuf);
return err;
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 6979186dbd4b..9f77943653fb 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+#ifdef CONFIG_X86
+static bool acpi_hwp_native_thermal_lvt_set;
+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
+ u32 lvl,
+ void *context,
+ void **rv)
+{
+ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+ u32 capbuf[2];
+ struct acpi_osc_context osc_context = {
+ .uuid_str = sb_uuid_str,
+ .rev = 1,
+ .cap.length = 8,
+ .cap.pointer = capbuf,
+ };
+
+ if (acpi_hwp_native_thermal_lvt_set)
+ return AE_CTRL_TERMINATE;
+
+ capbuf[0] = 0x0000;
+ capbuf[1] = 0x1000; /* set bit 12 */
+
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
+ if (osc_context.ret.pointer && osc_context.ret.length > 1) {
+ u32 *capbuf_ret = osc_context.ret.pointer;
+
+ if (capbuf_ret[1] & 0x1000) {
+ acpi_handle_info(handle,
+ "_OSC native thermal LVT Acked\n");
+ acpi_hwp_native_thermal_lvt_set = true;
+ }
+ }
+ kfree(osc_context.ret.pointer);
+ }
+
+ return AE_OK;
+}
+
+void __init acpi_early_processor_osc(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP)) {
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL);
+ }
+}
+#endif
+
/*
* The following ACPI IDs are known to be suitable for representing as
* processor devices.
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index bc32f3194afe..28c50c6b5f45 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -417,6 +417,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
obj_desc->method.mutex->mutex.
original_sync_level =
obj_desc->method.mutex->mutex.sync_level;
+
+ obj_desc->method.mutex->mutex.thread_id =
+ acpi_os_get_thread_id();
}
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index a212cefae524..ca4f28432d87 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1004,6 +1004,9 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /* Set capability bits for _OSC under processor scope */
+ acpi_early_processor_osc();
+
/*
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 11d87bf67e73..0f3f41c13b38 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -130,6 +130,12 @@ void acpi_early_processor_set_pdc(void);
static inline void acpi_early_processor_set_pdc(void) {}
#endif
+#ifdef CONFIG_X86
+void acpi_early_processor_osc(void);
+#else
+static inline void acpi_early_processor_osc(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 20d17906fc9b..f80cfc36a354 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3444,7 +3444,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 04975b851c23..639adb1f8abd 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
if (rc)
return rc;
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->force_port_map);
+
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e2c6d9e0c5ac..e916bff6cee8 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -739,9 +739,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
__func__);
version = XGENE_AHCI_V1;
- }
- if (info->valid & ACPI_VALID_CID)
+ } else if (info->valid & ACPI_VALID_CID) {
version = XGENE_AHCI_V2;
+ }
}
}
#endif
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 998c6a85ad89..9628fa131757 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -467,6 +467,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, hpriv->force_port_map);
port_map = hpriv->force_port_map;
+ hpriv->saved_port_map = port_map;
}
if (hpriv->mask_port_map) {
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 65f50eccd49b..a48824deabc5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1381,7 +1381,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock(&genpd->lock);
- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
subdomain->name);
ret = -EBUSY;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may be started from the middle of the 'bvec'
+ * because of bio splitting, so the offset from the bvec must
+ * be passed to the iov iterator
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 93b3f99b6865..8f1ce6d57a08 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q)
req, req->cmd_type);
if (unlikely(!nbd->sock)) {
- dev_err(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
req->errors++;
nbd_end_request(nbd, req);
spin_lock_irq(q->queue_lock);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 562b5a4ca7b7..78a39f736c64 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -126,7 +126,7 @@
*/
#include <linux/types.h>
-static bool verbose = 0;
+static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 1740d75e8a32..216a94fed5b4 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -117,7 +117,7 @@
*/
-static bool verbose = 0;
+static int verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
#include <asm/uaccess.h>
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 1317ddaa3c23..b05b999fbbdc 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -46,6 +46,7 @@ static const struct of_device_id bt_power_match_table[] = {
static struct bluetooth_power_platform_data *bt_power_pdata;
static struct platform_device *btpdev;
static bool previous;
+static int pwr_state;
struct class *bt_class;
static int bt_major;
@@ -636,6 +637,7 @@ static int bt_power_probe(struct platform_device *pdev)
memcpy(bt_power_pdata, pdev->dev.platform_data,
sizeof(struct bluetooth_power_platform_data));
+ pwr_state = 0;
} else {
BT_PWR_ERR("Failed to get platform data");
goto free_pdata;
@@ -680,7 +682,7 @@ int bt_register_slimdev(struct device *dev)
static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- int ret;
+ int ret, pwr_cntrl = 0;
switch (cmd) {
case BT_CMD_SLIM_TEST:
@@ -692,6 +694,18 @@ static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
bt_power_pdata->slim_dev->platform_data
);
break;
+ case BT_CMD_PWR_CTRL:
+ pwr_cntrl = (int)arg;
+ BT_PWR_ERR("BT_CMD_PWR_CTRL pwr_cntrl:%d", pwr_cntrl);
+ if (pwr_state != pwr_cntrl) {
+ ret = bluetooth_power(pwr_cntrl);
+ if (!ret)
+ pwr_state = pwr_cntrl;
+ } else {
+ BT_PWR_ERR("BT chip state is already :%d no change d\n"
+ , pwr_state);
+ }
+ break;
default:
return -EINVAL;
}
@@ -711,6 +725,7 @@ static struct platform_driver bt_power_driver = {
static const struct file_operations bt_dev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = bt_ioctl,
+ .compat_ioctl = bt_ioctl,
};
static int __init bluetooth_power_init(void)
@@ -733,7 +748,7 @@ static int __init bluetooth_power_init(void)
if (device_create(bt_class, NULL, MKDEV(bt_major, 0),
- NULL, "pintest") == NULL) {
+ NULL, "btpower") == NULL) {
BTFMSLIM_ERR("failed to allocate char dev\n");
goto chrdev_unreg;
}
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index e98d15eaa799..1827fc4d15c1 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 67c1207d35be..ef8aaac6e0a2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1070,7 +1070,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
int idx = list[i].pgidx;
if (map->attr & FASTRPC_ATTR_NOVA) {
- offset = (uintptr_t)lpra[i].buf.pv;
+ offset = 0;
} else {
down_read(&current->mm->mmap_sem);
VERIFY(err, NULL != (vma = find_vma(current->mm,
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 3ace102a2a0a..bbf206e3da0d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -422,6 +422,12 @@ const struct clk_ops clk_divider_ops = {
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
+const struct clk_ops clk_divider_ro_ops = {
+ .recalc_rate = clk_divider_recalc_rate,
+ .round_rate = clk_divider_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
+
static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -445,7 +451,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &clk_divider_ops;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_divider_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1c625764133d..97a604755053 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1449,7 +1449,7 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
* walk down a subtree and set the new rates notifying the rate
* change on the way
*/
-static void clk_change_rate(struct clk_core *core)
+static int clk_change_rate(struct clk_core *core)
{
struct clk_core *child;
struct hlist_node *tmp;
@@ -1457,6 +1457,7 @@ static void clk_change_rate(struct clk_core *core)
unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
struct clk_core *old_parent;
+ int rc = 0;
old_rate = core->rate;
@@ -1484,8 +1485,12 @@ static void clk_change_rate(struct clk_core *core)
trace_clk_set_rate(core, core->new_rate);
- if (!skip_set_rate && core->ops->set_rate)
- core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
+ if (!skip_set_rate && core->ops->set_rate) {
+ rc = core->ops->set_rate(core->hw, core->new_rate,
+ best_parent_rate);
+ if (rc)
+ goto out;
+ }
trace_clk_set_rate_complete(core, core->new_rate);
@@ -1511,6 +1516,13 @@ static void clk_change_rate(struct clk_core *core)
/* handle the new child who might not be in core->children yet */
if (core->new_child)
clk_change_rate(core->new_child);
+
+ return rc;
+
+out:
+ trace_clk_set_rate_complete(core, core->new_rate);
+
+ return rc;
}
static int clk_core_set_rate_nolock(struct clk_core *core,
@@ -1545,7 +1557,13 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
}
/* change the rates */
- clk_change_rate(top);
+ ret = clk_change_rate(top);
+ if (ret) {
+ pr_err("%s: failed to set %s rate\n", __func__,
+ top->name);
+ clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+ return ret;
+ }
core->req_rate = req_rate;
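A toy model of the error-propagation pattern this clk.c hunk introduces; set_rate and the rollback are simplified stand-ins for the clk framework calls:

#include <stdio.h>

/* set_rate can now fail, the per-node change returns that error, and the
 * caller aborts the whole rate change instead of silently continuing. */
static int set_rate(unsigned long rate)
{
    return rate > 1000000UL ? -1 /* error */ : 0;
}

static int clk_change_rate_model(unsigned long new_rate)
{
    int rc = set_rate(new_rate);

    if (rc)
        return rc;                       /* propagate instead of ignoring */
    printf("rate set to %lu\n", new_rate);
    return 0;
}

int main(void)
{
    if (clk_change_rate_model(2000000UL)) {
        printf("failed to set rate, aborting rate change\n");
        /* the kernel then calls clk_propagate_rate_change(top, ABORT_RATE_CHANGE) */
    }
    return 0;
}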
diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
index c83ae1367abc..d920d410b51d 100644
--- a/drivers/clk/meson/clkc.c
+++ b/drivers/clk/meson/clkc.c
@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
}
void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
- size_t nr_confs,
+ unsigned int nr_confs,
void __iomem *clk_base)
{
unsigned int i;
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
index 46e791b3cb99..f2a3f7402f67 100644
--- a/drivers/clk/msm/clock-gcc-cobalt.c
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -1707,17 +1707,6 @@ static struct branch_clk gcc_gpu_bimc_gfx_clk = {
},
};
-static struct branch_clk gcc_gpu_bimc_gfx_src_clk = {
- .cbcr_reg = GCC_GPU_BIMC_GFX_SRC_CBCR,
- .has_sibling = 1,
- .base = &virt_base,
- .c = {
- .dbg_name = "gcc_gpu_bimc_gfx_src_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(gcc_gpu_bimc_gfx_src_clk.c),
- },
-};
-
static struct branch_clk gcc_gpu_cfg_ahb_clk = {
.cbcr_reg = GCC_GPU_CFG_AHB_CBCR,
.has_sibling = 1,
@@ -1731,17 +1720,6 @@ static struct branch_clk gcc_gpu_cfg_ahb_clk = {
},
};
-static struct branch_clk gcc_gpu_snoc_dvm_gfx_clk = {
- .cbcr_reg = GCC_GPU_SNOC_DVM_GFX_CBCR,
- .has_sibling = 1,
- .base = &virt_base,
- .c = {
- .dbg_name = "gcc_gpu_snoc_dvm_gfx_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(gcc_gpu_snoc_dvm_gfx_clk.c),
- },
-};
-
static struct branch_clk gcc_gpu_iref_clk = {
.cbcr_reg = GCC_GPU_IREF_EN,
.has_sibling = 1,
@@ -2454,7 +2432,6 @@ static struct mux_clk gcc_debug_mux = {
{ &gcc_mss_mnoc_bimc_axi_clk.c, 0x0120 },
{ &gcc_mss_snoc_axi_clk.c, 0x0123 },
{ &gcc_gpu_cfg_ahb_clk.c, 0x013b },
- { &gcc_gpu_bimc_gfx_src_clk.c, 0x013e },
{ &gcc_gpu_bimc_gfx_clk.c, 0x013f },
{ &gcc_qspi_ahb_clk.c, 0x0156 },
{ &gcc_qspi_ref_clk.c, 0x0157 },
@@ -2649,9 +2626,7 @@ static struct clk_lookup msm_clocks_gcc_cobalt[] = {
CLK_LIST(gcc_gp2_clk),
CLK_LIST(gcc_gp3_clk),
CLK_LIST(gcc_gpu_bimc_gfx_clk),
- CLK_LIST(gcc_gpu_bimc_gfx_src_clk),
CLK_LIST(gcc_gpu_cfg_ahb_clk),
- CLK_LIST(gcc_gpu_snoc_dvm_gfx_clk),
CLK_LIST(gcc_gpu_iref_clk),
CLK_LIST(gcc_hmss_ahb_clk),
CLK_LIST(gcc_hmss_dvm_bus_clk),
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index a119c0b27321..5391ef456aae 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -80,6 +80,7 @@ enum clk_osm_trace_packet_id {
#define MEM_ACC_SEQ_REG_VAL_START(n) (SEQ_REG(60 + (n)))
#define SEQ_REG1_MSMCOBALT_V2 0x1048
#define VERSION_REG 0x0
+#define VERSION_1P1 0x00010100
#define OSM_TABLE_SIZE 40
#define MAX_CLUSTER_CNT 2
@@ -203,7 +204,6 @@ enum clk_osm_trace_packet_id {
#define TRACE_CTRL_ENABLE 1
#define TRACE_CTRL_DISABLE 0
#define TRACE_CTRL_ENABLE_WDOG_STATUS BIT(30)
-#define TRACE_CTRL_ENABLE_WDOG_STATUS_MASK BIT(30)
#define TRACE_CTRL_PACKET_TYPE_MASK BVAL(2, 1, 3)
#define TRACE_CTRL_PACKET_TYPE_SHIFT 1
#define TRACE_CTRL_PERIODIC_TRACE_EN_MASK BIT(3)
@@ -317,6 +317,7 @@ struct clk_osm {
unsigned long pbases[NUM_BASES];
spinlock_t lock;
+ u32 version;
u32 cpu_reg_mask;
u32 num_entries;
u32 cluster_num;
@@ -354,6 +355,7 @@ struct clk_osm {
struct notifier_block panic_notifier;
u32 trace_periodic_timer;
bool trace_en;
+ bool wdog_trace_en;
};
static bool msmcobalt_v1;
@@ -2145,6 +2147,36 @@ DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_enable_fops,
debugfs_set_trace_enable,
"%llu\n");
+static int debugfs_get_wdog_trace(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ *val = c->wdog_trace_en;
+ return 0;
+}
+
+static int debugfs_set_wdog_trace(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+ int regval;
+
+ if (c->version >= VERSION_1P1) {
+ regval = clk_osm_read_reg(c, TRACE_CTRL);
+ regval = val ? regval | TRACE_CTRL_ENABLE_WDOG_STATUS :
+ regval & ~TRACE_CTRL_ENABLE_WDOG_STATUS;
+ clk_osm_write_reg(c, regval, TRACE_CTRL);
+ c->wdog_trace_en = val ? true : false;
+ } else {
+ pr_info("wdog status registers enabled by default\n");
+ }
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_wdog_enable_fops,
+ debugfs_get_wdog_trace,
+ debugfs_set_wdog_trace,
+ "%llu\n");
+
#define MAX_DEBUG_BUF_LEN 15
static DEFINE_MUTEX(debug_buf_mutex);
@@ -2416,6 +2448,15 @@ static void populate_debugfs_dir(struct clk_osm *c)
goto exit;
}
+ temp = debugfs_create_file("wdog_trace_enable",
+ S_IRUGO | S_IWUSR,
+ c->debugfs, c,
+ &debugfs_trace_wdog_enable_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_trace_wdog_enable_fops debugfs file creation failed\n");
+ goto exit;
+ }
+
temp = debugfs_create_file("trace_enable",
S_IRUGO | S_IWUSR,
c->debugfs, c,
@@ -2694,18 +2735,6 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return rc;
}
- if (msmcobalt_v2) {
- /* Enable OSM WDOG registers */
- clk_osm_masked_write_reg(&pwrcl_clk,
- TRACE_CTRL_ENABLE_WDOG_STATUS,
- TRACE_CTRL,
- TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
- clk_osm_masked_write_reg(&perfcl_clk,
- TRACE_CTRL_ENABLE_WDOG_STATUS,
- TRACE_CTRL,
- TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
- }
-
/*
* The hmss_gpll0 clock runs at 300 MHz. Ensure it is at the correct
* frequency before enabling OSM. LUT index 0 is always sourced from
@@ -2719,33 +2748,26 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
}
clk_prepare_enable(&sys_apcsaux_clk_gcc.c);
- /* Set boot rate */
- rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ?
- MSMCOBALTV1_PWRCL_BOOT_RATE :
- MSMCOBALTV2_PWRCL_BOOT_RATE);
+ rc = clk_set_rate(&osm_clk_src.c, osm_clk_init_rate);
if (rc) {
- dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set init rate on osm_clk, rc=%d\n",
rc);
- clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
- return rc;
+ goto exit2;
}
- rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ?
- MSMCOBALTV1_PERFCL_BOOT_RATE :
- MSMCOBALTV2_PERFCL_BOOT_RATE);
+ /* Make sure index zero is selected */
+ rc = clk_set_rate(&pwrcl_clk.c, init_rate);
if (rc) {
- dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n",
rc);
- clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
- return rc;
+ goto exit2;
}
- rc = clk_set_rate(&osm_clk_src.c, osm_clk_init_rate);
+ rc = clk_set_rate(&perfcl_clk.c, init_rate);
if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on osm_clk, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n",
rc);
- clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
- return rc;
+ goto exit2;
}
get_online_cpus();
@@ -2756,6 +2778,28 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
"Failed to enable clock for cpu %d\n", cpu);
}
+ /* Set final boot rate */
+ rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PWRCL_BOOT_RATE :
+ MSMCOBALTV2_PWRCL_BOOT_RATE);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n",
+ rc);
+ goto exit2;
+ }
+
+ rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PERFCL_BOOT_RATE :
+ MSMCOBALTV2_PERFCL_BOOT_RATE);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n",
+ rc);
+ goto exit2;
+ }
+
+ pwrcl_clk.version = clk_osm_read_reg(&pwrcl_clk, VERSION_REG);
+ perfcl_clk.version = clk_osm_read_reg(&perfcl_clk, VERSION_REG);
+
populate_opp_table(pdev);
populate_debugfs_dir(&pwrcl_clk);
populate_debugfs_dir(&perfcl_clk);
@@ -2769,6 +2813,8 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return 0;
+exit2:
+ clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
exit:
dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n",
rc);
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 13aabbb3acbe..558da89555af 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
div->width = 1;
div_hw = &div->hw;
- div_ops = &clk_divider_ops;
+ div_ops = &clk_divider_ro_ops;
}
branch->gate.reg = branch->offset + reg_base;
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 0987c8e7f807..8aea1d519311 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -83,9 +83,6 @@ static int clk_branch_wait(const struct clk_branch *br, bool enabling,
if (br->halt_check == BRANCH_HALT_DELAY || (!enabling && voted)) {
udelay(10);
- } else if ((br->halt_check == BRANCH_HALT_NO_CHECK_ON_DISABLE) &&
- !enabling) {
- return 0;
} else if (br->halt_check == BRANCH_HALT_ENABLE ||
br->halt_check == BRANCH_HALT ||
(enabling && voted)) {
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 331f58d651e5..8a934cf8bed1 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -42,10 +42,6 @@ struct clk_branch {
#define BRANCH_HALT_ENABLE 1 /* pol: 0 = halt */
#define BRANCH_HALT_ENABLE_VOTED (BRANCH_HALT_ENABLE | BRANCH_VOTED)
#define BRANCH_HALT_DELAY 2 /* No bit to check; just delay */
-/* No halt check during clk disable for the clocks controlled by other masters
- * via voting registers like SMMU clocks.
- */
-#define BRANCH_HALT_NO_CHECK_ON_DISABLE 4
struct clk_regmap clkr;
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 612e7b37a8d0..bc982c9bfa71 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -662,8 +662,6 @@ static struct clk_hw *msmfalcon_clks[] = {
[RPM_AGGR2_NOC_A_CLK] = &msmfalcon_aggre2_noc_a_clk.hw,
[RPM_CNOC_CLK] = &msmfalcon_cnoc_clk.hw,
[RPM_CNOC_A_CLK] = &msmfalcon_cnoc_a_clk.hw,
- [RPM_MMAXI_CLK] = &msmfalcon_mmssnoc_axi_clk.hw,
- [RPM_MMAXI_A_CLK] = &msmfalcon_mmssnoc_axi_a_clk.hw,
[RPM_IPA_CLK] = &msmfalcon_ipa_clk.hw,
[RPM_IPA_A_CLK] = &msmfalcon_ipa_a_clk.hw,
[RPM_CE1_CLK] = &msmfalcon_ce1_clk.hw,
@@ -684,6 +682,8 @@ static struct clk_hw *msmfalcon_clks[] = {
[RPM_LN_BB_CLK3_PIN_AO] = &msmfalcon_ln_bb_clk3_pin_ao.hw,
[RPM_CNOC_PERIPH_CLK] = &msmfalcon_cnoc_periph_clk.hw,
[RPM_CNOC_PERIPH_A_CLK] = &msmfalcon_cnoc_periph_a_clk.hw,
+ [MMSSNOC_AXI_CLK] = &msmfalcon_mmssnoc_axi_clk.hw,
+ [MMSSNOC_AXI_A_CLK] = &msmfalcon_mmssnoc_axi_a_clk.hw,
/* Voter Clocks */
[BIMC_MSMBUS_CLK] = &bimc_msmbus_clk.hw,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 6c899fb5b856..8b9c388bc43c 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -2722,7 +2722,7 @@ static struct clk_rcg ce3_src = {
},
.freq_tbl = clk_tbl_ce3,
.clkr = {
- .enable_reg = 0x2c08,
+ .enable_reg = 0x36c0,
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "ce3_src",
@@ -2738,7 +2738,7 @@ static struct clk_branch ce3_core_clk = {
.halt_reg = 0x2fdc,
.halt_bit = 5,
.clkr = {
- .enable_reg = 0x36c4,
+ .enable_reg = 0x36cc,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_core_clk",
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index a7b8ac07e73a..0f39bf278cd4 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -1253,7 +1253,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
static struct clk_branch gcc_mmss_bimc_gfx_clk = {
.halt_reg = 0x9010,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x9010,
.enable_mask = BIT(0),
@@ -2692,7 +2692,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
static struct clk_branch gcc_smmu_aggre0_axi_clk = {
.halt_reg = 0x81014,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x81014,
.enable_mask = BIT(0),
@@ -2717,7 +2717,7 @@ static struct clk_gate2 gcc_aggre0_noc_qosgen_extref_clk = {
static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
.halt_reg = 0x81018,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x81018,
.enable_mask = BIT(0),
@@ -2871,7 +2871,7 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
static struct clk_branch hlos1_vote_lpass_core_smmu_clk = {
.halt_reg = 0x7d010,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x7d010,
.enable_mask = BIT(0),
@@ -2884,7 +2884,7 @@ static struct clk_branch hlos1_vote_lpass_core_smmu_clk = {
static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
.halt_reg = 0x7d014,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x7d014,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/gcc-msmfalcon.c b/drivers/clk/qcom/gcc-msmfalcon.c
index 2cbc9dff047b..42b91d70aa54 100644
--- a/drivers/clk/qcom/gcc-msmfalcon.c
+++ b/drivers/clk/qcom/gcc-msmfalcon.c
@@ -1173,7 +1173,7 @@ static struct clk_branch gcc_aggre2_usb3_axi_clk = {
static struct clk_branch gcc_bimc_gfx_clk = {
.halt_reg = 0x7106c,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x7106c,
.enable_mask = BIT(0),
@@ -1711,7 +1711,7 @@ static struct clk_branch gcc_gp3_clk = {
static struct clk_branch gcc_gpu_bimc_gfx_clk = {
.halt_reg = 0x71010,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x71010,
.enable_mask = BIT(0),
@@ -1737,7 +1737,7 @@ static struct clk_branch gcc_gpu_bimc_gfx_src_clk = {
static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.halt_reg = 0x71004,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x71004,
.enable_mask = BIT(0),
@@ -2516,7 +2516,7 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
.halt_reg = 0x7d014,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x7d014,
.enable_mask = BIT(0),
@@ -2529,7 +2529,7 @@ static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
.halt_reg = 0x7d048,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x7d048,
.enable_mask = BIT(0),
@@ -2542,7 +2542,7 @@ static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
.halt_reg = 0x7e048,
- .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x7e048,
.enable_mask = BIT(0),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index be6c7fd8315d..9b6c8188efac 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (gate_offset >= 0) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
- return ERR_PTR(-ENOMEM);
+ goto err_gate;
gate->flags = gate_flags;
gate->reg = base + gate_offset;
@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (div_width > 0) {
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
- return ERR_PTR(-ENOMEM);
+ goto err_div;
div->flags = div_flags;
div->reg = base + muxdiv_offset;
@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
div->width = div_width;
div->lock = lock;
div->table = div_table;
- div_ops = &clk_divider_ops;
+ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
+ ? &clk_divider_ro_ops
+ : &clk_divider_ops;
}
clk = clk_register_composite(NULL, name, parent_names, num_parents,
@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
flags);
return clk;
+err_div:
+ kfree(gate);
+err_gate:
+ kfree(mux);
+ return ERR_PTR(-ENOMEM);
}
static struct clk *rockchip_clk_register_frac_branch(const char *name,
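A standalone sketch (not part of the patch) of the unwind pattern the rockchip hunk above introduces: a later allocation failure frees the earlier allocations in reverse order, so nothing leaks. Userspace malloc/free stand in for kzalloc/kfree and the names are illustrative only.

    #include <stdlib.h>

    struct mux  { int dummy; };
    struct gate { int dummy; };
    struct div  { int dummy; };

    /* Allocate three optional sub-objects; on failure, unwind whatever was
     * already allocated and report failure (-1 stands in for -ENOMEM). */
    static int register_branch(struct mux **m, struct gate **g, struct div **d)
    {
        *m = malloc(sizeof(**m));
        if (!*m)
            return -1;

        *g = malloc(sizeof(**g));
        if (!*g)
            goto err_gate;      /* only the mux exists so far */

        *d = malloc(sizeof(**d));
        if (!*d)
            goto err_div;       /* mux and gate exist */

        return 0;

    err_div:
        free(*g);
    err_gate:
        free(*m);
        return -1;
    }
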
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index a1cdef6b0f90..897c36c1754a 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
int num = ARRAY_SIZE(parent_names);
char name[12];
struct clk_init_data init;
+ static int instance;
int i;
bool deprecated;
@@ -118,7 +119,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
+ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
sp810->timerclken[i].sp810 = sp810;
sp810->timerclken[i].channel = i;
@@ -139,5 +140,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
}
of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
+ instance++;
}
CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 98fb8821382d..f53b02a6bc05 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -667,6 +667,11 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 545069d5fdfb..e342565e8715 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
* call the CPU ops suspend protocol with idle index as a
* parameter.
*/
- arm_cpuidle_suspend(idx);
+ ret = arm_cpuidle_suspend(idx);
cpu_pm_exit();
}
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index b40231dd8dd1..ffbfd1c11af9 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -748,7 +748,7 @@ static int calculate_residency(struct power_params *base_pwr,
residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
if (residency < 0) {
- __WARN_printf("%s: Incorrect power attributes for LPM\n",
+ pr_err("%s: residency < 0 for LPM\n",
__func__);
return next_pwr->time_overhead_us;
}
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 37e504381313..de033cc37a15 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -85,7 +85,9 @@ struct lpm_debug {
struct lpm_cluster *lpm_root_node;
-static bool lpm_prediction;
+#define MAXSAMPLES 5
+
+static bool lpm_prediction = true;
module_param_named(lpm_prediction,
lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8ef06fad8b14..ab9945f2cb7a 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
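Both ccp hunks above apply the same rule: zero a stack-allocated export structure before filling it, so compiler-inserted padding bytes never reach the caller's buffer. A minimal userspace illustration with a hypothetical struct:

    #include <string.h>

    struct exp_ctx {
        char type;       /* 1 byte ...                                  */
        long msg_bits;   /* ... usually preceded by compiler padding    */
    };

    static void export_state(struct exp_ctx *out, char type, long msg_bits)
    {
        struct exp_ctx state;

        /* Don't let stack garbage in the padding leak to 'out'. */
        memset(&state, 0, sizeof(state));
        state.type = type;
        state.msg_bits = msg_bits;

        memcpy(out, &state, sizeof(state));
    }
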
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 7ddbb1938400..4cf95b90a2df 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -1,6 +1,6 @@
/* Qualcomm Crypto Engine driver.
*
- * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1962,8 +1962,8 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
else
q_req->cryptlen = areq->cryptlen - authsize;
- if ((q_req->cryptlen > ULONG_MAX - ivsize) ||
- (q_req->cryptlen + ivsize > ULONG_MAX - areq->assoclen)) {
+ if ((q_req->cryptlen > UINT_MAX - ivsize) ||
+ (q_req->cryptlen + ivsize > UINT_MAX - areq->assoclen)) {
pr_err("Integer overflow on total aead req length.\n");
return -EINVAL;
}
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 3f76bd495bcb..b9178d0a3093 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -145,6 +145,8 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 473d36d91644..e7480f373532 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -469,12 +469,17 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_init_aer())
goto err_aer;
+ if (adf_init_pf_wq())
+ goto err_pf_wq;
+
if (qat_crypto_register())
goto err_crypto_register;
return 0;
err_crypto_register:
+ adf_exit_pf_wq();
+err_pf_wq:
adf_exit_aer();
err_aer:
adf_chr_drv_destroy();
@@ -487,6 +492,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
adf_exit_aer();
+ adf_exit_pf_wq();
qat_crypto_unregister();
adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 1117a8b58280..38a0415e767d 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
int i;
u32 reg;
- /* Workqueue for PF2VF responses */
- pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
- if (!pf2vf_resp_wq)
- return -ENOMEM;
-
for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
i++, vf_info++) {
/* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
kfree(accel_dev->pf.vf_info);
accel_dev->pf.vf_info = NULL;
-
- if (pf2vf_resp_wq) {
- destroy_workqueue(pf2vf_resp_wq);
- pf2vf_resp_wq = NULL;
- }
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+ /* Workqueue for PF2VF responses */
+ pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+ return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+ if (pf2vf_resp_wq) {
+ destroy_workqueue(pf2vf_resp_wq);
+ pf2vf_resp_wq = NULL;
+ }
+}
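Taken together, the three QAT hunks above give the PF2VF response workqueue module-wide lifetime instead of tying it to each SR-IOV enable/disable cycle. A simplified, hedged sketch of the resulting shape (demo names, kernel build context assumed; not the driver's actual registration path):

    #include <linux/module.h>
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *pf_resp_wq;

    static int __init demo_init(void)
    {
        /* Created once at module load, mirroring adf_init_pf_wq() above. */
        pf_resp_wq = create_workqueue("demo_pf_resp_wq");
        return pf_resp_wq ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
        /* Torn down once at module unload, mirroring adf_exit_pf_wq(). */
        if (pf_resp_wq)
            destroy_workqueue(pf_resp_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
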
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b6f9f42e2985..a04fea4d0063 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
ptr->eptr = upper_32_bits(dma_addr);
}
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+ struct talitos_ptr *src_ptr, bool is_sec1)
+{
+ dst_ptr->ptr = src_ptr->ptr;
+ if (!is_sec1)
+ dst_ptr->eptr = src_ptr->eptr;
+}
+
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
bool is_sec1)
{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
-
/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
if (sg_count > 1 &&
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
areq->assoclen,
&edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
-
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ tbl_off += ret;
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
- sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+ areq->assoclen, 0);
+ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+ areq->assoclen, sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
- } else
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+ tbl_off += ret;
+ } else {
+ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ }
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->icv_ool = false;
- if (sg_count > 1 &&
- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+ areq->assoclen, 0);
+ } else if ((sg_count =
+ sg_to_link_tbl_offset(areq->dst, sg_count,
areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) >
- 1) {
+ &edesc->link_tbl[tbl_off])) > 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dma_len, DMA_BIDIRECTIONAL);
edesc->icv_ool = true;
- } else
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+ } else {
+ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ }
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2519,21 +2533,11 @@ struct talitos_crypto_alg {
struct talitos_alg_template algt;
};
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+ struct talitos_crypto_alg *talitos_alg)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
struct talitos_private *priv;
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
-
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
@@ -2551,10 +2555,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
+
+ return talitos_init_common(ctx, talitos_alg);
+}
+
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- talitos_cra_init(crypto_aead_tfm(tfm));
- return 0;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
+
+ return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/devfreq/governor_bw_vbif.c b/drivers/devfreq/governor_bw_vbif.c
index da1eefb8c94e..33e144b653d0 100644
--- a/drivers/devfreq/governor_bw_vbif.c
+++ b/drivers/devfreq/governor_bw_vbif.c
@@ -78,15 +78,13 @@ static int devfreq_vbif_ev_handler(struct devfreq *devfreq,
case DEVFREQ_GOV_START:
mutex_lock(&df_lock);
df = devfreq;
- if (df->profile->get_dev_status)
- ret = df->profile->get_dev_status(df->dev.parent,
- &stat);
+ if (df->profile->get_dev_status &&
+ !df->profile->get_dev_status(df->dev.parent, &stat) &&
+ stat.private_data)
+ dev_ab = stat.private_data;
else
- ret = 0;
- if (ret || !stat.private_data)
pr_warn("Device doesn't take AB votes!\n");
- else
- dev_ab = stat.private_data;
+
mutex_unlock(&df_lock);
ret = devfreq_vbif_update_bw(0, 0);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 4f099ea29f83..c66133b5e852 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
if (dwc->initialized == true)
return;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;
- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;
/* We have to copy data since dws can be temporary storage */
@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;
/* Disable interrupts */
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 823ad728aecf..efc02b98e6ba 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index f06579c6d548..26da2865b025 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index a59061e4221a..55f5d33f6dc7 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -122,6 +122,7 @@ struct pxad_chan {
struct pxad_device {
struct dma_device slave;
int nr_chans;
+ int nr_requestors;
void __iomem *base;
struct pxad_phy *phys;
spinlock_t phy_lock; /* Phy association */
@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;
/* clear the channel mapping in DRCMR */
- if (chan->drcmr <= DRCMR_CHLNUM) {
+ if (chan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(chan->drcmr);
writel_relaxed(0, chan->phy->base + reg);
}
@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
+ struct pxad_device *pdev;
u32 reg, dalgn;
if (!phy->vchan)
@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);
- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
+ if (phy->vchan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(phy->vchan->drcmr);
writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
}
@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
{
u32 maxburst = 0, dev_addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
*dcmd = 0;
if (dir == DMA_DEV_TO_MEM) {
@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
*dcmd |= PXA_DCMD_INCTRGADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
*dcmd |= PXA_DCMD_INCSRCADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
static int pxad_init_dmadev(struct platform_device *op,
struct pxad_device *pdev,
- unsigned int nr_phy_chans)
+ unsigned int nr_phy_chans,
+ unsigned int nr_requestors)
{
int ret;
unsigned int i;
struct pxad_chan *c;
pdev->nr_chans = nr_phy_chans;
+ pdev->nr_requestors = nr_requestors;
INIT_LIST_HEAD(&pdev->slave.channels);
pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0;
+ int ret, dma_channels = 0, nb_requestors = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
return PTR_ERR(pdev->base);
of_id = of_match_device(pxad_dt_ids, &op->dev);
- if (of_id)
+ if (of_id) {
of_property_read_u32(op->dev.of_node, "#dma-channels",
&dma_channels);
- else if (pdata && pdata->dma_channels)
+ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ &nb_requestors);
+ if (ret) {
+ dev_warn(pdev->slave.dev,
+ "#dma-requests set to default 32 as missing in OF: %d",
+ ret);
+ nb_requestors = 32;
+ };
+ } else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
- else
+ nb_requestors = pdata->nb_requestors;
+ } else {
dma_channels = 32; /* default 32 channel */
+ }
dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
@@ -1422,7 +1438,7 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
pdev->slave.dev = &op->dev;
- ret = pxad_init_dmadev(op, pdev, dma_channels);
+ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
if (ret) {
dev_err(pdev->slave.dev, "unable to register\n");
return ret;
@@ -1441,7 +1457,8 @@ static int pxad_probe(struct platform_device *op)
platform_set_drvdata(op, pdev);
pxad_init_debugfs(pdev);
- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
+ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
+ dma_channels, nb_requestors);
return 0;
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
mci = i7_dev->mci;
pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index cbee3179ec08..37649221f81c 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = 1 << TAD_SOCK(reg);
+ sck_way = TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
@@ -1435,7 +1435,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
switch(ch_way) {
case 2:
case 4:
- sck_xch = 1 << sck_way * (ch_way >> 1);
+ sck_xch = (1 << sck_way) * (ch_way >> 1);
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -1471,7 +1471,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
ch_addr = addr - offset;
ch_addr >>= (6 + shiftup);
- ch_addr /= ch_way * sck_way;
+ ch_addr /= sck_xch;
ch_addr <<= (6 + shiftup);
ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
@@ -2254,7 +2254,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
pvt = mci->pvt_info;
/*
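A quick arithmetic check of the sb_edac decode fix above, with numbers chosen purely for illustration (TAD_SOCK(reg) == 1, ch_way == 4): the old code turned sck_way into 1 << 1 = 2, built sck_xch = 1 << (2 * 2) = 16, yet divided the channel address by ch_way * sck_way = 8; the fixed code keeps sck_way = 1, builds sck_xch = (1 << 1) * 2 = 4, and divides by that same sck_xch, so the interleave factor used for the division matches the one used to build it.

    #include <stdio.h>

    int main(void)
    {
        unsigned tad_sock = 1, ch_way = 4;

        /* old decode */
        unsigned old_sck_way = 1u << tad_sock;
        unsigned old_divisor = ch_way * old_sck_way;              /* 8 */

        /* fixed decode */
        unsigned new_sck_way = tad_sock;
        unsigned sck_xch = (1u << new_sck_way) * (ch_way >> 1);   /* 4 */

        printf("old divisor %u, fixed divisor %u\n", old_divisor, sck_xch);
        return 0;
    }
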
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 9f9ea334399c..b6cb30d207be 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
/* Clear IRQ bits before request IRQs */
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_INT1, info->status,
- MAX77843_MUIC_IRQ_NUM);
+ MAX77843_MUIC_STATUS_NUM);
if (ret) {
dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
goto err_muic_irq;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 027ca212179f..3b52677f459a 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -180,6 +180,7 @@ static int generic_ops_register(void)
{
generic_ops.get_variable = efi.get_variable;
generic_ops.set_variable = efi.set_variable;
+ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b2a172d93a08..d775e2bfc017 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -8,7 +8,7 @@ cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
- -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING
+ -mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
@@ -16,7 +16,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
-KBUILD_CFLAGS := $(cflags-y) \
+KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector)
@@ -35,7 +35,8 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
-lib-$(CONFIG_ARM64) += arm64-stub.o
+lib-$(CONFIG_ARM) += arm32-stub.o
+lib-$(CONFIG_ARM64) += arm64-stub.o random.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 950c87f5d279..d5aa1d16154f 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -18,6 +18,8 @@
#include "efistub.h"
+bool __nokaslr;
+
static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
{
static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
@@ -207,14 +209,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Failed to find DRAM base\n");
goto fail;
}
- status = handle_kernel_image(sys_table, image_addr, &image_size,
- &reserve_addr,
- &reserve_size,
- dram_base, image);
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
- goto fail;
- }
/*
* Get the command line from EFI, using the LOADED_IMAGE
@@ -224,7 +218,28 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
if (!cmdline_ptr) {
pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
- goto fail_free_image;
+ goto fail;
+ }
+
+ /* check whether 'nokaslr' was passed on the command line */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ static const u8 default_cmdline[] = CONFIG_CMDLINE;
+ const u8 *str, *cmdline = cmdline_ptr;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
+ cmdline = default_cmdline;
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ __nokaslr = true;
+ }
+
+ status = handle_kernel_image(sys_table, image_addr, &image_size,
+ &reserve_addr,
+ &reserve_size,
+ dram_base, image);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ goto fail_free_cmdline;
}
status = efi_parse_options(cmdline_ptr);
@@ -244,7 +259,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to load device tree!\n");
- goto fail_free_cmdline;
+ goto fail_free_image;
}
}
@@ -286,12 +301,11 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_free(sys_table, initrd_size, initrd_addr);
efi_free(sys_table, fdt_size, fdt_addr);
-fail_free_cmdline:
- efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
-
fail_free_image:
efi_free(sys_table, image_size, *image_addr);
efi_free(sys_table, reserve_size, reserve_addr);
+fail_free_cmdline:
+ efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
}
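A userspace model of the "nokaslr" check the arm-stub hunk above adds: strstr() locates the substring, and the extra test accepts it only when it starts the command line or follows a space. (Sketch only; the stub works on the converted EFI command line, not argv.)

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirror of the stub's test: "nokaslr" counts only if it begins the
     * command line or is preceded by a space. */
    static bool cmdline_has_nokaslr(const char *cmdline)
    {
        const char *str = strstr(cmdline, "nokaslr");

        return str && (str == cmdline || *(str - 1) == ' ');
    }

    int main(void)
    {
        printf("%d\n", cmdline_has_nokaslr("console=ttyAMA0 nokaslr"));    /* 1 */
        printf("%d\n", cmdline_has_nokaslr("console=ttyAMA0,nokaslr"));    /* 0 */
        return 0;
    }
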
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 78dfbd34b6bf..e0e6b74fef8f 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -13,6 +13,10 @@
#include <asm/efi.h>
#include <asm/sections.h>
+#include "efistub.h"
+
+extern bool __nokaslr;
+
efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
unsigned long *image_addr,
unsigned long *image_size,
@@ -23,26 +27,52 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
- unsigned long nr_pages;
void *old_image_addr = (void *)*image_addr;
unsigned long preferred_offset;
+ u64 phys_seed = 0;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ if (!__nokaslr) {
+ status = efi_get_random_bytes(sys_table_arg,
+ sizeof(phys_seed),
+ (u8 *)&phys_seed);
+ if (status == EFI_NOT_FOUND) {
+ pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ } else if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+ return status;
+ }
+ } else {
+ pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+ }
+ }
/*
* The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
* a 2 MB aligned base, which itself may be lower than dram_base, as
* long as the resulting offset equals or exceeds it.
*/
- preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+ preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
if (preferred_offset < dram_base)
- preferred_offset += SZ_2M;
+ preferred_offset += MIN_KIMG_ALIGN;
- /* Relocate the image, if required. */
kernel_size = _edata - _text;
- if (*image_addr != preferred_offset) {
- kernel_memsize = kernel_size + (_end - _edata);
+ kernel_memsize = kernel_size + (_end - _edata);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+ /*
+ * If KASLR is enabled, and we have some randomness available,
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
+ status = efi_random_alloc(sys_table_arg, *reserve_size,
+ MIN_KIMG_ALIGN, reserve_addr,
+ phys_seed);
+ *image_addr = *reserve_addr + TEXT_OFFSET;
+ } else {
/*
- * First, try a straight allocation at the preferred offset.
+ * Else, try a straight allocation at the preferred offset.
* This will work around the issue where, if dram_base == 0x0,
* efi_low_alloc() refuses to allocate at 0x0 (to prevent the
* address of the allocation to be mistaken for a FAIL return
@@ -52,27 +82,31 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
* Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'.
*/
+ if (*image_addr == preferred_offset)
+ return EFI_SUCCESS;
+
*image_addr = *reserve_addr = preferred_offset;
- nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
- EFI_PAGE_SIZE;
+ *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
+
status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages,
+ EFI_LOADER_DATA,
+ *reserve_size / EFI_PAGE_SIZE,
(efi_physical_addr_t *)reserve_addr);
- if (status != EFI_SUCCESS) {
- kernel_memsize += TEXT_OFFSET;
- status = efi_low_alloc(sys_table_arg, kernel_memsize,
- SZ_2M, reserve_addr);
+ }
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
- return status;
- }
- *image_addr = *reserve_addr + TEXT_OFFSET;
+ if (status != EFI_SUCCESS) {
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
+ status = efi_low_alloc(sys_table_arg, *reserve_size,
+ MIN_KIMG_ALIGN, reserve_addr);
+
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ *reserve_size = 0;
+ return status;
}
- memcpy((void *)*image_addr, old_image_addr, kernel_size);
- *reserve_size = kernel_memsize;
+ *image_addr = *reserve_addr + TEXT_OFFSET;
}
-
+ memcpy((void *)*image_addr, old_image_addr, kernel_size);
return EFI_SUCCESS;
}
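A hedged sketch of the non-randomized fallback placement the arm64 stub keeps using when no entropy is available: the image lands at TEXT_OFFSET past a MIN_KIMG_ALIGN-aligned base, bumped up one alignment step if that would fall below the DRAM base. The constants below are placeholders, not authoritative kernel values.

    #include <stdio.h>

    #define TEXT_OFFSET     0x80000UL    /* placeholder */
    #define MIN_KIMG_ALIGN  0x200000UL   /* placeholder: 2 MiB */

    static unsigned long round_down_ul(unsigned long x, unsigned long a)
    {
        return x & ~(a - 1);             /* a must be a power of two */
    }

    /* Preferred (non-randomized) load address for a given DRAM base. */
    static unsigned long preferred_load_addr(unsigned long dram_base)
    {
        unsigned long offset = round_down_ul(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;

        if (offset < dram_base)          /* never place the image below DRAM */
            offset += MIN_KIMG_ALIGN;
        return offset;
    }

    int main(void)
    {
        printf("0x%lx\n", preferred_load_addr(0x80000000UL));    /* 0x80080000 */
        printf("0x%lx\n", preferred_load_addr(0x80100000UL));    /* 0x80280000 */
        return 0;
    }
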
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index f07d4a67fa76..29ed2f9b218c 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -649,6 +649,10 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
return dst;
}
+#ifndef MAX_CMDLINE_ADDRESS
+#define MAX_CMDLINE_ADDRESS ULONG_MAX
+#endif
+
/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
@@ -684,7 +688,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
options_bytes++; /* NUL termination */
- status = efi_low_alloc(sys_table_arg, options_bytes, 0, &cmdline_addr);
+ status = efi_high_alloc(sys_table_arg, options_bytes, 0,
+ &cmdline_addr, MAX_CMDLINE_ADDRESS);
if (status != EFI_SUCCESS)
return NULL;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 6b6548fda089..5ed3d3f38166 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -43,4 +43,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
+ unsigned long size, u8 *out);
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed);
+
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index b62e2f5dcab3..b1c22cf18f7d 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -147,6 +147,20 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (status)
goto fdt_set_fail;
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ efi_status_t efi_status;
+
+ efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+ (u8 *)&fdt_val64);
+ if (efi_status == EFI_SUCCESS) {
+ status = fdt_setprop(fdt, node, "kaslr-seed",
+ &fdt_val64, sizeof(fdt_val64));
+ if (status)
+ goto fdt_set_fail;
+ } else if (efi_status != EFI_NOT_FOUND) {
+ return efi_status;
+ }
+ }
return EFI_SUCCESS;
fdt_set_fail:
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
new file mode 100644
index 000000000000..53f6d3fe6d86
--- /dev/null
+++ b/drivers/firmware/efi/libstub/random.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+struct efi_rng_protocol {
+ efi_status_t (*get_info)(struct efi_rng_protocol *,
+ unsigned long *, efi_guid_t *);
+ efi_status_t (*get_rng)(struct efi_rng_protocol *,
+ efi_guid_t *, unsigned long, u8 *out);
+};
+
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
+ unsigned long size, u8 *out)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_status_t status;
+ struct efi_rng_protocol *rng;
+
+ status = efi_call_early(locate_protocol, &rng_proto, NULL,
+ (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return rng->get_rng(rng, NULL, size, out);
+}
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align)
+{
+ u64 start, end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ return 0;
+
+ start = round_up(md->phys_addr, align);
+ end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
+ align);
+
+ if (start > end)
+ return 0;
+
+ return (end - start + 1) / align;
+}
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md) ((md)->virt_addr)
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *addr,
+ unsigned long random_seed)
+{
+ unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ efi_status_t status;
+ efi_memory_desc_t *memory_map;
+ int map_offset;
+
+ status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
+ &desc_size, NULL, NULL);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ /* count the suitable slots in each memory map entry */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ unsigned long slots;
+
+ slots = get_entry_num_slots(md, size, align);
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ }
+
+ /* find a random number between 0 and total_slots */
+ target_slot = (total_slots * (u16)random_seed) >> 16;
+
+ /*
+ * target_slot is now a value in the range [0, total_slots), and so
+ * it corresponds with exactly one of the suitable slots we recorded
+ * when iterating over the memory map the first time around.
+ *
+ * So iterate over the memory map again, subtracting the number of
+ * slots of each entry at each iteration, until we have found the entry
+ * that covers our chosen slot. Use the residual value of target_slot
+ * to calculate the randomly chosen address, and allocate it directly
+ * using EFI_ALLOCATE_ADDRESS.
+ */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ efi_physical_addr_t target;
+ unsigned long pages;
+
+ if (target_slot >= MD_NUM_SLOTS(md)) {
+ target_slot -= MD_NUM_SLOTS(md);
+ continue;
+ }
+
+ target = round_up(md->phys_addr, align) + target_slot * align;
+ pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+
+ status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
+ if (status == EFI_SUCCESS)
+ *addr = target;
+ break;
+ }
+
+ efi_call_early(free_pool, memory_map);
+
+ return status;
+}
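An isolated model of the slot-selection math in efi_random_alloc() above: each memory region contributes some number of aligned slots, the low 16 bits of the seed pick a target slot out of the running total, and a second walk finds the region containing that slot. Userspace sketch with a toy region table.

    #include <stdint.h>
    #include <stdio.h>

    struct region {
        unsigned long slots;    /* precomputed suitable slots in this region */
    };

    /* Map a 16-bit seed onto one of total_slots slots, then walk the regions
     * until the residual index falls inside one of them. */
    static long pick_region(const struct region *r, int nr, uint16_t seed)
    {
        unsigned long total = 0, target;
        int i;

        for (i = 0; i < nr; i++)
            total += r[i].slots;
        if (!total)
            return -1;

        target = (total * seed) >> 16;    /* in [0, total) */

        for (i = 0; i < nr; i++) {
            if (target < r[i].slots)
                return i;                 /* slot 'target' lies in region i */
            target -= r[i].slots;
        }
        return -1;                        /* unreachable if total was right */
    }

    int main(void)
    {
        struct region map[] = { { 10 }, { 0 }, { 40 } };

        printf("%ld\n", pick_region(map, 3, 0x8000));    /* prints 2 */
        return 0;
    }
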
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 7f2ea21c730d..6f182fd91a6d 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
{ NULL_GUID, "", NULL },
};
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ * final "*" character matches any trailing characters @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
- char u = var_name[*match];
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
return true;
- /* Case sensitive match */
- if (!c && *match == len)
- return true;
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
- if (c != u)
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
return false;
-
- if (!c)
- return true;
+ }
}
- return true;
}
bool
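A small userspace model of the rewritten variable_matches() above (the real function also reports how many non-wildcard characters matched; that output parameter is omitted here): the name is length-delimited rather than NUL-terminated, a '*' in the pattern matches any remainder including nothing, and otherwise characters must match one-for-one until both strings end together.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool matches(const char *name, size_t len, const char *pattern)
    {
        size_t i;

        for (i = 0; ; i++) {
            char c = pattern[i];

            switch (c) {
            case '*':
                return true;              /* wildcard: rest doesn't matter */
            case '\0':
                return i == len;          /* both must end together */
            default:
                if (i < len && c == name[i])
                    continue;             /* keep comparing */
                return false;
            }
        }
    }

    int main(void)
    {
        printf("%d\n", matches("Boot0001", 8, "Boot*"));    /* 1 */
        printf("%d\n", matches("Boot", 4, "Boot*"));        /* 1: '*' may match nothing */
        printf("%d\n", matches("BootOrder", 9, "Boot"));    /* 0 */
        return 0;
    }
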
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bb1099c549df..053fc2f465df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1673,6 +1673,7 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ unsigned fw_version;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 8ac49812a716..5a8fbadbd27b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
- return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ atpx->functions.power_cntl = true;
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9d88023df836..c961fe093e12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -61,12 +61,6 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1475,7 +1469,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+ if (amdgpu_device_is_px(ddev))
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e23843f4d877..4488e82f87b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
fw_info.feature = adev->vce.fb_version;
break;
case AMDGPU_INFO_FW_UVD:
- fw_info.ver = 0;
+ fw_info.ver = adev->uvd.fw_version;
fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_GMC:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 064ebb347074..89df7871653d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -52,7 +52,7 @@ struct amdgpu_hpd;
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
RMX_OFF,
@@ -308,8 +308,8 @@ struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
bool mode_config_initialized;
- struct amdgpu_crtc *crtcs[6];
- struct amdgpu_afmt *afmt[7];
+ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b8fbbd7699e4..73628c7599e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -540,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (!metadata_size) {
if (bo->metadata_size) {
kfree(bo->metadata);
+ bo->metadata = NULL;
bo->metadata_size = 0;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 53f987aeeacf..3b35ad83867c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
(adev->uvd.fw->size) - offset);
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a745eeeb5d82..bb0da76051a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e0bba29e167..1cd6de575305 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index aa491540ba85..946300764609 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
- uint32_t seq = ring->fence_drv.sync_seq;
+ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index c34c393e9aea..d5e19b5fbbfb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
union SQ_CMD_BITS *in_reg_sq_cmd,
union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
{
- int status;
+ int status = 0;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 39d7e2e15c11..d268bf18a662 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1665,13 +1665,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb;
int len, ret, port_num;
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
- if (!mstb)
+ if (!mstb) {
+ drm_dp_put_port(port);
return -EINVAL;
+ }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1697,6 +1703,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
+ drm_dp_put_port(port);
return ret;
}
@@ -1779,6 +1786,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
} else {
port = NULL;
@@ -1804,6 +1816,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -2109,6 +2124,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2123,6 +2140,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bc7b8faba84d..7e461dca564c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2838,7 +2838,14 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP 0x138170
-#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6a2c76e367a5..97d1ed20418b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -248,8 +248,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev)) {
+ if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+ DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+ return false;
+ }
+
pipe_config->pipe_bpp = 24;
+ }
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9e530a739354..fc28c512ece3 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -180,7 +180,8 @@ struct stepping_info {
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'}
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
+ {'J', '0'}, {'K', '0'}
};
static struct stepping_info bxt_stepping_info[] = {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 7e6158b889da..3c6b07683bd9 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
- ddi_translations_edp = bdw_ddi_translations_edp;
+
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations_edp = bdw_ddi_translations_edp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ } else {
+ ddi_translations_edp = bdw_ddi_translations_dp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ }
+
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
@@ -3188,12 +3196,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
- /* HDMI has nothing special to destroy, so we can go with this. */
- intel_dp_encoder_destroy(encoder);
-}
-
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3212,7 +3214,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .destroy = intel_ddi_destroy,
+ .reset = intel_dp_encoder_reset,
+ .destroy = intel_dp_encoder_destroy,
};
static struct intel_connector *
@@ -3284,6 +3287,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f859a5b87ed4..afa81691163d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4447,7 +4447,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 78b8ec84d576..e55a82a99e7f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -5035,7 +5035,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -5077,7 +5077,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 0639275fc471..06bd9257acdc 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -477,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ intel_connector->unregister(intel_connector);
+
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -490,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
- drm_modeset_unlock_all(dev);
- intel_connector->unregister(intel_connector);
-
- drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0d00f07b7163..f34a219ec5c4 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1204,6 +1204,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e6c035b0fc1c..4b8ed9f2dabc 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1388,8 +1388,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}
- if (!live_status)
- DRM_DEBUG_KMS("Live status not up!");
+ if (!live_status) {
+ DRM_DEBUG_KMS("HDMI live status down\n");
+ /*
+ * The live status register is not reliable on all Intel platforms.
+ * So honour live_status only on certain platforms; for the others,
+ * read the EDID to determine the presence of a sink.
+ */
+ if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+ live_status = true;
+ }
intel_hdmi_unset_edid(connector);
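
The platform gate added above can be read as a small predicate; a hedged sketch (gen and is_ivybridge stand in for the driver's INTEL_INFO()->gen and IS_IVYBRIDGE() checks, and are not driver fields):

    #include <stdbool.h>

    /*
     * Illustrative only: trust the live status register only where it is
     * known to be reliable; on older platforms force live_status to true
     * so the code falls through to the EDID read.
     */
    bool trust_live_status(int gen, bool is_ivybridge)
    {
        return gen >= 7 && !is_ivybridge;
    }

With this reading, forcing live_status to true on older platforms means "fall through to the EDID probe", not "a sink is definitely present".
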
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d69547a65dbb..7058f75c7b42 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f091ad12d694..0a68d2ec89dc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6620,6 +6620,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+ /*
+ * Wait at least 100 clocks before re-enabling clock gating. See
+ * the definition of L3SQCREG1 in BSpec.
+ */
+ POSTING_READ(GEN8_L3SQCREG1);
+ udelay(1);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f6b2a814e629..9d48443bca2e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1922,6 +1922,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
return 0;
}
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+ if (!dev_priv->status_page_dmah)
+ return;
+
+ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+ ring->status_page.page_addr = NULL;
+}
+
static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@@ -1938,9 +1949,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
static int init_status_page(struct intel_engine_cs *ring)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = ring->status_page.obj;
- if ((obj = ring->status_page.obj) == NULL) {
+ if (obj == NULL) {
unsigned flags;
int ret;
@@ -2134,7 +2145,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
} else {
- BUG_ON(ring->id != RCS);
+ WARN_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
@@ -2179,7 +2190,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(ring);
+ if (I915_NEED_GFX_HWS(ring->dev)) {
+ cleanup_status_page(ring);
+ } else {
+ WARN_ON(ring->id != RCS);
+ cleanup_phys_status_page(ring);
+ }
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
@@ -2341,11 +2357,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 43cba129a0c0..cc91ae832ffb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1132,7 +1132,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ if (IS_HASWELL(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 3216e157a8a0..89da47234016 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
struct nvkm_ramht *ramht = *pramht;
if (ramht) {
nvkm_gpuobj_del(&ramht->gpuobj);
- kfree(*pramht);
+ vfree(*pramht);
*pramht = NULL;
}
}
@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
struct nvkm_ramht *ramht;
int ret, i;
- if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
- sizeof(*ramht->data), GFP_KERNEL)))
+ if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
+ (size >> 3) * sizeof(*ramht->data))))
return -ENOMEM;
ramht->device = device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 9f5dfc85147a..36655a74c538 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1717,6 +1717,8 @@ gf100_gr_init(struct gf100_gr *gr)
gf100_gr_mmio(gr, gr->func->mmio);
+ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 183aea1abebc..5edebf495c07 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 01a86948eb8c..3ab90179e9ab 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad24b31..79bab6fd76bb 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1739,6 +1739,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
@@ -1748,6 +1749,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
@@ -1769,6 +1774,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
@@ -1784,6 +1790,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index adf74f4366bb..0b04b9282f56 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2ad462896896..32491355a1d4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
+static const unsigned ni_dig_offsets[] =
+{
+ NI_DIG0_REGISTER_OFFSET,
+ NI_DIG1_REGISTER_OFFSET,
+ NI_DIG2_REGISTER_OFFSET,
+ NI_DIG3_REGISTER_OFFSET,
+ NI_DIG4_REGISTER_OFFSET,
+ NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+ EVERGREEN_DP0_REGISTER_OFFSET,
+ EVERGREEN_DP1_REGISTER_OFFSET,
+ EVERGREEN_DP2_REGISTER_OFFSET,
+ EVERGREEN_DP3_REGISTER_OFFSET,
+ EVERGREEN_DP4_REGISTER_OFFSET,
+ EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * We go from crtc to connector, which is not reliable since it should be
+ * the opposite direction. If the crtc is enabled, find the dig_fe which
+ * selects this crtc and ensure that it is enabled. If such a dig_fe is
+ * found, find the dig_be which selects that dig_fe and ensure that it is
+ * enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1.enable is set, we should disconnect the timing
+ * from the dp symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+ unsigned crtc_id, unsigned *ret_dig_fe)
+{
+ unsigned i;
+ unsigned dig_fe;
+ unsigned dig_be;
+ unsigned dig_en_be;
+ unsigned uniphy_pll;
+ unsigned digs_fe_selected;
+ unsigned dig_be_mode;
+ unsigned dig_fe_mask;
+ bool is_enabled = false;
+ bool found_crtc = false;
+
+ /* loop through all running dig_fe to find selected crtc */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+ /* found running pipe */
+ found_crtc = true;
+ dig_fe_mask = 1 << i;
+ dig_fe = i;
+ break;
+ }
+ }
+
+ if (found_crtc) {
+ /* loop through all running dig_be to find selected dig_fe */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+ /* is dig_fe selected by this dig_be? */
+ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+ if (dig_fe_mask & digs_fe_selected &&
+ /* is dig_be in SST mode? */
+ dig_be_mode == NI_DIG_BE_DPSST) {
+ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+ ni_dig_offsets[i]);
+ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+ ni_tx_offsets[i]);
+ /* dig_be enabled and tx running */
+ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+ is_enabled = true;
+ *ret_dig_fe = dig_fe;
+ break;
+ }
+ }
+ }
+ }
+
+ return is_enabled;
+}
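
A simplified, self-contained model of the two-stage lookup performed above (the dig_model fields are stand-ins for the NI_DIG_FE_CNTL / NI_DIG_BE_* register reads; none of this is driver code):

    #include <stdbool.h>

    #define NUM_DIGS 6

    /* Illustrative stand-ins for the per-DIG register state the driver reads. */
    struct dig_model {
        bool     fe_on;          /* NI_DIG_FE_CNTL_SYMCLK_FE_ON          */
        unsigned fe_source;      /* NI_DIG_FE_CNTL_SOURCE_SELECT         */
        unsigned be_fe_select;   /* NI_DIG_BE_CNTL_FE_SOURCE_SELECT mask */
        bool     be_sst_running; /* enabled, symclk on, PLL on, SST mode */
    };

    bool crtc_drives_dp_sst(const struct dig_model digs[NUM_DIGS],
                            unsigned crtc_id, unsigned *ret_dig_fe)
    {
        unsigned i, fe = 0, fe_mask = 0;

        /* stage 1: find the front-end that sources this crtc */
        for (i = 0; i < NUM_DIGS; i++)
            if (digs[i].fe_on && digs[i].fe_source == crtc_id) {
                fe = i;
                fe_mask = 1u << i;
                break;
            }
        if (!fe_mask)
            return false;

        /* stage 2: find a running SST back-end that selected that front-end */
        for (i = 0; i < NUM_DIGS; i++)
            if ((digs[i].be_fe_select & fe_mask) && digs[i].be_sst_running) {
                *ret_dig_fe = fe;
                return true;
            }
        return false;
    }
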
+
+/*
+ * Blank dig when in dp sst mode
+ * Dig ignores crtc timing
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+ unsigned dig_fe)
+{
+ unsigned stream_ctrl;
+ unsigned fifo_ctrl;
+ unsigned counter = 0;
+
+ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+ DRM_ERROR("dig %d , should be enable\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+ msleep(1);
+ counter++;
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ }
+ if (counter >= 32)
+ DRM_ERROR("counter exceeds %d\n", counter);
+
+ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
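
evergreen_blank_dp_output() above follows a common bounded-poll pattern: clear the enable bit, then poll the status bit for at most 32 iterations with a 1 ms sleep. A generic runnable sketch of that pattern (read_status and sleep_ms are caller-supplied placeholders for the register read and msleep):

    #include <stdbool.h>

    /* Illustrative bounded poll: true if the bit cleared within max_tries. */
    bool wait_for_bit_clear(unsigned (*read_status)(void), unsigned bit,
                            void (*sleep_ms)(unsigned), unsigned max_tries)
    {
        unsigned tries;

        for (tries = 0; tries < max_tries; tries++) {
            if (!(read_status() & bit))
                return true;
            sleep_ms(1);
        }
        return false;
    }
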
+
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
+ unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
-
+ /*
+ * We should disable the dig if it drives dp sst, but we are in
+ * radeon_device_init and the display topology is not known yet
+ * (it only becomes available after radeon_modeset_init).
+ * radeon_atom_encoder_dpms_dig would do the job if we initialized
+ * it properly; for now do it manually here.
+ */
+ if (ASIC_IS_DCE5(rdev) &&
+ evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
+ evergreen_blank_dp_output(rdev, dig_fe);
+ /* we could remove the 6 lines below */
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
+
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL 0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
+# define NI_DIG_BE_DPSST 0
/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
+#define EVERGREEN_DP_STEER_FIFO 0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9bc408c9f9f6..c4b4f298a283 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
-bool radeon_has_atpx_dgpu_power_cntl(void) {
- return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* radeon_atpx_call - call an ATPX method
*
@@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ atpx->functions.power_cntl = true;
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 340f3f549f29..9cfc1c3e1965 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.output_csc_property,
@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f78f111e68de..c566993a2ec3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+ if (rdev->flags & RADEON_IS_PX)
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 3b0c229d7dcd..db64e0062689 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN;
+ tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e06ac546a90f..f342aad79cc6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ if (radeon_ttm_tt_has_userptr(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 7285adb27099..caa73de584a5 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index a0e28f3a278d..0585fd2031dd 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
goto err_register;
}
- pdev->dev.of_node = of_node;
pdev->dev.parent = dev;
ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
platform_device_put(pdev);
goto err_register;
}
+
+ /*
+ * Set of_node only after calling platform_device_add. Otherwise
+ * the platform:imx-ipuv3-crtc modalias won't be used.
+ */
+ pdev->dev.of_node = of_node;
}
return 0;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 94d828027f20..6160aa567fbf 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -589,11 +589,21 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_irq *irq_params = gpudev->irq;
irqreturn_t ret = IRQ_NONE;
- unsigned int status = 0, tmp;
+ unsigned int status = 0, tmp, int_bit;
int i;
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
+ /*
+ * Clear all the interrupt bits except ADRENO_INT_RBBM_AHB_ERROR. Even
+ * if we cleared that bit here, it would stay high until the error is
+ * cleared in its respective handler; clearing it early would only make
+ * the interrupt fire again.
+ */
+ int_bit = ADRENO_INT_BIT(adreno_dev, ADRENO_INT_RBBM_AHB_ERROR);
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
+ status & ~int_bit);
+
/* Loop through all set interrupts and call respective handlers */
for (tmp = status; tmp != 0;) {
i = fls(tmp) - 1;
@@ -612,9 +622,14 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
gpudev->irq_trace(adreno_dev, status);
- if (status)
+ /*
+ * Clear ADRENO_INT_RBBM_AHB_ERROR bit after this interrupt has been
+ * cleared in its respective handler
+ */
+ if (status & int_bit)
adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
- status);
+ int_bit);
+
return ret;
}
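
The interrupt flow above acks every pending bit except the AHB-error bit up front, dispatches the per-bit handlers, and only then acks the AHB-error bit. A compact sketch of that masking (ack/handle are placeholders; the sketch walks bits lowest-first for brevity, while the driver walks highest-first with fls()):

    /*
     * Illustrative only: status is the pending-interrupt mask, ahb_bit the
     * one bit whose underlying error must be cleared by its handler before
     * the status bit itself is acked.
     */
    void dispatch_irqs(unsigned status, unsigned ahb_bit,
                       void (*ack)(unsigned), void (*handle)(unsigned))
    {
        unsigned tmp;

        ack(status & ~ahb_bit);         /* ack everything but the AHB error */

        for (tmp = status; tmp; tmp &= tmp - 1)
            handle(tmp & ~(tmp - 1));   /* lowest set bit */

        if (status & ahb_bit)
            ack(ahb_bit);               /* safe now that the handler has run */
    }
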
@@ -1961,7 +1976,7 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
KGSL_STATE_ACTIVE);
device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
adreno_fault_detect_stop(adreno_dev);
- kgsl_pwrscale_disable(device);
+ kgsl_pwrscale_disable(device, true);
}
mutex_unlock(&device->mutex);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 0f3403cb0095..a2af26c81f50 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -198,6 +198,10 @@ struct adreno_gpudev;
/* Time to allow preemption to complete (in ms) */
#define ADRENO_PREEMPT_TIMEOUT 10000
+#define ADRENO_INT_BIT(a, _bit) (((a)->gpucore->gpudev->int_bits) ? \
+ (adreno_get_int(a, _bit) < 0 ? 0 : \
+ BIT(adreno_get_int(a, _bit))) : 0)
+
/**
* enum adreno_preempt_states
* ADRENO_PREEMPT_NONE: No preemption is scheduled
@@ -574,6 +578,11 @@ enum adreno_regs {
ADRENO_REG_REGISTER_MAX,
};
+enum adreno_int_bits {
+ ADRENO_INT_RBBM_AHB_ERROR,
+ ADRENO_INT_BITS_MAX,
+};
+
/**
* adreno_reg_offsets: Holds array of register offsets
* @offsets: Offset array of size defined by enum adreno_regs
@@ -589,6 +598,7 @@ struct adreno_reg_offsets {
#define ADRENO_REG_UNUSED 0xFFFFFFFF
#define ADRENO_REG_SKIP 0xFFFFFFFE
#define ADRENO_REG_DEFINE(_offset, _reg) [_offset] = _reg
+#define ADRENO_INT_DEFINE(_offset, _val) ADRENO_REG_DEFINE(_offset, _val)
/*
* struct adreno_vbif_data - Describes vbif register value pair
@@ -726,6 +736,7 @@ struct adreno_gpudev {
* so define them in the structure and use them as variables.
*/
const struct adreno_reg_offsets *reg_offsets;
+ unsigned int *const int_bits;
const struct adreno_ft_perf_counters *ft_perf_counters;
unsigned int ft_perf_counters_count;
@@ -1101,6 +1112,23 @@ static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
return gpudev->reg_offsets->offsets[offset_name];
}
+/*
+ * adreno_get_int() - Returns the offset value of an interrupt bit from
+ * the interrupt bit array in the gpudev node
+ * @adreno_dev: Pointer to the adreno device
+ * @bit_name: The interrupt bit enum whose bit is returned
+ */
+static inline unsigned int adreno_get_int(struct adreno_device *adreno_dev,
+ enum adreno_int_bits bit_name)
+{
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+ if (bit_name >= ADRENO_INT_BITS_MAX)
+ return -ERANGE;
+
+ return gpudev->int_bits[bit_name];
+}
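
ADRENO_INT_BIT() resolves a generic interrupt name to a GPU-specific hardware bit through the per-core int_bits[] table defined in each aNxx file below. A standalone model of that indirection (the bit numbers here are made up, not real Adreno values):

    #include <stdio.h>

    enum int_name { INT_AHB_ERROR, INT_BITS_MAX };

    /* Hypothetical per-core tables mapping a generic name to a hw bit index. */
    static const unsigned core_a_int_bits[INT_BITS_MAX] = { [INT_AHB_ERROR] = 18 };
    static const unsigned core_b_int_bits[INT_BITS_MAX] = { [INT_AHB_ERROR] = 20 };

    static unsigned int_mask(const unsigned *int_bits, enum int_name name)
    {
        if (!int_bits || name >= INT_BITS_MAX)
            return 0;       /* unknown interrupt contributes no bit */
        return 1u << int_bits[name];
    }

    int main(void)
    {
        printf("core A AHB mask: 0x%x\n", int_mask(core_a_int_bits, INT_AHB_ERROR));
        printf("core B AHB mask: 0x%x\n", int_mask(core_b_int_bits, INT_AHB_ERROR));
        return 0;
    }
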
+
/**
* adreno_gpu_fault() - Return the current state of the GPU
* @adreno_dev: A pointer to the adreno_device to query
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 97e71464c2df..3f5a9c6318f6 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1425,6 +1425,10 @@ static struct adreno_coresight a3xx_coresight = {
.groups = a3xx_coresight_groups,
};
+static unsigned int a3xx_int_bits[ADRENO_INT_BITS_MAX] = {
+ ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A3XX_INT_RBBM_AHB_ERROR),
+};
+
/* Register offset defines for A3XX */
static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, A3XX_CP_ME_RAM_WADDR),
@@ -1853,6 +1857,7 @@ int a3xx_microcode_load(struct adreno_device *adreno_dev,
struct adreno_gpudev adreno_a3xx_gpudev = {
.reg_offsets = &a3xx_reg_offsets,
+ .int_bits = a3xx_int_bits,
.ft_perf_counters = a3xx_ft_perf_counters,
.ft_perf_counters_count = ARRAY_SIZE(a3xx_ft_perf_counters),
.perfcounters = &a3xx_perfcounters,
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index bfbdb0e7ac1f..5ca04e522270 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -739,6 +739,10 @@ static void a4xx_err_callback(struct adreno_device *adreno_dev, int bit)
}
}
+static unsigned int a4xx_int_bits[ADRENO_INT_BITS_MAX] = {
+ ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A4XX_INT_RBBM_AHB_ERROR),
+};
+
/* Register offset defines for A4XX, in order of enum adreno_regs */
static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, A4XX_CP_ME_RAM_WADDR),
@@ -1765,6 +1769,7 @@ static struct adreno_snapshot_data a4xx_snapshot_data = {
struct adreno_gpudev adreno_a4xx_gpudev = {
.reg_offsets = &a4xx_reg_offsets,
+ .int_bits = a4xx_int_bits,
.ft_perf_counters = a4xx_ft_perf_counters,
.ft_perf_counters_count = ARRAY_SIZE(a4xx_ft_perf_counters),
.perfcounters = &a4xx_perfcounters,
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 2891940b8f5b..860f6d2925f1 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2872,6 +2872,10 @@ static struct adreno_ft_perf_counters a5xx_ft_perf_counters[] = {
{KGSL_PERFCOUNTER_GROUP_TSE, A5XX_TSE_INPUT_PRIM_NUM},
};
+static unsigned int a5xx_int_bits[ADRENO_INT_BITS_MAX] = {
+ ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A5XX_INT_RBBM_AHB_ERROR),
+};
+
/* Register offset defines for A5XX, in order of enum adreno_regs */
static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR),
@@ -3504,6 +3508,7 @@ static struct adreno_coresight a5xx_coresight = {
struct adreno_gpudev adreno_a5xx_gpudev = {
.reg_offsets = &a5xx_reg_offsets,
+ .int_bits = a5xx_int_bits,
.ft_perf_counters = a5xx_ft_perf_counters,
.ft_perf_counters_count = ARRAY_SIZE(a5xx_ft_perf_counters),
.coresight = &a5xx_coresight,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d71c6a63f2d3..172de7406c26 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1387,6 +1387,47 @@ done:
return 0;
}
+static ssize_t kgsl_pwrctrl_pwrscale_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int ret;
+ unsigned int enable = 0;
+
+ if (device == NULL)
+ return 0;
+
+ ret = kgsl_sysfs_store(buf, &enable);
+ if (ret)
+ return ret;
+
+ mutex_lock(&device->mutex);
+
+ if (enable)
+ kgsl_pwrscale_enable(device);
+ else
+ kgsl_pwrscale_disable(device, false);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static ssize_t kgsl_pwrctrl_pwrscale_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrscale *psc;
+
+ if (device == NULL)
+ return 0;
+ psc = &device->pwrscale;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", psc->enabled);
+}
+
static DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show,
kgsl_pwrctrl_gpuclk_store);
static DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
@@ -1449,6 +1490,9 @@ static DEVICE_ATTR(clock_mhz, 0444, kgsl_pwrctrl_clock_mhz_show, NULL);
static DEVICE_ATTR(freq_table_mhz, 0444,
kgsl_pwrctrl_freq_table_mhz_show, NULL);
static DEVICE_ATTR(temp, 0444, kgsl_pwrctrl_temp_show, NULL);
+static DEVICE_ATTR(pwrscale, 0644,
+ kgsl_pwrctrl_pwrscale_show,
+ kgsl_pwrctrl_pwrscale_store);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
@@ -1477,6 +1521,7 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_clock_mhz,
&dev_attr_freq_table_mhz,
&dev_attr_temp,
+ &dev_attr_pwrscale,
NULL
};
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 01d3b74c16fd..85cd29b5364e 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -189,19 +189,21 @@ EXPORT_SYMBOL(kgsl_pwrscale_update);
/*
* kgsl_pwrscale_disable - temporarily disable the governor
* @device: The device
+ * @turbo: Indicates if pwrlevel should be forced to turbo
*
* Temporarily disable the governor, to prevent interference
* with profiling tools that expect a fixed clock frequency.
* This function must be called with the device mutex locked.
*/
-void kgsl_pwrscale_disable(struct kgsl_device *device)
+void kgsl_pwrscale_disable(struct kgsl_device *device, bool turbo)
{
BUG_ON(!mutex_is_locked(&device->mutex));
if (device->pwrscale.devfreqptr)
queue_work(device->pwrscale.devfreq_wq,
&device->pwrscale.devfreq_suspend_ws);
device->pwrscale.enabled = false;
- kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
+ if (turbo)
+ kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
}
EXPORT_SYMBOL(kgsl_pwrscale_disable);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
index c85317869f1d..0756a4490f22 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.h
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -123,7 +123,7 @@ void kgsl_pwrscale_sleep(struct kgsl_device *device);
void kgsl_pwrscale_wake(struct kgsl_device *device);
void kgsl_pwrscale_enable(struct kgsl_device *device);
-void kgsl_pwrscale_disable(struct kgsl_device *device);
+void kgsl_pwrscale_disable(struct kgsl_device *device, bool turbo);
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags);
int kgsl_devfreq_get_dev_status(struct device *, struct devfreq_dev_status *);
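
The new bool argument lets callers decide whether disabling the governor should also pin the GPU at the turbo power level. A hedged sketch of the two call patterns this series uses (not driver code verbatim; it assumes the KGSL headers are available and the device mutex is held, as the kerneldoc above requires):

    #include <stdbool.h>

    /* Profiling path (adreno_setproperty): fix the clock at maximum. */
    static void disable_for_profiling(struct kgsl_device *device)
    {
        kgsl_pwrscale_disable(device, true);    /* also jump to TURBO */
    }

    /* New "pwrscale" sysfs node: just stop the governor. */
    static void disable_from_sysfs(struct kgsl_device *device)
    {
        kgsl_pwrscale_disable(device, false);   /* keep the current pwrlevel */
    }
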
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8b78a7f1f779..909ab0176ef2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -255,6 +255,7 @@
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
#define USB_VENDOR_ID_CREATIVELABS 0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
#define USB_VENDOR_ID_CVTOUCH 0x1ff7
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 7dd0953cd70f..dc8e6adf95a4 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 3c0f47ac8e53..5c02d7bbc7f2 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3449,6 +3449,10 @@ static const struct wacom_features wacom_features_0x33E =
{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+ { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC };
@@ -3614,6 +3618,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x33C) },
{ USB_DEVICE_WACOM(0x33D) },
{ USB_DEVICE_WACOM(0x33E) },
+ { USB_DEVICE_WACOM(0x343) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 0ec83d1437cd..294444d5f59e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1146,6 +1146,12 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
goto err;
}
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
+ drvdata->vaddr == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
if (!drvdata->enable)
goto out;
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 83e9f591a54b..e7a348807f0c 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -1,6 +1,7 @@
config STM
tristate "System Trace Module devices"
select CONFIGFS_FS
+ select SRCU
help
A System Trace Module (STM) is a device exporting data in System
Trace Protocol (STP) format as defined by MIPI STP standards.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
cbd_t __iomem *rbase;
u_char *txbuf[CPM_MAXBD];
u_char *rxbuf[CPM_MAXBD];
- u32 txdma[CPM_MAXBD];
- u32 rxdma[CPM_MAXBD];
+ dma_addr_t txdma[CPM_MAXBD];
+ dma_addr_t rxdma[CPM_MAXBD];
};
static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
return -EIO;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (i = 0; i < num; i++, msgs++) {
stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
out:
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
return ret;
}
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
+ clk_disable(i2c->clk);
+
+ return 0;
+
err_clk:
clk_disable_unprepare(i2c->clk);
return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c->suspended = 1;
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
int ret = 0;
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
ret = exynos5_hsi2c_clock_setup(i2c);
if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
}
exynos5_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index b13936dacc78..f2a7f72f7aa6 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
int rc;
int irq;
+ init_waitqueue_head(&data->data_ready_queue);
+ clear_bit(0, &data->flags);
if (client->irq)
irq = client->irq;
else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
return rc;
}
- init_waitqueue_head(&data->data_ready_queue);
- clear_bit(0, &data->flags);
data->eoc_irq = irq;
return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
int eoc_gpio;
int err;
const char *name = NULL;
- enum asahi_compass_chipset chipset;
+ enum asahi_compass_chipset chipset = AK_MAX_TYPE;
/* Grab and set up the supplied GPIO. */
if (client->dev.platform_data)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 6b4e8a008bc0..564adf3116e8 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 8b5a934e1133..886f61ea6cc7 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e3ef28861be6..24f3ca2c4ad7 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
+
#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
@@ -682,6 +684,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (count < sizeof hdr)
return -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index de9cd6901752..bc147582bed9 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_va) {
+ if (user && !cq->bar2_pa) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index aa515afee724..53aa7515f542 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+ if (is_t4(rdev->lldi.adapter_type))
+ return NULL;
+
return rdev->bar2_kva + bar2_qoffset;
}
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c4e091528390..fd17443aeacd 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -273,7 +273,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -405,8 +405,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
- int max_mtu;
- int oper_mtu;
+ u16 max_mtu;
+ u16 oper_mtu;
int err;
u8 ib_link_width_oper;
u8 vl_hw_cap;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/export.h>
#include <linux/uio.h>
+#include <rdma/ib.h>
+
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 519e3db4d73e..f94ecf02d9cb 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -23,6 +23,12 @@
#include <linux/regulator/consumer.h>
#include <uapi/linux/hbtp_input.h>
#include "../input-compat.h"
+#include <linux/ktime.h>
+#include <linux/uaccess.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
#if defined(CONFIG_FB)
#include <linux/notifier.h>
@@ -32,6 +38,10 @@
#define HBTP_INPUT_NAME "hbtp_input"
#define DISP_COORDS_SIZE 2
+#define HBTP_PINCTRL_VALID_STATE_CNT (2)
+#define HBTP_HOLD_DURATION_US (10)
+#define HBTP_PINCTRL_DDIC_SEQ_NUM (4)
+
struct hbtp_data {
struct platform_device *pdev;
struct input_dev *input_dev;
@@ -41,6 +51,20 @@ struct hbtp_data {
#if defined(CONFIG_FB)
struct notifier_block fb_notif;
#endif
+ struct pinctrl *ts_pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ struct pinctrl_state *ddic_rst_state_active;
+ struct pinctrl_state *ddic_rst_state_suspend;
+ u32 ts_pinctrl_seq_delay;
+ u32 ddic_pinctrl_seq_delay[HBTP_PINCTRL_DDIC_SEQ_NUM];
+ u32 fb_resume_seq_delay;
+ bool lcd_on;
+ bool power_suspended;
+ bool power_sync_enabled;
+ bool power_sig_enabled;
+ struct completion power_resume_sig;
+ struct completion power_suspend_sig;
struct regulator *vcc_ana;
struct regulator *vcc_dig;
int afe_load_ua;
@@ -59,31 +83,68 @@ struct hbtp_data {
bool override_disp_coords;
bool manage_afe_power_ana;
bool manage_power_dig;
+ u32 power_on_delay;
+ u32 power_off_delay;
+ bool manage_pin_ctrl;
};
static struct hbtp_data *hbtp;
#if defined(CONFIG_FB)
+static int hbtp_fb_suspend(struct hbtp_data *ts);
+static int hbtp_fb_early_resume(struct hbtp_data *ts);
+static int hbtp_fb_resume(struct hbtp_data *ts);
+#endif
+
+#if defined(CONFIG_FB)
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
{
int blank;
struct fb_event *evdata = data;
struct hbtp_data *hbtp_data =
- container_of(self, struct hbtp_data, fb_notif);
- char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
+ container_of(self, struct hbtp_data, fb_notif);
- if (evdata && evdata->data && event == FB_EVENT_BLANK &&
- hbtp_data && hbtp_data->input_dev) {
+ if (evdata && evdata->data && hbtp_data &&
+ (event == FB_EARLY_EVENT_BLANK ||
+ event == FB_R_EARLY_EVENT_BLANK)) {
blank = *(int *)(evdata->data);
- if (blank == FB_BLANK_UNBLANK)
- kobject_uevent_env(&hbtp_data->input_dev->dev.kobj,
- KOBJ_ONLINE, envp);
- else if (blank == FB_BLANK_POWERDOWN)
- kobject_uevent_env(&hbtp_data->input_dev->dev.kobj,
- KOBJ_OFFLINE, envp);
+ if (event == FB_EARLY_EVENT_BLANK) {
+ if (blank == FB_BLANK_UNBLANK) {
+ pr_debug("%s: receives EARLY_BLANK:UNBLANK\n",
+ __func__);
+ hbtp_data->lcd_on = true;
+ hbtp_fb_early_resume(hbtp_data);
+ } else if (blank == FB_BLANK_POWERDOWN) {
+ pr_debug("%s: receives EARLY_BLANK:POWERDOWN\n",
+ __func__);
+ hbtp_data->lcd_on = false;
+ }
+ } else if (event == FB_R_EARLY_EVENT_BLANK) {
+ if (blank == FB_BLANK_UNBLANK) {
+ pr_debug("%s: receives R_EARLY_BALNK:UNBLANK\n",
+ __func__);
+ hbtp_data->lcd_on = false;
+ hbtp_fb_suspend(hbtp_data);
+ } else if (blank == FB_BLANK_POWERDOWN) {
+ pr_debug("%s: receives R_EARLY_BALNK:POWERDOWN\n",
+ __func__);
+ hbtp_data->lcd_on = true;
+ }
+ }
}
+ if (evdata && evdata->data && hbtp_data &&
+ event == FB_EVENT_BLANK) {
+ blank = *(int *)(evdata->data);
+ if (blank == FB_BLANK_POWERDOWN) {
+ pr_debug("%s: receives BLANK:POWERDOWN\n", __func__);
+ hbtp_fb_suspend(hbtp_data);
+ } else if (blank == FB_BLANK_UNBLANK) {
+ pr_debug("%s: receives BLANK:UNBLANK\n", __func__);
+ hbtp_fb_resume(hbtp_data);
+ }
+ }
return 0;
}
#endif
@@ -111,6 +172,8 @@ static int hbtp_input_release(struct inode *inode, struct file *file)
return -ENOTTY;
}
hbtp->count--;
+ if (hbtp->power_sig_enabled)
+ hbtp->power_sig_enabled = false;
mutex_unlock(&hbtp->mutex);
return 0;
@@ -278,6 +341,14 @@ static int hbtp_pdev_power_on(struct hbtp_data *hbtp, bool on)
return ret;
}
}
+
+ if (hbtp->power_on_delay) {
+ pr_debug("%s: power-on-delay = %u\n", __func__,
+ hbtp->power_on_delay);
+ usleep_range(hbtp->power_on_delay,
+ hbtp->power_on_delay + HBTP_HOLD_DURATION_US);
+ }
+
if (hbtp->vcc_dig) {
ret = reg_set_load_check(hbtp->vcc_dig,
hbtp->dig_load_ua);
@@ -299,17 +370,171 @@ static int hbtp_pdev_power_on(struct hbtp_data *hbtp, bool on)
return 0;
reg_off:
- if (hbtp->vcc_ana) {
- reg_set_load_check(hbtp->vcc_ana, 0);
- regulator_disable(hbtp->vcc_ana);
- }
if (hbtp->vcc_dig) {
reg_set_load_check(hbtp->vcc_dig, 0);
regulator_disable(hbtp->vcc_dig);
}
+
+ if (hbtp->power_off_delay) {
+ pr_debug("%s: power-off-delay = %u\n", __func__,
+ hbtp->power_off_delay);
+ usleep_range(hbtp->power_off_delay,
+ hbtp->power_off_delay + HBTP_HOLD_DURATION_US);
+ }
+
+ if (hbtp->vcc_ana) {
+ reg_set_load_check(hbtp->vcc_ana, 0);
+ regulator_disable(hbtp->vcc_ana);
+ }
return 0;
}
+static int hbtp_gpio_select(struct hbtp_data *data, bool on)
+{
+ struct pinctrl_state *pins_state;
+ int ret = 0;
+
+ pins_state = on ? data->gpio_state_active : data->gpio_state_suspend;
+ if (!IS_ERR_OR_NULL(pins_state)) {
+ ret = pinctrl_select_state(data->ts_pinctrl, pins_state);
+ if (ret) {
+ dev_err(&data->pdev->dev,
+ "can not set %s pins\n",
+ on ? "ts_active" : "ts_suspend");
+ return ret;
+ }
+
+ if (on) {
+ if (data->ts_pinctrl_seq_delay) {
+ usleep_range(data->ts_pinctrl_seq_delay,
+ data->ts_pinctrl_seq_delay +
+ HBTP_HOLD_DURATION_US);
+ dev_dbg(&data->pdev->dev, "ts_pinctrl_seq_delay = %u\n",
+ data->ts_pinctrl_seq_delay);
+ }
+ }
+ } else {
+ dev_warn(&data->pdev->dev,
+ "not a valid '%s' pinstate\n",
+ on ? "ts_active" : "ts_suspend");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int hbtp_ddic_rst_select(struct hbtp_data *data, bool on)
+{
+ struct pinctrl_state *active, *suspend;
+ int ret = 0;
+
+ active = data->ddic_rst_state_active;
+ if (IS_ERR_OR_NULL(active)) {
+ dev_warn(&data->pdev->dev,
+ "not a valid ddic_rst_active pinstate\n");
+ return ret;
+ }
+
+ suspend = data->ddic_rst_state_suspend;
+ if (IS_ERR_OR_NULL(suspend)) {
+ dev_warn(&data->pdev->dev,
+ "not a valid ddic_rst_suspend pinstate\n");
+ return ret;
+ }
+
+ if (on) {
+ if (data->ddic_pinctrl_seq_delay[0]) {
+ usleep_range(data->ddic_pinctrl_seq_delay[0],
+ data->ddic_pinctrl_seq_delay[0] +
+ HBTP_HOLD_DURATION_US);
+ dev_dbg(&data->pdev->dev, "ddic_seq_delay[0] = %u\n",
+ data->ddic_pinctrl_seq_delay[0]);
+ }
+
+ ret = pinctrl_select_state(data->ts_pinctrl, active);
+ if (ret) {
+ dev_err(&data->pdev->dev,
+ "can not set ddic_rst_active pins\n");
+ return ret;
+ }
+ if (data->ddic_pinctrl_seq_delay[1]) {
+ usleep_range(data->ddic_pinctrl_seq_delay[1],
+ data->ddic_pinctrl_seq_delay[1] +
+ HBTP_HOLD_DURATION_US);
+ dev_dbg(&data->pdev->dev, "ddic_seq_delay[1] = %u\n",
+ data->ddic_pinctrl_seq_delay[1]);
+ }
+ ret = pinctrl_select_state(data->ts_pinctrl, suspend);
+ if (ret) {
+ dev_err(&data->pdev->dev,
+ "can not set ddic_rst_suspend pins\n");
+ return ret;
+ }
+
+ if (data->ddic_pinctrl_seq_delay[2]) {
+ usleep_range(data->ddic_pinctrl_seq_delay[2],
+ data->ddic_pinctrl_seq_delay[2] +
+ HBTP_HOLD_DURATION_US);
+ dev_dbg(&data->pdev->dev, "ddic_seq_delay[2] = %u\n",
+ data->ddic_pinctrl_seq_delay[2]);
+ }
+
+ ret = pinctrl_select_state(data->ts_pinctrl, active);
+ if (ret) {
+ dev_err(&data->pdev->dev,
+ "can not set ddic_rst_active pins\n");
+ return ret;
+ }
+
+ if (data->ddic_pinctrl_seq_delay[3]) {
+ usleep_range(data->ddic_pinctrl_seq_delay[3],
+ data->ddic_pinctrl_seq_delay[3] +
+ HBTP_HOLD_DURATION_US);
+ dev_dbg(&data->pdev->dev, "ddic_seq_delay[3] = %u\n",
+ data->ddic_pinctrl_seq_delay[3]);
+ }
+ } else {
+ ret = pinctrl_select_state(data->ts_pinctrl, suspend);
+ if (ret) {
+ dev_err(&data->pdev->dev,
+ "can not set ddic_rst_suspend pins\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int hbtp_pinctrl_enable(struct hbtp_data *ts, bool on)
+{
+ int rc = 0;
+
+ if (!ts->manage_pin_ctrl) {
+ pr_info("%s: pinctrl info is not available\n", __func__);
+ return 0;
+ }
+
+ if (!on)
+ goto pinctrl_suspend;
+
+ rc = hbtp_gpio_select(ts, true);
+ if (rc < 0)
+ return -EINVAL;
+
+ rc = hbtp_ddic_rst_select(ts, true);
+ if (rc < 0)
+ goto err_ddic_rst_pinctrl_enable;
+
+ return rc;
+
+pinctrl_suspend:
+	/* on suspend, put the DDIC reset line back in its suspend state */
+	if (ts->ddic_rst_state_suspend)
+		hbtp_ddic_rst_select(ts, false);
+err_ddic_rst_pinctrl_enable:
+ hbtp_gpio_select(ts, false);
+ return rc;
+}
+
static long hbtp_input_ioctl_handler(struct file *file, unsigned int cmd,
unsigned long arg, void __user *p)
{
@@ -318,6 +543,8 @@ static long hbtp_input_ioctl_handler(struct file *file, unsigned int cmd,
struct hbtp_input_absinfo absinfo[ABS_MT_LAST - ABS_MT_FIRST + 1];
struct hbtp_input_key key_data;
enum hbtp_afe_power_cmd power_cmd;
+ enum hbtp_afe_signal afe_signal;
+ enum hbtp_afe_power_ctrl afe_power_ctrl;
switch (cmd) {
case HBTP_SET_ABSPARAM:
@@ -408,6 +635,112 @@ static long hbtp_input_ioctl_handler(struct file *file, unsigned int cmd,
input_sync(hbtp->input_dev);
break;
+ case HBTP_SET_SYNCSIGNAL:
+ if (!hbtp || !hbtp->input_dev) {
+ pr_err("%s: The input device hasn't been created\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (!hbtp->power_sig_enabled) {
+		pr_err("%s: power_signal is not enabled\n", __func__);
+ return -EPERM;
+ }
+
+ if (copy_from_user(&afe_signal, (void *)arg,
+ sizeof(enum hbtp_afe_signal))) {
+ pr_err("%s: Error copying data\n", __func__);
+ return -EFAULT;
+ }
+
+ pr_debug("%s: receives %d signal\n", __func__, afe_signal);
+
+ switch (afe_signal) {
+ case HBTP_AFE_SIGNAL_ON_RESUME:
+ mutex_lock(&hbtp->mutex);
+ if (!hbtp->power_suspended) {
+ complete(&hbtp->power_resume_sig);
+ } else {
+ pr_err("%s: resume signal in wrong state\n",
+ __func__);
+ }
+ mutex_unlock(&hbtp->mutex);
+ break;
+ case HBTP_AFE_SIGNAL_ON_SUSPEND:
+ mutex_lock(&hbtp->mutex);
+ if (hbtp->power_suspended) {
+ complete(&hbtp->power_suspend_sig);
+ } else {
+ pr_err("%s: suspend signal in wrong state\n",
+ __func__);
+ }
+ mutex_unlock(&hbtp->mutex);
+ break;
+ default:
+ pr_err("%s: Unsupported command for afe signal, %d\n",
+ __func__, afe_signal);
+ return -EINVAL;
+ }
+ break;
+ case HBTP_SET_POWER_CTRL:
+ if (!hbtp || !hbtp->input_dev) {
+ pr_err("%s: The input device hasn't been created\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&afe_power_ctrl, (void *)arg,
+ sizeof(enum hbtp_afe_power_ctrl))) {
+ pr_err("%s: Error copying data\n", __func__);
+ return -EFAULT;
+ }
+ switch (afe_power_ctrl) {
+ case HBTP_AFE_POWER_ENABLE_SYNC:
+ pr_debug("%s: power_sync is enabled\n", __func__);
+ if (!hbtp->manage_pin_ctrl || !hbtp->manage_power_dig ||
+ !hbtp->manage_afe_power_ana) {
+ pr_err("%s: power/pin is not available\n",
+ __func__);
+ return -EFAULT;
+ }
+ mutex_lock(&hbtp->mutex);
+ error = hbtp_pdev_power_on(hbtp, true);
+ if (error) {
+ mutex_unlock(&hbtp->mutex);
+ pr_err("%s: failed to power on\n", __func__);
+ return error;
+ }
+ error = hbtp_pinctrl_enable(hbtp, true);
+ if (error) {
+ mutex_unlock(&hbtp->mutex);
+ pr_err("%s: failed to enable pins\n", __func__);
+ hbtp_pdev_power_on(hbtp, false);
+ return error;
+ }
+ hbtp->power_sync_enabled = true;
+ mutex_unlock(&hbtp->mutex);
+ pr_debug("%s: power_sync option is enabled\n",
+ __func__);
+ break;
+ case HBTP_AFE_POWER_ENABLE_SYNC_SIGNAL:
+ if (!hbtp->power_sync_enabled) {
+ pr_err("%s: power_sync is not enabled\n",
+ __func__);
+ return -EFAULT;
+ }
+ mutex_lock(&hbtp->mutex);
+ init_completion(&hbtp->power_resume_sig);
+ init_completion(&hbtp->power_suspend_sig);
+ hbtp->power_sig_enabled = true;
+ mutex_unlock(&hbtp->mutex);
+			pr_debug("%s: sync_signal option is enabled\n", __func__);
+ break;
+ default:
+ pr_err("%s: unsupported power ctrl, %d\n",
+ __func__, afe_power_ctrl);
+ return -EINVAL;
+ }
+ break;
default:
pr_err("%s: Unsupported ioctl command %u\n", __func__, cmd);
error = -EINVAL;
@@ -514,6 +847,25 @@ static int hbtp_parse_dt(struct device *dev)
}
}
+ if (hbtp->manage_power_dig && hbtp->manage_afe_power_ana) {
+ rc = of_property_read_u32(np,
+ "qcom,afe-power-on-delay-us", &temp_val);
+ if (!rc)
+ hbtp->power_on_delay = (u32)temp_val;
+ else
+ dev_info(dev, "Power-On Delay is not specified\n");
+
+ rc = of_property_read_u32(np,
+ "qcom,afe-power-off-delay-us", &temp_val);
+ if (!rc)
+ hbtp->power_off_delay = (u32)temp_val;
+ else
+ dev_info(dev, "Power-Off Delay is not specified\n");
+
+ dev_dbg(dev, "power-on-delay = %u, power-off-delay = %u\n",
+ hbtp->power_on_delay, hbtp->power_off_delay);
+ }
+
prop = of_find_property(np, "qcom,display-resolution", NULL);
if (prop != NULL) {
if (!prop->value)
@@ -605,11 +957,292 @@ static int hbtp_parse_dt(struct device *dev)
}
#endif
+static int hbtp_pinctrl_init(struct hbtp_data *data)
+{
+ const char *statename;
+ int rc;
+ int state_cnt, i;
+ struct device_node *np = data->pdev->dev.of_node;
+ bool pinctrl_state_act_found = false;
+ bool pinctrl_state_sus_found = false;
+ bool pinctrl_ddic_act_found = false;
+ bool pinctrl_ddic_sus_found = false;
+ int count = 0;
+
+ data->ts_pinctrl = devm_pinctrl_get(&(data->pdev->dev));
+ if (IS_ERR_OR_NULL(data->ts_pinctrl)) {
+ dev_err(&data->pdev->dev,
+ "Target does not use pinctrl\n");
+ rc = PTR_ERR(data->ts_pinctrl);
+ data->ts_pinctrl = NULL;
+ return rc;
+ }
+
+ state_cnt = of_property_count_strings(np, "pinctrl-names");
+ if (state_cnt < HBTP_PINCTRL_VALID_STATE_CNT) {
+		/*
+		 * If pinctrl names are not available,
+		 * power_sync can't be enabled.
+		 */
+ dev_info(&data->pdev->dev,
+ "pinctrl names are not available\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ for (i = 0; i < state_cnt; i++) {
+ rc = of_property_read_string_index(np,
+ "pinctrl-names", i, &statename);
+ if (rc) {
+ dev_err(&data->pdev->dev,
+ "failed to read pinctrl states by index\n");
+ goto error;
+ }
+
+ if (!strcmp(statename, "pmx_ts_active")) {
+ data->gpio_state_active
+ = pinctrl_lookup_state(data->ts_pinctrl,
+ statename);
+ if (IS_ERR_OR_NULL(data->gpio_state_active)) {
+ dev_err(&data->pdev->dev,
+ "Can not get ts default state\n");
+ rc = PTR_ERR(data->gpio_state_active);
+ goto error;
+ }
+ pinctrl_state_act_found = true;
+ } else if (!strcmp(statename, "pmx_ts_suspend")) {
+ data->gpio_state_suspend
+ = pinctrl_lookup_state(data->ts_pinctrl,
+ statename);
+ if (IS_ERR_OR_NULL(data->gpio_state_suspend)) {
+ dev_err(&data->pdev->dev,
+ "Can not get ts sleep state\n");
+ rc = PTR_ERR(data->gpio_state_suspend);
+ goto error;
+ }
+ pinctrl_state_sus_found = true;
+ } else if (!strcmp(statename, "ddic_rst_active")) {
+ data->ddic_rst_state_active
+ = pinctrl_lookup_state(data->ts_pinctrl,
+ statename);
+ if (IS_ERR(data->ddic_rst_state_active)) {
+ dev_err(&data->pdev->dev,
+ "Can not get DDIC rst act state\n");
+ rc = PTR_ERR(data->ddic_rst_state_active);
+ goto error;
+ }
+ pinctrl_ddic_act_found = true;
+ } else if (!strcmp(statename, "ddic_rst_suspend")) {
+ data->ddic_rst_state_suspend
+ = pinctrl_lookup_state(data->ts_pinctrl,
+ statename);
+ if (IS_ERR(data->ddic_rst_state_suspend)) {
+ dev_err(&data->pdev->dev,
+ "Can not get DDIC rst sleep state\n");
+ rc = PTR_ERR(data->ddic_rst_state_suspend);
+ goto error;
+ }
+ pinctrl_ddic_sus_found = true;
+ } else {
+ dev_err(&data->pdev->dev, "invalid pinctrl state\n");
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+
+ if (!pinctrl_state_act_found || !pinctrl_state_sus_found) {
+ dev_err(&data->pdev->dev,
+ "missing required pinctrl states\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (of_property_read_u32(np, "qcom,pmx-ts-on-seq-delay-us",
+ &data->ts_pinctrl_seq_delay)) {
+ dev_warn(&data->pdev->dev, "Can not find ts seq delay\n");
+ }
+
+ if (of_property_read_u32(np, "qcom,fb-resume-delay-us",
+ &data->fb_resume_seq_delay)) {
+ dev_warn(&data->pdev->dev, "Can not find fb resume seq delay\n");
+ }
+
+ if (pinctrl_ddic_act_found && pinctrl_ddic_sus_found) {
+ count = of_property_count_u32_elems(np,
+ "qcom,ddic-rst-on-seq-delay-us");
+ if (count == HBTP_PINCTRL_DDIC_SEQ_NUM) {
+ of_property_read_u32_array(np,
+ "qcom,ddic-rst-on-seq-delay-us",
+ data->ddic_pinctrl_seq_delay, count);
+ } else {
+			dev_err(&data->pdev->dev,
+				"delay count (%d) does not match %u\n",
+				count, HBTP_PINCTRL_DDIC_SEQ_NUM);
+ }
+ } else {
+ dev_err(&data->pdev->dev, "ddic pinctrl act/sus not found\n");
+ }
+
+ data->manage_pin_ctrl = true;
+ return 0;
+
+error:
+ devm_pinctrl_put(data->ts_pinctrl);
+ data->ts_pinctrl = NULL;
+ return rc;
+}
+
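+/*
+ * Suspend/resume handlers: a KOBJ_OFFLINE/KOBJ_ONLINE uevent is sent to
+ * userspace and, when power_sig is enabled, the handler blocks
+ * (interruptibly) on power_suspend_sig/power_resume_sig until userspace
+ * acknowledges it through the HBTP_SET_SYNCSIGNAL ioctl.
+ */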
+static int hbtp_fb_suspend(struct hbtp_data *ts)
+{
+ int rc;
+ char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
+
+ mutex_lock(&hbtp->mutex);
+ if (ts->pdev && ts->power_sync_enabled) {
+ pr_debug("%s: power_sync is enabled\n", __func__);
+ if (ts->power_suspended) {
+ mutex_unlock(&hbtp->mutex);
+			pr_err("%s: power is already suspended\n", __func__);
+ return 0;
+ }
+ rc = hbtp_pinctrl_enable(ts, false);
+ if (rc) {
+ pr_err("%s: failed to disable GPIO pins\n", __func__);
+ goto err_pin_disable;
+ }
+
+ rc = hbtp_pdev_power_on(ts, false);
+ if (rc) {
+ pr_err("%s: failed to disable power\n", __func__);
+ goto err_power_disable;
+ }
+ ts->power_suspended = true;
+ }
+
+ if (ts->input_dev) {
+ kobject_uevent_env(&ts->input_dev->dev.kobj,
+ KOBJ_OFFLINE, envp);
+
+ if (ts->power_sig_enabled) {
+ pr_debug("%s: power_sig is enabled, wait for signal\n",
+ __func__);
+ mutex_unlock(&hbtp->mutex);
+ rc = wait_for_completion_interruptible(
+ &hbtp->power_suspend_sig);
+ if (rc != 0) {
+ pr_err("%s: wait for suspend is interrupted\n",
+ __func__);
+ }
+ mutex_lock(&hbtp->mutex);
+ pr_debug("%s: Wait is done for suspend\n", __func__);
+ } else {
+			pr_debug("%s: power_sig is NOT enabled\n", __func__);
+ }
+ }
+
+ mutex_unlock(&hbtp->mutex);
+ return 0;
+err_power_disable:
+ hbtp_pinctrl_enable(ts, true);
+err_pin_disable:
+ mutex_unlock(&hbtp->mutex);
+ return rc;
+}
+
+static int hbtp_fb_early_resume(struct hbtp_data *ts)
+{
+ char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
+ int rc;
+
+ mutex_lock(&hbtp->mutex);
+
+ pr_debug("%s: hbtp_fb_early_resume\n", __func__);
+
+ if (ts->pdev && ts->power_sync_enabled) {
+ pr_debug("%s: power_sync is enabled\n", __func__);
+ if (!ts->power_suspended) {
+ pr_err("%s: power is not suspended\n", __func__);
+ mutex_unlock(&hbtp->mutex);
+ return 0;
+ }
+ rc = hbtp_pdev_power_on(ts, true);
+ if (rc) {
+ pr_err("%s: failed to enable panel power\n", __func__);
+ goto err_power_on;
+ }
+
+ rc = hbtp_pinctrl_enable(ts, true);
+
+ if (rc) {
+ pr_err("%s: failed to enable pin\n", __func__);
+ goto err_pin_enable;
+ }
+
+ ts->power_suspended = false;
+
+ if (ts->input_dev) {
+
+ kobject_uevent_env(&ts->input_dev->dev.kobj,
+ KOBJ_ONLINE, envp);
+
+ if (ts->power_sig_enabled) {
+				pr_debug("%s: power_sig is enabled, wait for signal\n",
+ __func__);
+ mutex_unlock(&hbtp->mutex);
+ rc = wait_for_completion_interruptible(
+ &hbtp->power_resume_sig);
+ if (rc != 0) {
+ pr_err("%s: wait for resume is interrupted\n",
+ __func__);
+ }
+ mutex_lock(&hbtp->mutex);
+ pr_debug("%s: wait is done\n", __func__);
+ } else {
+ pr_debug("%s: power_sig is NOT enabled\n",
+ __func__);
+ }
+
+ if (ts->fb_resume_seq_delay) {
+ usleep_range(ts->fb_resume_seq_delay,
+ ts->fb_resume_seq_delay +
+ HBTP_HOLD_DURATION_US);
+				pr_debug("%s: fb_resume_seq_delay = %u\n",
+ __func__, ts->fb_resume_seq_delay);
+ }
+ }
+ }
+ mutex_unlock(&hbtp->mutex);
+ return 0;
+
+err_pin_enable:
+ hbtp_pdev_power_on(ts, false);
+err_power_on:
+ mutex_unlock(&hbtp->mutex);
+ return rc;
+}
+
+static int hbtp_fb_resume(struct hbtp_data *ts)
+{
+ char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
+
+ mutex_lock(&hbtp->mutex);
+ if (!ts->power_sync_enabled) {
+ pr_debug("%s: power_sync is disabled, send uevent\n", __func__);
+ if (ts->input_dev) {
+ kobject_uevent_env(&ts->input_dev->dev.kobj,
+ KOBJ_ONLINE, envp);
+ }
+ }
+ mutex_unlock(&hbtp->mutex);
+ return 0;
+}
+
static int hbtp_pdev_probe(struct platform_device *pdev)
{
int error;
struct regulator *vcc_ana, *vcc_dig;
+ hbtp->pdev = pdev;
+
if (pdev->dev.of_node) {
error = hbtp_parse_dt(&pdev->dev);
if (error) {
@@ -618,6 +1251,14 @@ static int hbtp_pdev_probe(struct platform_device *pdev)
}
}
+ platform_set_drvdata(pdev, hbtp);
+
+ error = hbtp_pinctrl_init(hbtp);
+ if (error) {
+ pr_info("%s: pinctrl isn't available, rc=%d\n", __func__,
+ error);
+ }
+
if (hbtp->manage_afe_power_ana) {
vcc_ana = regulator_get(&pdev->dev, "vcc_ana");
if (IS_ERR(vcc_ana)) {
@@ -662,8 +1303,6 @@ static int hbtp_pdev_probe(struct platform_device *pdev)
hbtp->vcc_dig = vcc_dig;
}
- hbtp->pdev = pdev;
-
return 0;
}
@@ -677,6 +1316,9 @@ static int hbtp_pdev_remove(struct platform_device *pdev)
regulator_put(hbtp->vcc_dig);
}
+ if (hbtp->ts_pinctrl)
+ devm_pinctrl_put(hbtp->ts_pinctrl);
+
return 0;
}
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index a806ba3818f7..8d6326d7e7be 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
const struct max8997_platform_data *pdata =
dev_get_platdata(iodev->dev);
- const struct max8997_haptic_platform_data *haptic_pdata =
- pdata->haptic_pdata;
+ const struct max8997_haptic_platform_data *haptic_pdata = NULL;
struct max8997_haptic *chip;
struct input_dev *input_dev;
int error;
+ if (pdata)
+ haptic_pdata = pdata->haptic_pdata;
+
if (!haptic_pdata) {
dev_err(&pdev->dev, "no haptic platform data\n");
return -EINVAL;
diff --git a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
index ca5a9ec2f7f9..fa5e4cca129d 100644
--- a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
+++ b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
@@ -31,6 +31,10 @@ struct pixart_pat9125_data {
struct pinctrl_state *pinctrl_state_release;
};
+/* Declaration of suspend and resume functions */
+static int pat9125_suspend(struct device *dev);
+static int pat9125_resume(struct device *dev);
+
static int pat9125_i2c_write(struct i2c_client *client, u8 reg, u8 *data,
int len)
{
@@ -146,6 +150,27 @@ static irqreturn_t pat9125_irq(int irq, void *dev_data)
return IRQ_HANDLED;
}
+static ssize_t pat9125_suspend_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pixart_pat9125_data *data =
+ (struct pixart_pat9125_data *) dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int mode;
+
+ if (kstrtoint(buf, 10, &mode)) {
+ dev_err(dev, "failed to read input for sysfs\n");
+ return -EINVAL;
+ }
+
+ if (mode == 1)
+ pat9125_suspend(&client->dev);
+ else if (mode == 0)
+ pat9125_resume(&client->dev);
+
+ return count;
+}
+
static ssize_t pat9125_test_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -191,11 +216,15 @@ static ssize_t pat9125_test_show(struct device *dev,
{
return 0;
}
+
+static DEVICE_ATTR(suspend, S_IRUGO | S_IWUSR | S_IWGRP,
+ NULL, pat9125_suspend_store);
static DEVICE_ATTR(test, S_IRUGO | S_IWUSR | S_IWGRP,
pat9125_test_show, pat9125_test_store);
static struct attribute *pat9125_attr_list[] = {
&dev_attr_test.attr,
+ &dev_attr_suspend.attr,
NULL,
};
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e03d12..67aab86048ad 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
kpd_delay = 15625;
- if (kpd_delay > 62500 || kpd_delay == 0) {
+ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- delay = (kpd_delay << 10) / USEC_PER_SEC;
- delay = 1 + ilog2(delay);
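+	/*
+	 * Convert the debounce time to 1/64 s units and store its log2:
+	 * e.g. the default 15625 us gives (15625 << 6) / USEC_PER_SEC = 1
+	 * and ilog2(1) = 0, while the 2 s maximum gives ilog2(128) = 7.
+	 */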
+ delay = (kpd_delay << 6) / USEC_PER_SEC;
+ delay = ilog2(delay);
err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4a4396..7c18249d6c8e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
goto err_free_buf;
}
+ /* Sanity check that a device has an endpoint */
+ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ dev_err(&usbinterface->dev,
+ "Invalid number of endpoints\n");
+ error = -EINVAL;
+ goto err_free_urb;
+ }
+
/*
* The endpoint is always altsetting 0, we know this since we know
* this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
* HID report descriptor
*/
if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
- HID_DEVICE_TYPE, &hid_desc) != 0){
+ HID_DEVICE_TYPE, &hid_desc) != 0) {
dev_err(&usbinterface->dev,
"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
error = -EIO;
diff --git a/drivers/input/touchscreen/gt9xx/goodix_tool.c b/drivers/input/touchscreen/gt9xx/goodix_tool.c
index 99a29401b36f..1657f56558ce 100644
--- a/drivers/input/touchscreen/gt9xx/goodix_tool.c
+++ b/drivers/input/touchscreen/gt9xx/goodix_tool.c
@@ -236,14 +236,13 @@ static u8 relation(u8 src, u8 dst, u8 rlt)
return ret;
}
-/*******************************************************
+/*
* Function:
- * Comfirm function.
+ *	Confirm function.
* Input:
- * None.
+ * None.
* Output:
* Return write length.
- *******************************************************
*/
static u8 comfirm(void)
{
@@ -301,11 +300,11 @@ static s32 fill_update_info(char __user *user_buf,
* Function:
* Goodix tool write function.
* Input:
- * standard proc write function param.
+ * standard proc write function param.
* Output:
* Return write length.
*/
-static s32 goodix_tool_write(struct file *filp, const char __user *userbuf,
+static ssize_t goodix_tool_write(struct file *filp, const char __user *userbuf,
size_t count, loff_t *ppos)
{
s32 ret = 0;
@@ -318,20 +317,22 @@ static s32 goodix_tool_write(struct file *filp, const char __user *userbuf,
goto exit;
}
- dev_dbg(&gt_client->dev, "wr:0x%02x, flag:0x%02x, flag addr:0x%02x%02x,
- flag val:0x%02x, flag rel:0x%02x,", cmd_headd.wr,
- cmd_head.flag, cmd_head.flag_addr[0],
- cmd_head.flag_addr[1], cmd_head.flag_val,
- cmd_head.flag_relation);
- dev_dbg(&gt_client->dev, "circle:%d, times:%d, retry:%d, delay:%d,
- data len:%d, addr len:%d, addr:0x%02x%02x, write len: %d",
- (s32)cmd_head.circle, (s32)cmd_head.times, (s32)cmd_head.retry,
- (s32)cmd_head.delay, (s32)cmd_head.data_len,
- (s32)cmd_head.addr_len, cmd_head.addr[0], cmd_head.addr[1],
- (s32)count);
+ dev_dbg(&gt_client->dev,
+ "wr: 0x%02x, flag:0x%02x, flag addr:0x%02x%02x\n", cmd_head.wr,
+ cmd_head.flag, cmd_head.flag_addr[0], cmd_head.flag_addr[1]);
+ dev_dbg(&gt_client->dev,
+ "flag val:0x%02x, flag rel:0x%02x,\n", cmd_head.flag_val,
+ cmd_head.flag_relation);
+ dev_dbg(&gt_client->dev, "circle:%u, times:%u, retry:%u, delay:%u\n",
+ (s32) cmd_head.circle, (s32) cmd_head.times,
+ (s32) cmd_head.retry, (s32)cmd_head.delay);
+ dev_dbg(&gt_client->dev,
+ "data len:%u, addr len:%u, addr:0x%02x%02x, write len: %u\n",
+ (s32)cmd_head.data_len, (s32)cmd_head.addr_len,
+ cmd_head.addr[0], cmd_head.addr[1], (s32)count);
if (cmd_head.data_len > (data_length - GTP_ADDR_LENGTH)) {
- dev_err(&gt_client->dev, "data len %d > data buff %d, rejected\n",
+ dev_err(&gt_client->dev, "data len %u > data buff %d, rejected\n",
cmd_head.data_len, (data_length - GTP_ADDR_LENGTH));
ret = -EINVAL;
goto exit;
@@ -383,7 +384,7 @@ static s32 goodix_tool_write(struct file *filp, const char __user *userbuf,
if (cmd_head.data_len > sizeof(ic_type)) {
dev_err(&gt_client->dev,
- "data len %d > data buff %d, rejected\n",
+ "data len %u > data buff %zu, rejected\n",
cmd_head.data_len, sizeof(ic_type));
ret = -EINVAL;
goto exit;
@@ -445,7 +446,7 @@ static s32 goodix_tool_write(struct file *filp, const char __user *userbuf,
show_len = 0;
total_len = 0;
if (cmd_head.data_len + 1 > data_length) {
- dev_err(&gt_client->dev, "data len %d > data buff %d, rejected\n",
+ dev_err(&gt_client->dev, "data len %u > data buff %d, rejected\n",
cmd_head.data_len + 1, data_length);
ret = -EINVAL;
goto exit;
@@ -470,11 +471,11 @@ exit:
* Function:
* Goodix tool read function.
* Input:
- * standard proc read function param.
+ * standard proc read function param.
* Output:
* Return read length.
*/
-static s32 goodix_tool_read(struct file *file, char __user *user_buf,
+static ssize_t goodix_tool_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
u16 data_len = 0;
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index a9d7666a6d6f..ead935120624 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -48,6 +48,7 @@
#include <linux/module.h>
#include <linux/input/mt.h>
#include <linux/debugfs.h>
+#include <linux/interrupt.h>
#define GOODIX_DEV_NAME "Goodix-CTP"
#define CFG_MAX_TOUCH_POINTS 5
@@ -65,6 +66,8 @@
#define RESET_DELAY_T3_US 200 /* T3: > 100us */
#define RESET_DELAY_T4 20 /* T4: > 5ms */
+#define SLEEP_DELAY_US 5000
+#define WAKE_UP_DELAY_US 5000
#define PHY_BUF_SIZE 32
#define PROP_NAME_SIZE 24
@@ -72,11 +75,6 @@
#define GTP_MAX_TOUCH 5
#define GTP_ESD_CHECK_CIRCLE_MS 2000
-#if GTP_HAVE_TOUCH_KEY
-static const u16 touch_key_array[] = {KEY_MENU, KEY_HOMEPAGE, KEY_BACK};
-
-#endif
-
static void gtp_int_sync(struct goodix_ts_data *ts, int ms);
static int gtp_i2c_test(struct i2c_client *client);
static int goodix_power_off(struct goodix_ts_data *ts);
@@ -99,15 +97,14 @@ static void gtp_esd_check_func(struct work_struct *work);
static int gtp_init_ext_watchdog(struct i2c_client *client);
#endif
-#if GTP_SLIDE_WAKEUP
-enum doze_status {
+enum doze {
DOZE_DISABLED = 0,
DOZE_ENABLED = 1,
DOZE_WAKEUP = 2,
};
-static enum doze_status = DOZE_DISABLED;
+static enum doze doze_status = DOZE_DISABLED;
static s8 gtp_enter_doze(struct goodix_ts_data *ts);
-#endif
+
bool init_done;
static u8 chip_gt9xxs; /* true if ic is gt9xxs, like gt915s */
u8 grp_cfg_version;
@@ -115,6 +112,8 @@ struct i2c_client *i2c_connect_client;
#define GTP_DEBUGFS_DIR "ts_debug"
#define GTP_DEBUGFS_FILE_SUSPEND "suspend"
+#define GTP_DEBUGFS_FILE_DATA "data"
+#define GTP_DEBUGFS_FILE_ADDR "addr"
/*******************************************************
Function:
@@ -155,11 +154,11 @@ int gtp_i2c_read(struct i2c_client *client, u8 *buf, int len)
dev_err(&client->dev, "I2C retry: %d\n", retries + 1);
}
if (retries == GTP_I2C_RETRY_5) {
-#if GTP_SLIDE_WAKEUP
- /* reset chip would quit doze mode */
- if (doze_status == DOZE_ENABLED)
- return ret;
-#endif
+		/* resetting the chip would quit doze mode */
+		if (ts->pdata->slide_wakeup && doze_status == DOZE_ENABLED)
+			return ret;
+
if (init_done)
gtp_reset_guitar(ts, 10);
else
@@ -201,10 +200,10 @@ int gtp_i2c_write(struct i2c_client *client, u8 *buf, int len)
dev_err(&client->dev, "I2C retry: %d\n", retries + 1);
}
if (retries == GTP_I2C_RETRY_5) {
-#if GTP_SLIDE_WAKEUP
- if (doze_status == DOZE_ENABLED)
- return ret;
-#endif
+		if (ts->pdata->slide_wakeup && doze_status == DOZE_ENABLED)
+			return ret;
+
if (init_done)
gtp_reset_guitar(ts, 10);
else
@@ -268,24 +267,25 @@ Output:
*********************************************************/
int gtp_send_cfg(struct goodix_ts_data *ts)
{
- int ret;
-#if GTP_DRIVER_SEND_CFG
- int retry = 0;
+ int ret = 0;
+ int retry;
- if (ts->fixed_cfg) {
- dev_dbg(&ts->client->dev,
- "Ic fixed config, no config sent!");
- ret = 2;
- } else {
- for (retry = 0; retry < GTP_I2C_RETRY_5; retry++) {
- ret = gtp_i2c_write(ts->client,
- ts->config_data,
- GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH);
- if (ret > 0)
- break;
+ if (ts->pdata->driver_send_cfg) {
+ if (ts->fixed_cfg) {
+ dev_dbg(&ts->client->dev,
+ "Ic fixed config, no config sent!");
+ ret = 2;
+ } else {
+ for (retry = 0; retry < GTP_I2C_RETRY_5; retry++) {
+ ret = gtp_i2c_write(ts->client,
+ ts->config_data,
+ GTP_CONFIG_MAX_LENGTH +
+ GTP_ADDR_LENGTH);
+ if (ret > 0)
+ break;
+ }
}
}
-#endif
return ret;
}
@@ -345,9 +345,8 @@ Output:
static void gtp_touch_down(struct goodix_ts_data *ts, int id, int x, int y,
int w)
{
-#if GTP_CHANGE_X2Y
- swap(x, y);
-#endif
+ if (ts->pdata->change_x2y)
+ swap(x, y);
input_mt_slot(ts->input_dev, id);
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
@@ -392,9 +391,7 @@ static void goodix_ts_work_func(struct work_struct *work)
u8 finger = 0;
static u16 pre_touch;
static u8 pre_key;
-#if GTP_WITH_PEN
static u8 pre_pen;
-#endif
u8 key_value = 0;
u8 *coor_data = NULL;
s32 input_x = 0;
@@ -404,10 +401,7 @@ static void goodix_ts_work_func(struct work_struct *work)
s32 i = 0;
int ret = -1;
struct goodix_ts_data *ts = NULL;
-
-#if GTP_SLIDE_WAKEUP
u8 doze_buf[3] = {0x81, 0x4B};
-#endif
ts = container_of(work, struct goodix_ts_data, work);
#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
@@ -415,55 +409,59 @@ static void goodix_ts_work_func(struct work_struct *work)
return;
#endif
-#if GTP_SLIDE_WAKEUP
- if (doze_status == DOZE_ENABLED) {
- ret = gtp_i2c_read(ts->client, doze_buf, 3);
- if (ret > 0) {
- if (doze_buf[2] == 0xAA) {
- dev_dbg(&ts->client->dev,
- "Slide(0xAA) To Light up the screen!");
- doze_status = DOZE_WAKEUP;
- input_report_key(
- ts->input_dev, KEY_POWER, 1);
- input_sync(ts->input_dev);
- input_report_key(
- ts->input_dev, KEY_POWER, 0);
- input_sync(ts->input_dev);
- /* clear 0x814B */
- doze_buf[2] = 0x00;
- gtp_i2c_write(ts->client, doze_buf, 3);
- } else if (doze_buf[2] == 0xBB) {
- dev_dbg(&ts->client->dev,
- "Slide(0xBB) To Light up the screen!");
- doze_status = DOZE_WAKEUP;
- input_report_key(ts->input_dev, KEY_POWER, 1);
- input_sync(ts->input_dev);
- input_report_key(ts->input_dev, KEY_POWER, 0);
- input_sync(ts->input_dev);
- /* clear 0x814B*/
- doze_buf[2] = 0x00;
- gtp_i2c_write(ts->client, doze_buf, 3);
- } else if (0xC0 == (doze_buf[2] & 0xC0)) {
- dev_dbg(&ts->client->dev,
- "double click to light up the screen!");
- doze_status = DOZE_WAKEUP;
- input_report_key(ts->input_dev, KEY_POWER, 1);
- input_sync(ts->input_dev);
- input_report_key(ts->input_dev, KEY_POWER, 0);
- input_sync(ts->input_dev);
- /* clear 0x814B */
- doze_buf[2] = 0x00;
- gtp_i2c_write(ts->client, doze_buf, 3);
- } else {
- gtp_enter_doze(ts);
+ if (ts->pdata->slide_wakeup) {
+ if (doze_status == DOZE_ENABLED) {
+ ret = gtp_i2c_read(ts->client, doze_buf, 3);
+ if (ret > 0) {
+ if (doze_buf[2] == 0xAA) {
+ dev_dbg(&ts->client->dev,
+ "Slide(0xAA) To Light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(
+ ts->input_dev, KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(
+ ts->input_dev, KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B */
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else if (doze_buf[2] == 0xBB) {
+ dev_dbg(&ts->client->dev,
+ "Slide(0xBB) To Light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(ts->input_dev,
+ KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(ts->input_dev,
+ KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B*/
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else if (0xC0 == (doze_buf[2] & 0xC0)) {
+ dev_dbg(&ts->client->dev,
+ "double click to light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(ts->input_dev,
+ KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(ts->input_dev,
+ KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B */
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else {
+ gtp_enter_doze(ts);
+ }
}
- }
- if (ts->use_irq)
- gtp_irq_enable(ts);
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
- return;
+ return;
+ }
}
-#endif
ret = gtp_i2c_read(ts->client, point_data, 12);
if (ret < 0) {
@@ -504,15 +502,16 @@ static void goodix_ts_work_func(struct work_struct *work)
pre_key = key_value;
-#if GTP_WITH_PEN
- if (pre_pen && (touch_num == 0)) {
- dev_dbg(&ts->client->dev, "Pen touch UP(Slot)!");
- input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
- input_mt_slot(ts->input_dev, 5);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
- pre_pen = 0;
+ if (ts->pdata->with_pen) {
+ if (pre_pen && (touch_num == 0)) {
+ dev_dbg(&ts->client->dev, "Pen touch UP(Slot)!");
+ input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
+ input_mt_slot(ts->input_dev, 5);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
+ pre_pen = 0;
+ }
}
-#endif
+
if (pre_touch || touch_num) {
s32 pos = 0;
u16 touch_index = 0;
@@ -520,45 +519,45 @@ static void goodix_ts_work_func(struct work_struct *work)
coor_data = &point_data[3];
if (touch_num) {
id = coor_data[pos] & 0x0F;
-#if GTP_WITH_PEN
- id = coor_data[pos];
- if (id == 128) {
- dev_dbg(&ts->client->dev,
- "Pen touch DOWN(Slot)!");
- input_x = coor_data[pos + 1]
- | (coor_data[pos + 2] << 8);
- input_y = coor_data[pos + 3]
- | (coor_data[pos + 4] << 8);
- input_w = coor_data[pos + 5]
- | (coor_data[pos + 6] << 8);
-
- input_report_key(ts->input_dev,
- BTN_TOOL_PEN, 1);
- input_mt_slot(ts->input_dev, 5);
- input_report_abs(ts->input_dev,
- ABS_MT_TRACKING_ID, 5);
- input_report_abs(ts->input_dev,
- ABS_MT_POSITION_X, input_x);
- input_report_abs(ts->input_dev,
- ABS_MT_POSITION_Y, input_y);
- input_report_abs(ts->input_dev,
- ABS_MT_TOUCH_MAJOR, input_w);
- dev_dbg(&ts->client->dev,
- "Pen/Stylus: (%d, %d)[%d]",
- input_x, input_y, input_w);
- pre_pen = 1;
- pre_touch = 0;
+ if (ts->pdata->with_pen) {
+ id = coor_data[pos];
+ if (id == 128) {
+ dev_dbg(&ts->client->dev,
+ "Pen touch DOWN(Slot)!");
+ input_x = coor_data[pos + 1]
+ | (coor_data[pos + 2] << 8);
+ input_y = coor_data[pos + 3]
+ | (coor_data[pos + 4] << 8);
+ input_w = coor_data[pos + 5]
+ | (coor_data[pos + 6] << 8);
+
+ input_report_key(ts->input_dev,
+ BTN_TOOL_PEN, 1);
+ input_mt_slot(ts->input_dev, 5);
+ input_report_abs(ts->input_dev,
+ ABS_MT_TRACKING_ID, 5);
+ input_report_abs(ts->input_dev,
+ ABS_MT_POSITION_X, input_x);
+ input_report_abs(ts->input_dev,
+ ABS_MT_POSITION_Y, input_y);
+ input_report_abs(ts->input_dev,
+ ABS_MT_TOUCH_MAJOR, input_w);
+ dev_dbg(&ts->client->dev,
+ "Pen/Stylus: (%d, %d)[%d]",
+ input_x, input_y, input_w);
+ pre_pen = 1;
+ pre_touch = 0;
+ }
}
-#endif
touch_index |= (0x01<<id);
}
for (i = 0; i < GTP_MAX_TOUCH; i++) {
-#if GTP_WITH_PEN
- if (pre_pen == 1)
- break;
-#endif
+			if (ts->pdata->with_pen && pre_pen == 1)
+				break;
+
if (touch_index & (0x01<<i)) {
input_x = coor_data[pos + 1] |
coor_data[pos + 2] << 8;
@@ -649,7 +648,7 @@ void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
else
gpio_direction_output(ts->pdata->irq_gpio, 0);
- usleep(RESET_DELAY_T3_US);
+ usleep_range(RESET_DELAY_T3_US, RESET_DELAY_T3_US + 1);
gpio_direction_output(ts->pdata->reset_gpio, 1);
msleep(RESET_DELAY_T4);
@@ -663,7 +662,6 @@ void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
}
#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_FB)
-#if GTP_SLIDE_WAKEUP
/*******************************************************
Function:
Enter doze mode for sliding wakeup.
@@ -680,9 +678,9 @@ static s8 gtp_enter_doze(struct goodix_ts_data *ts)
(u8)(GTP_REG_SLEEP >> 8),
(u8)GTP_REG_SLEEP, 8};
-#if GTP_DBL_CLK_WAKEUP
- i2c_control_buf[2] = 0x09;
-#endif
+ if (ts->pdata->dbl_clk_wakeup)
+ i2c_control_buf[2] = 0x09;
+
gtp_irq_disable(ts);
while (retry++ < GTP_I2C_RETRY_3) {
@@ -711,7 +709,6 @@ static s8 gtp_enter_doze(struct goodix_ts_data *ts)
gtp_irq_enable(ts);
return ret;
}
-#else
/**
* gtp_enter_sleep - Enter sleep mode
* @ts: driver private data
@@ -744,7 +741,7 @@ static u8 gtp_enter_sleep(struct goodix_ts_data *ts)
}
return 0;
}
- usleep(5000);
+ usleep_range(SLEEP_DELAY_US, SLEEP_DELAY_US + 1);
while (retry++ < GTP_I2C_RETRY_5) {
ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
if (ret == 1) {
@@ -756,7 +753,6 @@ static u8 gtp_enter_sleep(struct goodix_ts_data *ts)
dev_err(&ts->client->dev, "GTP send sleep cmd failed.\n");
return ret;
}
-#endif /* !GTP_SLIDE_WAKEUP */
/*******************************************************
Function:
@@ -802,33 +798,34 @@ static s8 gtp_wakeup_sleep(struct goodix_ts_data *ts)
"Wakeup sleep send config success.");
} else {
err_retry:
-#if GTP_SLIDE_WAKEUP
- /* wakeup not by slide */
- if (doze_status != DOZE_WAKEUP)
- gtp_reset_guitar(ts, 10);
- else
- /* wakeup by slide */
- doze_status = DOZE_DISABLED;
-#else
- if (chip_gt9xxs == 1) {
- gtp_reset_guitar(ts, 10);
+		if (ts->pdata->slide_wakeup) {
+			if (doze_status != DOZE_WAKEUP)
+				/* wakeup not by slide */
+				gtp_reset_guitar(ts, 10);
+			else
+				/* wakeup by slide */
+				doze_status = DOZE_DISABLED;
} else {
- ret = gpio_direction_output(ts->pdata->irq_gpio, 1);
- usleep(5000);
+ if (chip_gt9xxs == 1) {
+ gtp_reset_guitar(ts, 10);
+ } else {
+ ret = gpio_direction_output(
+ ts->pdata->irq_gpio, 1);
+ usleep_range(WAKE_UP_DELAY_US,
+ WAKE_UP_DELAY_US + 1);
+ }
}
-#endif
ret = gtp_i2c_test(ts->client);
if (ret == 2) {
dev_dbg(&ts->client->dev, "GTP wakeup sleep.");
-#if (!GTP_SLIDE_WAKEUP)
- if (chip_gt9xxs == 0) {
- gtp_int_sync(ts, 25);
- msleep(20);
+ if (!ts->pdata->slide_wakeup) {
+ if (chip_gt9xxs == 0) {
+ gtp_int_sync(ts, 25);
+ msleep(20);
#if GTP_ESD_PROTECT
- gtp_init_ext_watchdog(ts->client);
+ gtp_init_ext_watchdog(ts->client);
#endif
+ }
}
-#endif
return ret;
}
gtp_reset_guitar(ts, 20);
@@ -852,123 +849,126 @@ Output:
static int gtp_init_panel(struct goodix_ts_data *ts)
{
struct i2c_client *client = ts->client;
- unsigned char *config_data;
+ unsigned char *config_data = NULL;
int ret = -EIO;
-
-#if GTP_DRIVER_SEND_CFG
int i;
u8 check_sum = 0;
u8 opr_buf[16];
u8 sensor_id = 0;
- for (i = 0; i < GOODIX_MAX_CFG_GROUP; i++)
- dev_dbg(&client->dev, "Config Groups(%d) Lengths: %d",
- i, ts->pdata->config_data_len[i]);
-
- ret = gtp_i2c_read_dbl_check(ts->client, 0x41E4, opr_buf, 1);
- if (ret == SUCCESS) {
- if (opr_buf[0] != 0xBE) {
- ts->fw_error = 1;
- dev_err(&client->dev,
- "Firmware error, no config sent!");
- return -EINVAL;
- }
- }
+ if (ts->pdata->driver_send_cfg) {
+ for (i = 0; i < GOODIX_MAX_CFG_GROUP; i++)
+ dev_dbg(&client->dev, "Config Groups(%d) Lengths: %zu",
+ i, ts->pdata->config_data_len[i]);
- for (i = 1; i < GOODIX_MAX_CFG_GROUP; i++) {
- if (ts->pdata->config_data_len[i])
- break;
- }
- if (i == GOODIX_MAX_CFG_GROUP) {
- sensor_id = 0;
- } else {
- ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID,
- &sensor_id, 1);
+ ret = gtp_i2c_read_dbl_check(ts->client, 0x41E4, opr_buf, 1);
if (ret == SUCCESS) {
- if (sensor_id >= GOODIX_MAX_CFG_GROUP) {
+ if (opr_buf[0] != 0xBE) {
+ ts->fw_error = 1;
dev_err(&client->dev,
- "Invalid sensor_id(0x%02X), No Config Sent!",
- sensor_id);
+ "Firmware error, no config sent!");
return -EINVAL;
}
+ }
+
+ for (i = 1; i < GOODIX_MAX_CFG_GROUP; i++) {
+ if (ts->pdata->config_data_len[i])
+ break;
+ }
+
+ if (i == GOODIX_MAX_CFG_GROUP) {
+ sensor_id = 0;
} else {
- dev_err(&client->dev,
- "Failed to get sensor_id, No config sent!");
- return -EINVAL;
+ ret = gtp_i2c_read_dbl_check(ts->client,
+ GTP_REG_SENSOR_ID, &sensor_id, 1);
+ if (ret == SUCCESS) {
+ if (sensor_id >= GOODIX_MAX_CFG_GROUP) {
+ dev_err(&client->dev,
+ "Invalid sensor_id(0x%02X), No Config Sent!",
+ sensor_id);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&client->dev,
+ "Failed to get sensor_id, No config sent!");
+ return -EINVAL;
+ }
}
- }
- dev_dbg(&client->dev, "Sensor ID selected: %d", sensor_id);
+ dev_info(&client->dev, "Sensor ID selected: %d", sensor_id);
- if (ts->pdata->config_data_len[sensor_id] < GTP_CONFIG_MIN_LENGTH ||
- !ts->pdata->config_data[sensor_id]) {
- dev_err(&client->dev,
- "Sensor_ID(%d) matches with NULL or invalid config group!\n",
- sensor_id);
- return -EINVAL;
- }
+ if (ts->pdata->config_data_len[sensor_id] <
+ GTP_CONFIG_MIN_LENGTH ||
+ !ts->pdata->config_data[sensor_id]) {
+ dev_err(&client->dev,
+ "Sensor_ID(%d) matches with NULL or invalid config group!\n",
+ sensor_id);
+ return -EINVAL;
+ }
- ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA,
- &opr_buf[0], 1);
- if (ret == SUCCESS) {
- if (opr_buf[0] < 90) {
- /* backup group config version */
- grp_cfg_version =
- ts->pdata->config_data[sensor_id][GTP_ADDR_LENGTH];
- ts->pdata->config_data[sensor_id][GTP_ADDR_LENGTH] =
- 0x00;
- ts->fixed_cfg = 0;
+ ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA,
+ &opr_buf[0], 1);
+ if (ret == SUCCESS) {
+ if (opr_buf[0] < 90) {
+ /* backup group config version */
+ grp_cfg_version =
+ ts->pdata->
+ config_data[sensor_id][GTP_ADDR_LENGTH];
+ ts->pdata->
+ config_data[sensor_id][GTP_ADDR_LENGTH]
+ = 0x00;
+ ts->fixed_cfg = 0;
+ } else {
+ /* treated as fixed config, not send config */
+ dev_warn(&client->dev,
+ "Ic fixed config with config version(%d, 0x%02X)",
+ opr_buf[0], opr_buf[0]);
+ ts->fixed_cfg = 1;
+ }
} else {
- /* treated as fixed config, not send config */
- dev_warn(&client->dev,
- "Ic fixed config with config version(%d, 0x%02X)",
- opr_buf[0], opr_buf[0]);
- ts->fixed_cfg = 1;
+ dev_err(&client->dev,
+ "Failed to get ic config version!No config sent!");
+ return -EINVAL;
}
- } else {
- dev_err(&client->dev,
- "Failed to get ic config version!No config sent!");
- return -EINVAL;
- }
- config_data = ts->pdata->config_data[sensor_id];
- ts->config_data = ts->pdata->config_data[sensor_id];
- ts->gtp_cfg_len = ts->pdata->config_data_len[sensor_id];
+ config_data = ts->pdata->config_data[sensor_id];
+ ts->config_data = ts->pdata->config_data[sensor_id];
+ ts->gtp_cfg_len = ts->pdata->config_data_len[sensor_id];
#if GTP_CUSTOM_CFG
- config_data[RESOLUTION_LOC] =
- (unsigned char)(GTP_MAX_WIDTH && 0xFF);
- config_data[RESOLUTION_LOC + 1] =
- (unsigned char)(GTP_MAX_WIDTH >> 8);
- config_data[RESOLUTION_LOC + 2] =
- (unsigned char)(GTP_MAX_HEIGHT && 0xFF);
- config_data[RESOLUTION_LOC + 3] =
- (unsigned char)(GTP_MAX_HEIGHT >> 8);
-
- if (GTP_INT_TRIGGER == 0)
- config_data[TRIGGER_LOC] &= 0xfe;
- else if (GTP_INT_TRIGGER == 1)
- config_data[TRIGGER_LOC] |= 0x01;
+ config_data[RESOLUTION_LOC] =
+			(unsigned char)(GTP_MAX_WIDTH & 0xFF);
+ config_data[RESOLUTION_LOC + 1] =
+ (unsigned char)(GTP_MAX_WIDTH >> 8);
+ config_data[RESOLUTION_LOC + 2] =
+			(unsigned char)(GTP_MAX_HEIGHT & 0xFF);
+ config_data[RESOLUTION_LOC + 3] =
+ (unsigned char)(GTP_MAX_HEIGHT >> 8);
+
+ if (GTP_INT_TRIGGER == 0)
+ config_data[TRIGGER_LOC] &= 0xfe;
+ else if (GTP_INT_TRIGGER == 1)
+ config_data[TRIGGER_LOC] |= 0x01;
#endif /* !GTP_CUSTOM_CFG */
- check_sum = 0;
- for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
- check_sum += config_data[i];
+ check_sum = 0;
+ for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
+ check_sum += config_data[i];
- config_data[ts->gtp_cfg_len] = (~check_sum) + 1;
+ config_data[ts->gtp_cfg_len] = (~check_sum) + 1;
-#else /* DRIVER NOT SEND CONFIG */
- ts->gtp_cfg_len = GTP_CONFIG_MAX_LENGTH;
- ret = gtp_i2c_read(ts->client, config_data,
+ } else { /* DRIVER NOT SEND CONFIG */
+ ts->gtp_cfg_len = GTP_CONFIG_MAX_LENGTH;
+ ret = gtp_i2c_read(ts->client, config_data,
ts->gtp_cfg_len + GTP_ADDR_LENGTH);
- if (ret < 0) {
- dev_err(&client->dev,
+ if (ret < 0) {
+ dev_err(&client->dev,
"Read Config Failed, Using DEFAULT Resolution & INT Trigger!\n");
- ts->abs_x_max = GTP_MAX_WIDTH;
- ts->abs_y_max = GTP_MAX_HEIGHT;
- ts->int_trigger_type = GTP_INT_TRIGGER;
- }
-#endif /* !DRIVER NOT SEND CONFIG */
+ ts->abs_x_max = GTP_MAX_WIDTH;
+ ts->abs_y_max = GTP_MAX_HEIGHT;
+ ts->int_trigger_type = GTP_INT_TRIGGER;
+ }
+ } /* !DRIVER NOT SEND CONFIG */
if ((ts->abs_x_max == 0) && (ts->abs_y_max == 0)) {
ts->abs_x_max = (config_data[RESOLUTION_LOC + 1] << 8)
@@ -1175,7 +1175,8 @@ static int gtp_request_irq(struct goodix_ts_data *ts)
int ret;
const u8 irq_table[] = GTP_IRQ_TAB;
- GTP_DEBUG("INT trigger type:%x, irq=%d", ts->int_trigger_type,
+ dev_dbg(&ts->client->dev, "INT trigger type:%x, irq=%d",
+ ts->int_trigger_type,
ts->client->irq);
ret = request_threaded_irq(ts->client->irq, NULL,
@@ -1204,9 +1205,7 @@ static int gtp_request_input_dev(struct goodix_ts_data *ts)
{
int ret;
char phys[PHY_BUF_SIZE];
-#if GTP_HAVE_TOUCH_KEY
int index = 0;
-#endif
ts->input_dev = input_allocate_device();
if (ts->input_dev == NULL) {
@@ -1222,26 +1221,24 @@ static int gtp_request_input_dev(struct goodix_ts_data *ts)
/* in case of "out of memory" */
input_mt_init_slots(ts->input_dev, 10, 0);
- for (index = 0; index < ts->pdata->num_button; index++) {
- input_set_capability(ts->input_dev,
+ if (ts->pdata->have_touch_key) {
+ for (index = 0; index < ts->pdata->num_button; index++) {
+ input_set_capability(ts->input_dev,
EV_KEY, ts->pdata->button_map[index]);
+ }
}
+ if (ts->pdata->slide_wakeup)
+ input_set_capability(ts->input_dev, EV_KEY, KEY_POWER);
-#if GTP_SLIDE_WAKEUP
- input_set_capability(ts->input_dev, EV_KEY, KEY_POWER);
-#endif
-
-#if GTP_WITH_PEN
- /* pen support */
- __set_bit(BTN_TOOL_PEN, ts->input_dev->keybit);
- __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
- __set_bit(INPUT_PROP_POINTER, ts->input_dev->propbit);
-#endif
+ if (ts->pdata->with_pen) { /* pen support */
+ __set_bit(BTN_TOOL_PEN, ts->input_dev->keybit);
+ __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+ __set_bit(INPUT_PROP_POINTER, ts->input_dev->propbit);
+ }
-#if GTP_CHANGE_X2Y
- swap(ts->abs_x_max, ts->abs_y_max);
-#endif
+ if (ts->pdata->change_x2y)
+ swap(ts->abs_x_max, ts->abs_y_max);
input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X,
0, ts->abs_x_max, 0, 0);
@@ -1281,7 +1278,7 @@ exit_free_inputdev:
static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
{
return (regulator_count_voltages(reg) > 0) ?
- regulator_set_optimum_mode(reg, load_uA) : 0;
+ regulator_set_load(reg, load_uA) : 0;
}
/**
@@ -1521,12 +1518,108 @@ static ssize_t gtp_fw_name_store(struct device *dev,
return size;
}
+static ssize_t gtp_fw_upgrade_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct goodix_ts_data *ts = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ts->fw_loading);
+}
+
+static ssize_t gtp_fw_upgrade_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct goodix_ts_data *ts = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ if (size > 2)
+ return -EINVAL;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (ts->gtp_is_suspend) {
+ dev_err(&ts->client->dev,
+ "Can't start fw upgrade. Device is in suspend state");
+ return -EBUSY;
+ }
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (!ts->fw_loading && val) {
+ disable_irq(ts->client->irq);
+ ts->fw_loading = true;
+ if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+ ret = gup_update_proc(NULL);
+ if (ret == FAIL)
+ dev_err(&ts->client->dev,
+ "Fail to update GTP firmware\n");
+ }
+ ts->fw_loading = false;
+ enable_irq(ts->client->irq);
+ }
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return size;
+}
+
+static ssize_t gtp_force_fw_upgrade_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct goodix_ts_data *ts = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ if (size > 2)
+ return -EINVAL;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (ts->gtp_is_suspend) {
+ dev_err(&ts->client->dev,
+ "Can't start fw upgrade. Device is in suspend state.");
+ return -EBUSY;
+ }
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (!ts->fw_loading && val) {
+ disable_irq(ts->client->irq);
+ ts->fw_loading = true;
+ ts->force_update = true;
+ if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+ ret = gup_update_proc(NULL);
+ if (ret == FAIL)
+ dev_err(&ts->client->dev,
+ "Fail to force update GTP firmware.\n");
+ }
+ ts->force_update = false;
+ ts->fw_loading = false;
+ enable_irq(ts->client->irq);
+ }
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return size;
+}
+
static DEVICE_ATTR(fw_name, (S_IRUGO | S_IWUSR | S_IWGRP),
gtp_fw_name_show,
gtp_fw_name_store);
+static DEVICE_ATTR(fw_upgrade, (S_IRUGO | S_IWUSR | S_IWGRP),
+ gtp_fw_upgrade_show,
+ gtp_fw_upgrade_store);
+static DEVICE_ATTR(force_fw_upgrade, (S_IRUGO | S_IWUSR | S_IWGRP),
+ gtp_fw_upgrade_show,
+ gtp_force_fw_upgrade_store);
static struct attribute *gtp_attrs[] = {
&dev_attr_fw_name.attr,
+ &dev_attr_fw_upgrade.attr,
+ &dev_attr_force_fw_upgrade.attr,
NULL
};
@@ -1534,6 +1627,84 @@ static const struct attribute_group gtp_attr_grp = {
.attrs = gtp_attrs,
};
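+
+/*
+ * Debugfs register access: write a register address (hex) to the "addr"
+ * file, then read the "data" file to fetch one byte from that register
+ * over I2C; writes to "data" are rejected.
+ */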
+static int gtp_debug_addr_is_valid(u16 addr)
+{
+ if (addr < GTP_VALID_ADDR_START || addr > GTP_VALID_ADDR_END) {
+ pr_err("GTP reg address is invalid: 0x%x\n", addr);
+ return false;
+ }
+
+ return true;
+}
+
+static int gtp_debug_data_set(void *_data, u64 val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (gtp_debug_addr_is_valid(ts->addr))
+ dev_err(&ts->client->dev,
+ "Writing to GTP registers not supported\n");
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+static int gtp_debug_data_get(void *_data, u64 *val)
+{
+ struct goodix_ts_data *ts = _data;
+ int ret;
+ u8 buf[3] = {0};
+
+ mutex_lock(&ts->input_dev->mutex);
+ buf[0] = ts->addr >> 8;
+ buf[1] = ts->addr & 0x00ff;
+
+ if (gtp_debug_addr_is_valid(ts->addr)) {
+ ret = gtp_i2c_read(ts->client, buf, 3);
+ if (ret < 0)
+ dev_err(&ts->client->dev,
+ "GTP read register 0x%x failed (%d)\n",
+ ts->addr, ret);
+ else
+ *val = buf[2];
+ }
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_data_fops, gtp_debug_data_get,
+ gtp_debug_data_set, "%llx\n");
+
+static int gtp_debug_addr_set(void *_data, u64 val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ if (gtp_debug_addr_is_valid(val)) {
+ mutex_lock(&ts->input_dev->mutex);
+ ts->addr = val;
+ mutex_unlock(&ts->input_dev->mutex);
+ }
+
+ return 0;
+}
+
+static int gtp_debug_addr_get(void *_data, u64 *val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (gtp_debug_addr_is_valid(ts->addr))
+ *val = ts->addr;
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_addr_fops, gtp_debug_addr_get,
+ gtp_debug_addr_set, "%llx\n");
+
static int gtp_debug_suspend_set(void *_data, u64 val)
{
struct goodix_ts_data *ts = _data;
@@ -1567,7 +1738,7 @@ static int gtp_debugfs_init(struct goodix_ts_data *data)
data->debug_base = debugfs_create_dir(GTP_DEBUGFS_DIR, NULL);
if (IS_ERR_OR_NULL(data->debug_base)) {
- pr_err("Failed to create debugfs dir\n");
+ dev_err(&data->client->dev, "Failed to create debugfs dir\n");
return -EINVAL;
}
@@ -1576,7 +1747,27 @@ static int gtp_debugfs_init(struct goodix_ts_data *data)
data->debug_base,
data,
&debug_suspend_fops)))) {
- pr_err("Failed to create suspend file\n");
+ dev_err(&data->client->dev, "Failed to create suspend file\n");
+ debugfs_remove_recursive(data->debug_base);
+ return -EINVAL;
+ }
+
+ if ((IS_ERR_OR_NULL(debugfs_create_file(GTP_DEBUGFS_FILE_DATA,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ data->debug_base,
+ data,
+ &debug_data_fops)))) {
+ dev_err(&data->client->dev, "Failed to create data file\n");
+ debugfs_remove_recursive(data->debug_base);
+ return -EINVAL;
+ }
+
+ if ((IS_ERR_OR_NULL(debugfs_create_file(GTP_DEBUGFS_FILE_ADDR,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ data->debug_base,
+ data,
+ &debug_addr_fops)))) {
+ dev_err(&data->client->dev, "Failed to create addr file\n");
debugfs_remove_recursive(data->debug_base);
return -EINVAL;
}
@@ -1632,7 +1823,7 @@ static int goodix_parse_dt(struct device *dev,
u32 temp_val, num_buttons;
u32 button_map[MAX_BUTTONS];
char prop_name[PROP_NAME_SIZE];
- int i, read_cfg_num;
+ int i, read_cfg_num, temp;
rc = goodix_ts_get_dt_coords(dev, "goodix,panel-coords", pdata);
if (rc && (rc != -EINVAL))
@@ -1645,11 +1836,30 @@ static int goodix_parse_dt(struct device *dev,
pdata->i2c_pull_up = of_property_read_bool(np,
"goodix,i2c-pull-up");
- pdata->no_force_update = of_property_read_bool(np,
- "goodix,no-force-update");
+ pdata->force_update = of_property_read_bool(np,
+ "goodix,force-update");
pdata->enable_power_off = of_property_read_bool(np,
"goodix,enable-power-off");
+
+ pdata->have_touch_key = of_property_read_bool(np,
+ "goodix,have-touch-key");
+
+ pdata->driver_send_cfg = of_property_read_bool(np,
+ "goodix,driver-send-cfg");
+
+ pdata->change_x2y = of_property_read_bool(np,
+ "goodix,change-x2y");
+
+ pdata->with_pen = of_property_read_bool(np,
+ "goodix,with-pen");
+
+ pdata->slide_wakeup = of_property_read_bool(np,
+ "goodix,slide-wakeup");
+
+ pdata->dbl_clk_wakeup = of_property_read_bool(np,
+ "goodix,dbl_clk_wakeup");
+
/* reset, irq gpio info */
pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios",
0, &pdata->reset_gpio_flags);
@@ -1695,14 +1905,15 @@ static int goodix_parse_dt(struct device *dev,
read_cfg_num = 0;
for (i = 0; i < GOODIX_MAX_CFG_GROUP; i++) {
+ temp = 0;
snprintf(prop_name, sizeof(prop_name), "goodix,cfg-data%d", i);
- prop = of_find_property(np, prop_name,
- &pdata->config_data_len[i]);
+ prop = of_find_property(np, prop_name, &temp);
if (!prop || !prop->value) {
pdata->config_data_len[i] = 0;
pdata->config_data[i] = NULL;
continue;
}
+ pdata->config_data_len[i] = temp;
pdata->config_data[i] = devm_kzalloc(dev,
GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH,
GFP_KERNEL);
@@ -1761,9 +1972,7 @@ static int goodix_ts_probe(struct i2c_client *client,
return -EINVAL;
}
-#if GTP_ESD_PROTECT
i2c_connect_client = client;
-#endif
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "GTP I2C not supported\n");
@@ -1811,22 +2020,24 @@ static int goodix_ts_probe(struct i2c_client *client,
goto exit_power_off;
}
+ if (pdata->force_update)
+ ts->force_update = true;
+
if (pdata->fw_name)
strlcpy(ts->fw_name, pdata->fw_name,
strlen(pdata->fw_name) + 1);
-#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
- ret = gup_init_update_proc(ts);
- if (ret < 0) {
- dev_err(&client->dev,
- "GTP Create firmware update thread error.\n");
- goto exit_power_off;
+ if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+ ret = gup_init_update_proc(ts);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "GTP Create firmware update thread error\n");
+ goto exit_power_off;
+ }
}
-#endif
-
ret = gtp_init_panel(ts);
if (ret < 0) {
- dev_err(&client->dev, "GTP init panel failed.\n");
+ dev_err(&client->dev, "GTP init panel failed\n");
ts->abs_x_max = GTP_MAX_WIDTH;
ts->abs_y_max = GTP_MAX_HEIGHT;
ts->int_trigger_type = GTP_INT_TRIGGER;
@@ -1834,7 +2045,7 @@ static int goodix_ts_probe(struct i2c_client *client,
ret = gtp_request_input_dev(ts);
if (ret) {
- dev_err(&client->dev, "GTP request input dev failed.\n");
+ dev_err(&client->dev, "GTP request input dev failed\n");
goto exit_free_inputdev;
}
input_set_drvdata(ts->input_dev, ts);
@@ -1905,8 +2116,6 @@ exit_free_irq:
#endif
if (ts->use_irq)
free_irq(client->irq, ts);
- else
- hrtimer_cancel(&ts->timer);
cancel_work_sync(&ts->work);
flush_workqueue(ts->goodix_wq);
destroy_workqueue(ts->goodix_wq);
@@ -1970,8 +2179,6 @@ static int goodix_ts_remove(struct i2c_client *client)
if (ts) {
if (ts->use_irq)
free_irq(client->irq, ts);
- else
- hrtimer_cancel(&ts->timer);
cancel_work_sync(&ts->work);
flush_workqueue(ts->goodix_wq);
@@ -2017,27 +2224,33 @@ static int goodix_ts_suspend(struct device *dev)
}
mutex_lock(&ts->lock);
+
+ if (ts->fw_loading) {
+ dev_info(&ts->client->dev,
+ "Fw upgrade in progress, can't go to suspend.");
+ mutex_unlock(&ts->lock);
+ return 0;
+ }
+
#if GTP_ESD_PROTECT
gtp_esd_switch(ts->client, SWITCH_OFF);
#endif
-#if GTP_SLIDE_WAKEUP
- ret = gtp_enter_doze(ts);
-#else
- if (ts->use_irq)
- gtp_irq_disable(ts);
- else
- hrtimer_cancel(&ts->timer);
+ if (ts->pdata->slide_wakeup) {
+ ret = gtp_enter_doze(ts);
+ } else {
+ if (ts->use_irq)
+ gtp_irq_disable(ts);
- for (i = 0; i < GTP_MAX_TOUCH; i++)
- gtp_touch_up(ts, i);
+ for (i = 0; i < GTP_MAX_TOUCH; i++)
+ gtp_touch_up(ts, i);
- input_sync(ts->input_dev);
+ input_sync(ts->input_dev);
- ret = gtp_enter_sleep(ts);
-#endif
- if (ret < 0)
- dev_err(&ts->client->dev, "GTP early suspend failed\n");
+ ret = gtp_enter_sleep(ts);
+ if (ret < 0)
+ dev_err(&ts->client->dev, "GTP early suspend failed.\n");
+ }
/* to avoid waking up while not sleeping,
* delay 48 + 10ms to ensure reliability
*/
@@ -2069,18 +2282,14 @@ static int goodix_ts_resume(struct device *dev)
mutex_lock(&ts->lock);
ret = gtp_wakeup_sleep(ts);
-#if GTP_SLIDE_WAKEUP
- doze_status = DOZE_DISABLED;
-#endif
+ if (ts->pdata->slide_wakeup)
+ doze_status = DOZE_DISABLED;
if (ret <= 0)
dev_err(&ts->client->dev, "GTP resume failed\n");
if (ts->use_irq)
gtp_irq_enable(ts);
- else
- hrtimer_start(&ts->timer,
- ktime_set(1, 0), HRTIMER_MODE_REL);
#if GTP_ESD_PROTECT
gtp_esd_switch(ts->client, SWITCH_ON);
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
index 38487eea7b10..1e85e2fce276 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.h
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -53,13 +53,19 @@ struct goodix_ts_platform_data {
u32 panel_miny;
u32 panel_maxx;
u32 panel_maxy;
- bool no_force_update;
+ bool force_update;
bool i2c_pull_up;
bool enable_power_off;
size_t config_data_len[GOODIX_MAX_CFG_GROUP];
u8 *config_data[GOODIX_MAX_CFG_GROUP];
u32 button_map[MAX_BUTTONS];
u8 num_button;
+ bool have_touch_key;
+ bool driver_send_cfg;
+ bool change_x2y;
+ bool with_pen;
+ bool slide_wakeup;
+ bool dbl_clk_wakeup;
};
struct goodix_ts_data {
spinlock_t irq_lock;
@@ -69,11 +75,13 @@ struct goodix_ts_data {
struct hrtimer timer;
struct workqueue_struct *goodix_wq;
struct work_struct work;
+ char fw_name[GTP_FW_NAME_MAXSIZE];
struct delayed_work goodix_update_work;
s32 irq_is_disabled;
s32 use_irq;
u16 abs_x_max;
u16 abs_y_max;
+ u16 addr;
u8 max_touch_num;
u8 int_trigger_type;
u8 green_wake_mode;
@@ -88,6 +96,8 @@ struct goodix_ts_data {
u8 fw_error;
bool power_on;
struct mutex lock;
+ bool fw_loading;
+ bool force_update;
struct regulator *avdd;
struct regulator *vdd;
struct regulator *vcc_i2c;
@@ -104,17 +114,7 @@ extern u16 total_len;
/***************************PART1:ON/OFF define*******************************/
#define GTP_CUSTOM_CFG 0
-#define GTP_CHANGE_X2Y 0
-#define GTP_DRIVER_SEND_CFG 1
-#define GTP_HAVE_TOUCH_KEY 1
-
#define GTP_ESD_PROTECT 0
-#define GTP_WITH_PEN 0
-
-/* This cannot work when enable-power-off is on */
-#define GTP_SLIDE_WAKEUP 0
-/* double-click wakeup, function together with GTP_SLIDE_WAKEUP */
-#define GTP_DBL_CLK_WAKEUP 0
#define GTP_IRQ_TAB {\
IRQ_TYPE_EDGE_RISING,\
@@ -172,6 +172,8 @@ extern u16 total_len;
/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
#define GTP_I2C_ADDRESS_HIGH 0x14
#define GTP_I2C_ADDRESS_LOW 0x5D
+#define GTP_VALID_ADDR_START 0x8040
+#define GTP_VALID_ADDR_END 0x8177
/* GTP CM_HEAD RW flags */
#define GTP_RW_READ 0
@@ -210,11 +212,9 @@ s32 init_wr_node(struct i2c_client *client);
void uninit_wr_node(void);
#endif
-#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
-extern u8 gup_init_update_proc(struct goodix_ts_data *ts);
+u8 gup_init_update_proc(struct goodix_ts_data *ts);
s32 gup_enter_update_mode(struct i2c_client *client);
void gup_leave_update_mode(struct i2c_client *client);
s32 gup_update_proc(void *dir);
extern struct i2c_client *i2c_connect_client;
-#endif
#endif /* _GOODIX_GT9XX_H_ */
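The header change above retires the old compile-time switches (GTP_HAVE_TOUCH_KEY, GTP_DRIVER_SEND_CFG, GTP_CHANGE_X2Y, GTP_WITH_PEN, GTP_SLIDE_WAKEUP, GTP_DBL_CLK_WAKEUP) in favour of per-board booleans in struct goodix_ts_platform_data, so a single driver image can serve boards with different feature sets and the suspend path can test ts->pdata->slide_wakeup at run time. A minimal sketch of how such flags could be filled from devicetree; the property names here are illustrative assumptions, not taken from this patch:

    #include <linux/of.h>
    #include "gt9xx.h"

    /* Sketch only: the "goodix,..." property names are hypothetical. */
    static void goodix_parse_feature_flags(struct device_node *np,
                                           struct goodix_ts_platform_data *pdata)
    {
            pdata->have_touch_key  = of_property_read_bool(np, "goodix,have-touch-key");
            pdata->driver_send_cfg = of_property_read_bool(np, "goodix,driver-send-cfg");
            pdata->change_x2y      = of_property_read_bool(np, "goodix,change-x2y");
            pdata->with_pen        = of_property_read_bool(np, "goodix,with-pen");
            pdata->slide_wakeup    = of_property_read_bool(np, "goodix,slide-wakeup");
            pdata->dbl_clk_wakeup  = of_property_read_bool(np, "goodix,dbl-clk-wakeup");
    }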
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_update.c b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
index 4660b27d156c..6bc243492272 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx_update.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
@@ -67,6 +67,8 @@
#define FAIL 0
#define SUCCESS 1
+#define RESET_DELAY_US 20000
+
struct st_fw_head {
u8 hw_info[4]; /* hardware info */
u8 pid[8]; /* product id */
@@ -390,7 +392,7 @@ s32 gup_enter_update_mode(struct i2c_client *client)
/* step1:RST output low last at least 2ms */
gpio_direction_output(ts->pdata->reset_gpio, 0);
- usleep(20000);
+ usleep_range(RESET_DELAY_US, RESET_DELAY_US + 1);
/* step2:select I2C slave addr,INT:0--0xBA;1--0x28. */
gpio_direction_output(ts->pdata->irq_gpio,
@@ -565,8 +567,8 @@ static s8 gup_update_config(struct i2c_client *client,
!memcmp(&pid[GTP_ADDR_LENGTH], "960", 3)) {
chip_cfg_len = 228;
}
- pr_debug("config file ASCII len:%d", cfg->size);
- pr_debug("need config binary len:%d", chip_cfg_len);
+ pr_debug("config file ASCII len: %zu", cfg->size);
+ pr_debug("need config binary len: %u", chip_cfg_len);
if ((cfg->size + 5) < chip_cfg_len * 5) {
pr_err("Config length error");
return -EINVAL;
@@ -643,7 +645,7 @@ static s32 gup_get_firmware_file(struct i2c_client *client,
return -EEXIST;
}
- dev_dbg(&client->dev, "Config File: %s size=%d", path, fw->size);
+ dev_dbg(&client->dev, "Config File: %s size: %zu", path, fw->size);
msg->fw_data =
devm_kzalloc(&client->dev, fw->size, GFP_KERNEL);
if (!msg->fw_data) {
@@ -1408,10 +1410,15 @@ s32 gup_update_proc(void *dir)
goto file_fail;
}
- ret = gup_enter_update_judge(ts->client, &fw_head);
- if (ret == FAIL) {
- pr_err("Check *.bin file fail");
- goto file_fail;
+ if (ts->force_update) {
+ dev_dbg(&ts->client->dev, "Enter force update.");
+ } else {
+ ret = gup_enter_update_judge(ts->client, &fw_head);
+ if (ret == FAIL) {
+ dev_err(&ts->client->dev,
+ "Check *.bin file fail.");
+ goto file_fail;
+ }
}
ts->enter_update = 1;
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 9bbadaaf6bc3..7b3845aa5983 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.coord_x = point.coord_y = 0;
}
- point.state = payload[9 * i + 5] & 0x03;
- point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+ point.state = payload[9 * i + 5] & 0x0f;
+ point.id = (payload[9 * i + 5] & 0xf0) >> 4;
/* determine touch major, minor and orientation */
point.area_major = max(payload[9 * i + 6],
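The zforce change above widens the per-touch state field to the low nibble and moves the contact id into the high nibble of the same payload byte. A tiny worked example of the new decoding; the byte value is made up for illustration:

    #include <linux/types.h>

    /* Sketch only: 0x25 is an arbitrary example payload byte, not from the patch. */
    static void zforce_decode_example(void)
    {
            u8 byte  = 0x25;                /* 0b0010_0101 */
            u8 state = byte & 0x0f;         /* = 0x5, low nibble: touch state */
            u8 id    = (byte & 0xf0) >> 4;  /* = 0x2, high nibble: contact id */

            (void)state;
            (void)id;
    }

With the old 2-bit/6-bit split the same byte would have decoded as state 0x1, id 0x9.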
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index fc836f523afa..b9319b76a8a1 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -91,6 +91,7 @@ struct iommu_dev_data {
struct list_head dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
+ u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
@@ -125,6 +126,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
+static inline u16 get_device_id(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -162,6 +170,68 @@ out_unlock:
return dev_data;
}
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ *(u16 *)data = alias;
+ return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 devid, ivrs_alias, pci_alias;
+
+ devid = get_device_id(dev);
+ ivrs_alias = amd_iommu_alias_table[devid];
+ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+ if (ivrs_alias == pci_alias)
+ return ivrs_alias;
+
+ /*
+ * DMA alias showdown
+ *
+ * The IVRS is fairly reliable in telling us about aliases, but it
+ * can't know about every screwy device. If we don't have an IVRS
+ * reported alias, use the PCI reported alias. In that case we may
+ * still need to initialize the rlookup and dev_table entries if the
+ * alias is to a non-existent device.
+ */
+ if (ivrs_alias == devid) {
+ if (!amd_iommu_rlookup_table[pci_alias]) {
+ amd_iommu_rlookup_table[pci_alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[pci_alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[pci_alias].data));
+ }
+
+ return pci_alias;
+ }
+
+ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+ "for device %s[%04x:%04x], kernel reported alias "
+ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+ PCI_FUNC(pci_alias));
+
+ /*
+ * If we don't have a PCI DMA alias and the IVRS alias is on the same
+ * bus, then the IVRS table may know about a quirk that we don't.
+ */
+ if (pci_alias == devid &&
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ pdev->dma_alias_devfn = ivrs_alias & 0xff;
+ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+ dev_name(dev));
+ }
+
+ return ivrs_alias;
+}
+
static struct iommu_dev_data *find_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -174,13 +244,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
return dev_data;
}
-static inline u16 get_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
static struct iommu_dev_data *get_dev_data(struct device *dev)
{
return dev->archdata.iommu;
@@ -308,6 +371,8 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
+ dev_data->alias = get_alias(dev);
+
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
@@ -328,7 +393,7 @@ static void iommu_ignore_device(struct device *dev)
u16 devid, alias;
devid = get_device_id(dev);
- alias = amd_iommu_alias_table[devid];
+ alias = get_alias(dev);
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1017,7 +1082,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ret = iommu_flush_dte(iommu, dev_data->devid);
if (!ret && alias != dev_data->devid)
@@ -1891,7 +1956,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1925,7 +1990,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
return;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
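The new get_device_id()/get_alias() helpers above work on 16-bit PCI requester IDs, which pack the bus number into the high byte and the devfn (slot and function) into the low byte; the PCI_BUS_NUM/PCI_SLOT/PCI_FUNC conversions in the pr_info above unpack the same encoding. A short sketch, with an arbitrary example ID:

    #include <linux/pci.h>
    #include <linux/printk.h>

    /* Sketch only: 0x0310 is an arbitrary example requester ID. */
    static void devid_packing_example(struct pci_dev *pdev)
    {
            u16 devid = PCI_DEVID(pdev->bus->number, pdev->devfn); /* (bus << 8) | devfn */
            u16 other = 0x0310;                                    /* bus 0x03, devfn 0x10 */

            pr_info("%02x:%02x.%d\n", PCI_BUS_NUM(other),
                    PCI_SLOT(other), PCI_FUNC(other));
            /* prints "03:02.0": devfn 0x10 is slot 2, function 0 */
            (void)devid;
    }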
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index afa519aa8203..fda10abae58a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1870,6 +1870,10 @@ free_irqs:
arm_smmu_free_context_idx(smmu, cfg->cbndx);
smmu_domain->smmu = NULL;
+ cfg->cbndx = INVALID_CBNDX;
+ cfg->irptndx = INVALID_IRPTNDX;
+ cfg->asid = INVALID_ASID;
+ cfg->vmid = INVALID_VMID;
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
@@ -2938,7 +2942,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
*/
group = generic_device_group(dev);
- if (IS_ERR(group))
+ if (IS_ERR_OR_NULL(group))
return group;
if (dev_is_pci(dev))
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182666cb..58f2fe687a24 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int s_length = sg_dma_len(s);
unsigned int s_dma_len = s->length;
- s->offset = s_offset;
+ s->offset += s_offset;
s->length = s_length;
sg_dma_address(s) = dma_addr + s_offset;
dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
for_each_sg(sg, s, nents, i) {
if (sg_dma_address(s) != DMA_ERROR_CODE)
- s->offset = sg_dma_address(s);
+ s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index efe50845939d..17304705f2cf 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
void __iomem *icoll_base;
icoll_base = of_io_request_and_map(np, 0, np->name);
- if (!icoll_base)
+ if (IS_ERR(icoll_base))
panic("%s: unable to map resource", np->full_name);
return icoll_base;
}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 4ef178078e5b..1254e98f6b57 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -154,9 +154,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc = irq_get_domain_generic_chip(domain, 0);
gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (!gc->reg_base) {
+ if (IS_ERR(gc->reg_base)) {
pr_err("unable to map resource\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(gc->reg_base);
goto fail_irqd_remove;
}
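Both irqchip hunks above fix the same mistake: of_io_request_and_map() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so the result must be tested with IS_ERR() and the errno recovered with PTR_ERR(). A minimal sketch of the corrected pattern:

    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/printk.h>

    /* Sketch of the error-handling pattern adopted by the two hunks above. */
    static void __iomem *map_reg_base(struct device_node *np)
    {
            void __iomem *base = of_io_request_and_map(np, 0, np->name);

            if (IS_ERR(base)) {
                    pr_err("%s: unable to map resource (%ld)\n",
                           np->full_name, PTR_ERR(base));
                    return NULL;  /* or propagate PTR_ERR(base), as irq-sunxi-nmi now does */
            }

            return base;
    }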
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index bc94dff08d21..325bdb35e8a3 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -82,6 +82,8 @@
#define VPH_DROOP_THRESH_MV_TO_VAL(val_mv) ((val_mv / 100) - 25)
#define VPH_DROOP_THRESH_VAL_TO_UV(val) ((val + 25) * 100000)
#define MITIGATION_THRSH_MA_TO_VAL(val_ma) (val_ma / 100)
+#define CURRENT_MA_TO_REG_VAL(curr_ma, ires_ua) ((curr_ma * 1000) / ires_ua - 1)
+#define SAFETY_TMR_TO_REG_VAL(duration_ms) ((duration_ms / 10) - 1)
#define FLASH_LED_ISC_WARMUP_DELAY_SHIFT 6
#define FLASH_LED_WARMUP_DELAY_DEFAULT 2
@@ -97,8 +99,6 @@
#define FLASH_LED_VLED_MAX_DEFAULT_UV 3500000
#define FLASH_LED_IBATT_OCP_THRESH_DEFAULT_UA 4500000
#define FLASH_LED_RPARA_DEFAULT_UOHM 0
-#define FLASH_LED_SAFETY_TMR_VAL_OFFSET 1
-#define FLASH_LED_SAFETY_TMR_VAL_DIVISOR 10
#define FLASH_LED_SAFETY_TMR_ENABLE BIT(7)
#define FLASH_LED_LMH_LEVEL_DEFAULT 0
#define FLASH_LED_LMH_MITIGATION_ENABLE 1
@@ -738,7 +738,8 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
prgm_current_ma = min(prgm_current_ma, fnode->max_current);
fnode->current_ma = prgm_current_ma;
fnode->cdev.brightness = prgm_current_ma;
- fnode->current_reg_val = prgm_current_ma * 1000 / fnode->ires_ua + 1;
+ fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
+ fnode->ires_ua);
fnode->led_on = prgm_current_ma != 0;
}
@@ -1341,9 +1342,7 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
fnode->duration = FLASH_LED_SAFETY_TMR_DISABLED;
rc = of_property_read_u32(node, "qcom,duration-ms", &val);
if (!rc) {
- fnode->duration = (u8)(((val -
- FLASH_LED_SAFETY_TMR_VAL_OFFSET) /
- FLASH_LED_SAFETY_TMR_VAL_DIVISOR) |
+ fnode->duration = (u8)(SAFETY_TMR_TO_REG_VAL(val) |
FLASH_LED_SAFETY_TMR_ENABLE);
} else if (rc == -EINVAL) {
if (fnode->type == FLASH_LED_TYPE_FLASH) {
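The two new macros above simply name the existing register arithmetic: CURRENT_MA_TO_REG_VAL() converts a programmed current into a register code for the given current-resolution step, and SAFETY_TMR_TO_REG_VAL() converts a safety-timer duration in 10 ms steps (note the current conversion also changes the old '+ 1' offset to '- 1'). A worked example; the 12500 uA resolution and 1280 ms duration are illustrative inputs only, evaluated with the macros from leds-qpnp-flash-v2.c:

    /* Illustration only: 12500 uA and 1280 ms are made-up inputs. */
    int current_code = CURRENT_MA_TO_REG_VAL(1000, 12500); /* 1000 * 1000 / 12500 - 1 = 79  */
    int timer_code   = SAFETY_TMR_TO_REG_VAL(1280);        /* 1280 / 10 - 1          = 127 */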
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index d9626c29ce76..894c1d88b3ef 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/leds-qpnp-wled.h>
+#include <linux/qpnp/qpnp-revid.h>
#define QPNP_IRQ_FLAGS (IRQF_TRIGGER_RISING | \
IRQF_TRIGGER_FALLING | \
@@ -44,15 +45,19 @@
#define QPNP_WLED_SWITCH_FREQ_REG(b) (b + 0x4C)
#define QPNP_WLED_OVP_REG(b) (b + 0x4D)
#define QPNP_WLED_ILIM_REG(b) (b + 0x4E)
+#define QPNP_WLED_AMOLED_VOUT_REG(b) (b + 0x4F)
#define QPNP_WLED_SOFTSTART_RAMP_DLY(b) (b + 0x53)
#define QPNP_WLED_VLOOP_COMP_RES_REG(b) (b + 0x55)
#define QPNP_WLED_VLOOP_COMP_GM_REG(b) (b + 0x56)
#define QPNP_WLED_PSM_CTRL_REG(b) (b + 0x5B)
#define QPNP_WLED_SC_PRO_REG(b) (b + 0x5E)
+#define QPNP_WLED_SWIRE_AVDD_REG(b) (b + 0x5F)
+#define QPNP_WLED_CTRL_SPARE_REG(b) (b + 0xDF)
#define QPNP_WLED_TEST1_REG(b) (b + 0xE2)
#define QPNP_WLED_TEST4_REG(b) (b + 0xE5)
#define QPNP_WLED_REF_7P7_TRIM_REG(b) (b + 0xF2)
+#define QPNP_WLED_7P7_TRIM_MASK GENMASK(3, 0)
#define QPNP_WLED_EN_MASK 0x7F
#define QPNP_WLED_EN_SHIFT 7
#define QPNP_WLED_FDBK_OP_MASK 0xF8
@@ -79,16 +84,16 @@
#define QPNP_WLED_VREF_PSM_MAX_MV 750
#define QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV 450
#define QPNP_WLED_PSM_CTRL_OVERWRITE 0x80
-#define QPNP_WLED_AVDD_MIN_TRIM_VALUE -7
-#define QPNP_WLED_AVDD_MAX_TRIM_VALUE 8
-#define QPNP_WLED_AVDD_TRIM_CENTER_VALUE 7
-
-#define QPNP_WLED_ILIM_MASK 0xF8
-#define QPNP_WLED_ILIM_MIN_MA 105
-#define QPNP_WLED_ILIM_MAX_MA 1980
-#define QPNP_WLED_ILIM_STEP_MA 280
-#define QPNP_WLED_DFLT_ILIM_MA 980
-#define QPNP_WLED_ILIM_OVERWRITE 0x80
+
+#define QPNP_WLED_ILIM_MASK GENMASK(2, 0)
+#define QPNP_WLED_ILIM_OVERWRITE BIT(7)
+#define PMI8994_WLED_ILIM_MIN_MA 105
+#define PMI8994_WLED_ILIM_MAX_MA 1980
+#define PMI8994_WLED_DFLT_ILIM_MA 980
+#define PMI8994_AMOLED_DFLT_ILIM_MA 385
+#define PMICOBALT_WLED_ILIM_MAX_MA 1500
+#define PMICOBALT_WLED_DFLT_ILIM_MA 970
+#define PMICOBALT_AMOLED_DFLT_ILIM_MA 620
#define QPNP_WLED_BOOST_DUTY_MASK 0xFC
#define QPNP_WLED_BOOST_DUTY_STEP_NS 52
#define QPNP_WLED_BOOST_DUTY_MIN_NS 26
@@ -98,11 +103,7 @@
#define QPNP_WLED_SWITCH_FREQ_800_KHZ 800
#define QPNP_WLED_SWITCH_FREQ_1600_KHZ 1600
#define QPNP_WLED_SWITCH_FREQ_OVERWRITE 0x80
-#define QPNP_WLED_OVP_MASK 0xFC
-#define QPNP_WLED_OVP_17800_MV 17800
-#define QPNP_WLED_OVP_19400_MV 19400
-#define QPNP_WLED_OVP_29500_MV 29500
-#define QPNP_WLED_OVP_31000_MV 31000
+#define QPNP_WLED_OVP_MASK GENMASK(1, 0)
#define QPNP_WLED_TEST4_EN_VREF_UP 0x32
#define QPNP_WLED_INT_EN_SET_OVP_EN 0x02
#define QPNP_WLED_OVP_FLT_SLEEP_US 10
@@ -198,6 +199,22 @@
#define QPNP_WLED_MIN_MSLEEP 20
#define QPNP_WLED_SC_DLY_MS 20
+#define NUM_SUPPORTED_AVDD_VOLTAGES 6
+#define QPNP_WLED_DFLT_AVDD_MV 7600
+#define QPNP_WLED_AVDD_MIN_MV 5650
+#define QPNP_WLED_AVDD_MAX_MV 7900
+#define QPNP_WLED_AVDD_STEP_MV 150
+#define QPNP_WLED_AVDD_MIN_TRIM_VAL 0x0
+#define QPNP_WLED_AVDD_MAX_TRIM_VAL 0xF
+#define QPNP_WLED_AVDD_SEL_SPMI_BIT BIT(7)
+#define QPNP_WLED_AVDD_SET_BIT BIT(4)
+
+#define NUM_SUPPORTED_OVP_THRESHOLDS 4
+#define NUM_SUPPORTED_ILIM_THRESHOLDS 8
+
+#define QPNP_WLED_AVDD_MV_TO_REG(val) \
+ ((val - QPNP_WLED_AVDD_MIN_MV) / QPNP_WLED_AVDD_STEP_MV)
+
/* output feedback mode */
enum qpnp_wled_fdbk_op {
QPNP_WLED_FDBK_AUTO,
@@ -230,10 +247,38 @@ static u8 qpnp_wled_sink_dbg_regs[] = {
0xe6,
};
+static int qpnp_wled_avdd_target_voltages[NUM_SUPPORTED_AVDD_VOLTAGES] = {
+ 7900, 7600, 7300, 6400, 6100, 5800,
+};
+
+static u8 qpnp_wled_ovp_reg_settings[NUM_SUPPORTED_AVDD_VOLTAGES] = {
+ 0x0, 0x0, 0x1, 0x2, 0x2, 0x3,
+};
+
+static int qpnp_wled_avdd_trim_adjustments[NUM_SUPPORTED_AVDD_VOLTAGES] = {
+ 3, 0, -2, 7, 3, 3,
+};
+
+static int qpnp_wled_ovp_thresholds_pmi8994[NUM_SUPPORTED_OVP_THRESHOLDS] = {
+ 31000, 29500, 19400, 17800,
+};
+
+static int qpnp_wled_ovp_thresholds_pmicobalt[NUM_SUPPORTED_OVP_THRESHOLDS] = {
+ 31100, 29600, 19600, 18100,
+};
+
+static int qpnp_wled_ilim_settings_pmi8994[NUM_SUPPORTED_ILIM_THRESHOLDS] = {
+ 105, 385, 660, 980, 1150, 1420, 1700, 1980,
+};
+
+static int qpnp_wled_ilim_settings_pmicobalt[NUM_SUPPORTED_ILIM_THRESHOLDS] = {
+ 105, 280, 450, 620, 970, 1150, 1300, 1500,
+};
+
/**
 * qpnp_wled - wled data structure
* @ cdev - led class device
- * @ spmi - spmi device
+ * @ pdev - platform device
* @ work - worker for led operation
* @ lock - mutex lock for exclusive access
* @ fdbk_op - output feedback mode
@@ -241,7 +286,7 @@ static u8 qpnp_wled_sink_dbg_regs[] = {
* @ ovp_irq - over voltage protection irq
* @ sc_irq - short circuit irq
* @ sc_cnt - short circuit irq count
- * @ avdd_trim_steps_from_center - number of steps to trim from center value
+ * @ avdd_target_voltage_mv - target voltage for AVDD module in mV
* @ ctrl_base - base address for wled ctrl
* @ sink_base - base address for wled sink
* @ ibb_base - base address for IBB(Inverting Buck Boost)
@@ -264,6 +309,7 @@ static u8 qpnp_wled_sink_dbg_regs[] = {
* @ cons_sync_write_delay_us - delay between two consecutive writes to SYNC
* @ strings - supported list of strings
* @ num_strings - number of strings
+ * @ avdd_mode_spmi - enable avdd programming via spmi
* @ en_9b_dim_res - enable or disable 9bit dimming
* @ en_phase_stag - enable or disable phase staggering
* @ en_cabc - enable or disable cabc
@@ -276,14 +322,16 @@ struct qpnp_wled {
struct led_classdev cdev;
struct platform_device *pdev;
struct regmap *regmap;
+ struct pmic_revid_data *pmic_rev_id;
struct work_struct work;
struct mutex lock;
+ struct mutex bus_lock;
enum qpnp_wled_fdbk_op fdbk_op;
enum qpnp_wled_dim_mode dim_mode;
int ovp_irq;
int sc_irq;
u32 sc_cnt;
- u32 avdd_trim_steps_from_center;
+ u32 avdd_target_voltage_mv;
u16 ctrl_base;
u16 sink_base;
u16 mod_freq_khz;
@@ -304,6 +352,7 @@ struct qpnp_wled {
u16 cons_sync_write_delay_us;
u8 strings[QPNP_WLED_MAX_STRINGS];
u8 num_strings;
+ bool avdd_mode_spmi;
bool en_9b_dim_res;
bool en_phase_stag;
bool en_cabc;
@@ -319,39 +368,79 @@ static int qpnp_wled_read_reg(struct qpnp_wled *wled, u8 *data, u16 addr)
uint val;
rc = regmap_read(wled->regmap, addr, &val);
- if (rc < 0)
+ if (rc < 0) {
dev_err(&wled->pdev->dev,
"Error reading address: %x(%d)\n", addr, rc);
+ return rc;
+ }
+
*data = (u8)val;
- return rc;
+ return 0;
}
/* helper to write a pmic register */
-static int qpnp_wled_write_reg(struct qpnp_wled *wled, u8 *data, u16 addr)
+static int qpnp_wled_write_reg(struct qpnp_wled *wled, u8 data, u16 addr)
{
int rc;
- rc = regmap_write(wled->regmap, addr, *data);
+ mutex_lock(&wled->bus_lock);
+ rc = regmap_write(wled->regmap, addr, data);
+ if (rc < 0) {
+ dev_err(&wled->pdev->dev, "Error writing address: %x(%d)\n",
+ addr, rc);
+ goto out;
+ }
+
+ dev_dbg(&wled->pdev->dev, "wrote: WLED_0x%x = 0x%x\n", addr, data);
+out:
+ mutex_unlock(&wled->bus_lock);
+ return rc;
+}
+
+static int qpnp_wled_masked_write_reg(struct qpnp_wled *wled, u8 mask, u8 *data,
+ u16 addr)
+{
+ u8 reg;
+ int rc;
+
+ rc = qpnp_wled_read_reg(wled, &reg, addr);
if (rc < 0)
- dev_err(&wled->pdev->dev,
- "Error writing address: %x(%d)\n", addr, rc);
+ return rc;
+
+ reg &= ~mask;
+ reg |= *data & mask;
- dev_dbg(&wled->pdev->dev, "write: WLED_0x%x = 0x%x\n", addr, *data);
+ rc = qpnp_wled_write_reg(wled, reg, addr);
return rc;
}
-static int qpnp_wled_sec_access(struct qpnp_wled *wled, u16 base_addr)
+static int qpnp_wled_sec_write_reg(struct qpnp_wled *wled, u8 data, u16 addr)
{
int rc;
u8 reg = QPNP_WLED_SEC_UNLOCK;
+ u16 base_addr = addr & 0xFF00;
- rc = qpnp_wled_write_reg(wled, &reg,
- QPNP_WLED_SEC_ACCESS_REG(base_addr));
- if (rc)
- return rc;
+ mutex_lock(&wled->bus_lock);
+ rc = regmap_write(wled->regmap, QPNP_WLED_SEC_ACCESS_REG(base_addr),
+ reg);
+ if (rc < 0) {
+ dev_err(&wled->pdev->dev, "Error writing address: %x(%d)\n",
+ QPNP_WLED_SEC_ACCESS_REG(base_addr), rc);
+ goto out;
+ }
- return 0;
+ rc = regmap_write(wled->regmap, addr, data);
+ if (rc < 0) {
+ dev_err(&wled->pdev->dev, "Error writing address: %x(%d)\n",
+ addr, rc);
+ goto out;
+ }
+
+ dev_dbg(&wled->pdev->dev, "wrote: WLED_0x%x = 0x%x\n", addr, data);
+out:
+ mutex_unlock(&wled->bus_lock);
+ return rc;
}
static int qpnp_wled_sync_reg_toggle(struct qpnp_wled *wled)
@@ -361,7 +450,7 @@ static int qpnp_wled_sync_reg_toggle(struct qpnp_wled *wled)
/* sync */
reg = QPNP_WLED_SYNC;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_SYNC_REG(wled->sink_base));
if (rc < 0)
return rc;
@@ -371,7 +460,7 @@ static int qpnp_wled_sync_reg_toggle(struct qpnp_wled *wled)
wled->cons_sync_write_delay_us + 1);
reg = QPNP_WLED_SYNC_RESET;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_SYNC_REG(wled->sink_base));
if (rc < 0)
return rc;
@@ -388,7 +477,7 @@ static int qpnp_wled_set_level(struct qpnp_wled *wled, int level)
/* set brightness registers */
for (i = 0; i < wled->num_strings; i++) {
reg = level & QPNP_WLED_BRIGHT_LSB_MASK;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_BRIGHT_LSB_REG(wled->sink_base,
wled->strings[i]));
if (rc < 0)
@@ -396,7 +485,7 @@ static int qpnp_wled_set_level(struct qpnp_wled *wled, int level)
reg = level >> QPNP_WLED_BRIGHT_MSB_SHIFT;
reg = reg & QPNP_WLED_BRIGHT_MSB_MASK;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_BRIGHT_MSB_REG(wled->sink_base,
wled->strings[i]));
if (rc < 0)
@@ -421,7 +510,7 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled,
/* disable OVP fault interrupt */
if (state) {
reg = QPNP_WLED_INT_EN_SET_OVP_EN;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_INT_EN_CLR(base_addr));
if (rc)
return rc;
@@ -433,7 +522,7 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled,
return rc;
reg &= QPNP_WLED_MODULE_EN_MASK;
reg |= (state << QPNP_WLED_MODULE_EN_SHIFT);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_MODULE_EN_REG(base_addr));
if (rc)
return rc;
@@ -442,7 +531,7 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled,
if (state && (wled->ovp_irq > 0)) {
udelay(QPNP_WLED_OVP_FLT_SLEEP_US);
reg = QPNP_WLED_INT_EN_SET_OVP_EN;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_INT_EN_SET(base_addr));
if (rc)
return rc;
@@ -678,7 +767,7 @@ static ssize_t qpnp_wled_dim_mode_store(struct device *dev,
reg |= temp;
}
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_MOD_REG(wled->sink_base));
if (rc)
return rc;
@@ -722,7 +811,7 @@ static ssize_t qpnp_wled_fs_curr_ua_store(struct device *dev,
reg &= QPNP_WLED_FS_CURR_MASK;
temp = data / QPNP_WLED_FS_CURR_STEP_UA;
reg |= temp;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_FS_CURR_REG(wled->sink_base,
wled->strings[i]));
if (rc)
@@ -838,11 +927,7 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
reg &= QPNP_WLED_DISP_SEL_MASK;
reg |= (wled->disp_type_amoled << QPNP_WLED_DISP_SEL_SHIFT);
- rc = qpnp_wled_sec_access(wled, base_addr);
- if (rc)
- return rc;
-
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_sec_write_reg(wled, reg,
QPNP_WLED_DISP_SEL_REG(base_addr));
if (rc)
return rc;
@@ -863,7 +948,7 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
reg |= ((wled->vref_psm_mv - QPNP_WLED_VREF_PSM_MIN_MV)/
QPNP_WLED_VREF_PSM_STEP_MV);
reg |= QPNP_WLED_PSM_CTRL_OVERWRITE;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base));
if (rc)
return rc;
@@ -887,7 +972,7 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
QPNP_WLED_LOOP_COMP_RES_MIN_KOHM)/
QPNP_WLED_LOOP_COMP_RES_STEP_KOHM);
reg |= QPNP_WLED_VLOOP_COMP_RES_OVERWRITE;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_VLOOP_COMP_RES_REG(wled->ctrl_base));
if (rc)
return rc;
@@ -905,7 +990,7 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
reg &= QPNP_WLED_VLOOP_COMP_GM_MASK;
reg |= (wled->loop_ea_gm | QPNP_WLED_VLOOP_COMP_GM_OVERWRITE);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_VLOOP_COMP_GM_REG(wled->ctrl_base));
if (rc)
return rc;
@@ -916,12 +1001,8 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
if (rc < 0)
return rc;
- rc = qpnp_wled_sec_access(wled, base_addr);
- if (rc)
- return rc;
-
reg |= QPNP_WLED_TEST4_EN_IIND_UP;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_sec_write_reg(wled, reg,
QPNP_WLED_TEST4_REG(base_addr));
if (rc)
return rc;
@@ -929,12 +1010,8 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
/*
* enable VREF_UP to avoid false ovp on low brightness for LCD
*/
- rc = qpnp_wled_sec_access(wled, base_addr);
- if (rc)
- return rc;
-
reg = QPNP_WLED_TEST4_EN_VREF_UP;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_sec_write_reg(wled, reg,
QPNP_WLED_TEST4_REG(base_addr));
if (rc)
return rc;
@@ -968,6 +1045,209 @@ static irqreturn_t qpnp_wled_sc_irq(int irq, void *_wled)
return IRQ_HANDLED;
}
+static bool is_avdd_trim_adjustment_required(struct qpnp_wled *wled)
+{
+ int rc;
+ u8 reg = 0;
+
+ /*
+ * AVDD trim adjustment is not required for pmicobalt/pm2falcon and not
+ * supported for pmi8994.
+ */
+ if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PMI8994_SUBTYPE)
+ return false;
+
+ /*
+ * Configure TRIM_REG only if disp_type_amoled is set and it has
+ * not already been programmed by the bootloader.
+ */
+ if (!wled->disp_type_amoled)
+ return false;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_CTRL_SPARE_REG(wled->ctrl_base));
+ if (rc < 0)
+ return false;
+
+ return !(reg & QPNP_WLED_AVDD_SET_BIT);
+}
+
+static int qpnp_wled_ovp_config(struct qpnp_wled *wled)
+{
+ int rc, i, *ovp_table;
+ u8 reg;
+
+ /*
+ * Configure the OVP register based on ovp_mv only if display type is
+ * not AMOLED.
+ */
+ if (wled->disp_type_amoled)
+ return 0;
+
+ if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE)
+ ovp_table = qpnp_wled_ovp_thresholds_pmicobalt;
+ else
+ ovp_table = qpnp_wled_ovp_thresholds_pmi8994;
+
+ for (i = 0; i < NUM_SUPPORTED_OVP_THRESHOLDS; i++) {
+ if (wled->ovp_mv == ovp_table[i])
+ break;
+ }
+
+ if (i == NUM_SUPPORTED_OVP_THRESHOLDS) {
+ dev_err(&wled->pdev->dev,
+ "Invalid ovp threshold specified in device tree\n");
+ return -EINVAL;
+ }
+
+ reg = i & QPNP_WLED_OVP_MASK;
+ rc = qpnp_wled_masked_write_reg(wled, QPNP_WLED_OVP_MASK, &reg,
+ QPNP_WLED_OVP_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int qpnp_wled_avdd_trim_config(struct qpnp_wled *wled)
+{
+ int rc, i;
+ u8 reg;
+
+ for (i = 0; i < NUM_SUPPORTED_AVDD_VOLTAGES; i++) {
+ if (wled->avdd_target_voltage_mv ==
+ qpnp_wled_avdd_target_voltages[i])
+ break;
+ }
+
+ if (i == NUM_SUPPORTED_AVDD_VOLTAGES) {
+ dev_err(&wled->pdev->dev,
+ "Invalid avdd target voltage specified in device tree\n");
+ return -EINVAL;
+ }
+
+ /* Update WLED_OVP register based on desired target voltage */
+ reg = qpnp_wled_ovp_reg_settings[i];
+ rc = qpnp_wled_masked_write_reg(wled, QPNP_WLED_OVP_MASK, &reg,
+ QPNP_WLED_OVP_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Update WLED_TRIM register based on desired target voltage */
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ reg += qpnp_wled_avdd_trim_adjustments[i];
+ if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL ||
+ (s8)reg > QPNP_WLED_AVDD_MAX_TRIM_VAL) {
+ dev_dbg(&wled->pdev->dev,
+ "adjusted trim %d is not within range, capping it\n",
+ (s8)reg);
+ if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL)
+ reg = QPNP_WLED_AVDD_MIN_TRIM_VAL;
+ else
+ reg = QPNP_WLED_AVDD_MAX_TRIM_VAL;
+ }
+
+ reg &= QPNP_WLED_7P7_TRIM_MASK;
+ rc = qpnp_wled_sec_write_reg(wled, reg,
+ QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base));
+ if (rc < 0)
+ dev_err(&wled->pdev->dev, "Write to 7P7_TRIM register failed, rc=%d\n",
+ rc);
+ return rc;
+}
+
+static int qpnp_wled_avdd_mode_config(struct qpnp_wled *wled)
+{
+ int rc;
+ u8 reg = 0;
+
+ /*
+ * At present, configuring the mode to SPMI/SWIRE for controlling
+ * AVDD voltage is available only in pmicobalt/pm2falcon.
+ */
+ if (wled->pmic_rev_id->pmic_subtype != PMICOBALT_SUBTYPE &&
+ wled->pmic_rev_id->pmic_subtype != PM2FALCON_SUBTYPE)
+ return 0;
+
+ /* AMOLED_VOUT should be configured for AMOLED */
+ if (!wled->disp_type_amoled)
+ return 0;
+
+ /* Configure avdd register */
+ if (wled->avdd_target_voltage_mv > QPNP_WLED_AVDD_MAX_MV) {
+ dev_dbg(&wled->pdev->dev, "Capping avdd target voltage to %d\n",
+ QPNP_WLED_AVDD_MAX_MV);
+ wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MAX_MV;
+ } else if (wled->avdd_target_voltage_mv < QPNP_WLED_AVDD_MIN_MV) {
+ dev_info(&wled->pdev->dev, "Capping avdd target voltage to %d\n",
+ QPNP_WLED_AVDD_MIN_MV);
+ wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MIN_MV;
+ }
+
+ reg = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
+
+ if (wled->avdd_mode_spmi) {
+ reg |= QPNP_WLED_AVDD_SEL_SPMI_BIT;
+ rc = qpnp_wled_write_reg(wled, reg,
+ QPNP_WLED_AMOLED_VOUT_REG(wled->ctrl_base));
+ } else {
+ rc = qpnp_wled_write_reg(wled, reg,
+ QPNP_WLED_SWIRE_AVDD_REG(wled->ctrl_base));
+ }
+
+ if (rc < 0)
+ dev_err(&wled->pdev->dev, "Write to VOUT/AVDD register failed, rc=%d\n",
+ rc);
+ return rc;
+}
+
+static int qpnp_wled_ilim_config(struct qpnp_wled *wled)
+{
+ int rc, i, *ilim_table;
+ u8 reg;
+
+ if (wled->ilim_ma < PMI8994_WLED_ILIM_MIN_MA)
+ wled->ilim_ma = PMI8994_WLED_ILIM_MIN_MA;
+
+ if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE) {
+ ilim_table = qpnp_wled_ilim_settings_pmicobalt;
+ if (wled->ilim_ma > PMICOBALT_WLED_ILIM_MAX_MA)
+ wled->ilim_ma = PMICOBALT_WLED_ILIM_MAX_MA;
+ } else {
+ ilim_table = qpnp_wled_ilim_settings_pmi8994;
+ if (wled->ilim_ma > PMI8994_WLED_ILIM_MAX_MA)
+ wled->ilim_ma = PMI8994_WLED_ILIM_MAX_MA;
+ }
+
+ for (i = 0; i < NUM_SUPPORTED_ILIM_THRESHOLDS; i++) {
+ if (wled->ilim_ma == ilim_table[i])
+ break;
+ }
+
+ if (i == NUM_SUPPORTED_ILIM_THRESHOLDS) {
+ dev_err(&wled->pdev->dev,
+ "Invalid ilim threshold specified in device tree\n");
+ return -EINVAL;
+ }
+
+ reg = (i & QPNP_WLED_ILIM_MASK) | QPNP_WLED_ILIM_OVERWRITE;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_ILIM_MASK | QPNP_WLED_ILIM_OVERWRITE,
+ &reg, QPNP_WLED_ILIM_REG(wled->ctrl_base));
+ if (rc < 0)
+ dev_err(&wled->pdev->dev, "Write to ILIM register failed, rc=%d\n",
+ rc);
+ return rc;
+}
+
/* Configure WLED registers */
static int qpnp_wled_config(struct qpnp_wled *wled)
{
@@ -986,7 +1266,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
reg &= QPNP_WLED_FDBK_OP_MASK;
reg |= wled->fdbk_op;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_FDBK_OP_REG(wled->ctrl_base));
if (rc)
return rc;
@@ -1004,35 +1284,21 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
reg &= QPNP_WLED_VREF_MASK;
temp = wled->vref_mv - QPNP_WLED_VREF_MIN_MV;
reg |= (temp / QPNP_WLED_VREF_STEP_MV);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_VREF_REG(wled->ctrl_base));
if (rc)
return rc;
/* Configure the ILIM register */
- if (wled->ilim_ma < QPNP_WLED_ILIM_MIN_MA)
- wled->ilim_ma = QPNP_WLED_ILIM_MIN_MA;
- else if (wled->ilim_ma > QPNP_WLED_ILIM_MAX_MA)
- wled->ilim_ma = QPNP_WLED_ILIM_MAX_MA;
-
- rc = qpnp_wled_read_reg(wled, &reg,
- QPNP_WLED_ILIM_REG(wled->ctrl_base));
- if (rc < 0)
+ rc = qpnp_wled_ilim_config(wled);
+ if (rc < 0) {
+ pr_err("Error in configuring wled ilim, rc=%d\n", rc);
return rc;
- temp = (wled->ilim_ma / QPNP_WLED_ILIM_STEP_MA);
- if (temp != (reg & ~QPNP_WLED_ILIM_MASK)) {
- reg &= QPNP_WLED_ILIM_MASK;
- reg |= temp;
- reg |= QPNP_WLED_ILIM_OVERWRITE;
- rc = qpnp_wled_write_reg(wled, &reg,
- QPNP_WLED_ILIM_REG(wled->ctrl_base));
- if (rc)
- return rc;
}
/* Configure the Soft start Ramp delay: for AMOLED - 0,for LCD - 2 */
reg = (wled->disp_type_amoled) ? 0 : 2;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base));
if (rc)
return rc;
@@ -1049,7 +1315,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
reg &= QPNP_WLED_BOOST_DUTY_MASK;
reg |= (wled->boost_duty_ns / QPNP_WLED_BOOST_DUTY_STEP_NS);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_BOOST_DUTY_REG(wled->ctrl_base));
if (rc)
return rc;
@@ -1066,62 +1332,27 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
reg &= QPNP_WLED_SWITCH_FREQ_MASK;
reg |= (temp | QPNP_WLED_SWITCH_FREQ_OVERWRITE);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base));
if (rc)
return rc;
- /* Configure the OVP register */
- if (wled->ovp_mv <= QPNP_WLED_OVP_17800_MV) {
- wled->ovp_mv = QPNP_WLED_OVP_17800_MV;
- temp = 3;
- } else if (wled->ovp_mv <= QPNP_WLED_OVP_19400_MV) {
- wled->ovp_mv = QPNP_WLED_OVP_19400_MV;
- temp = 2;
- } else if (wled->ovp_mv <= QPNP_WLED_OVP_29500_MV) {
- wled->ovp_mv = QPNP_WLED_OVP_29500_MV;
- temp = 1;
- } else {
- wled->ovp_mv = QPNP_WLED_OVP_31000_MV;
- temp = 0;
- }
-
- rc = qpnp_wled_read_reg(wled, &reg,
- QPNP_WLED_OVP_REG(wled->ctrl_base));
- if (rc < 0)
- return rc;
- reg &= QPNP_WLED_OVP_MASK;
- reg |= temp;
- rc = qpnp_wled_write_reg(wled, &reg,
- QPNP_WLED_OVP_REG(wled->ctrl_base));
- if (rc)
+ rc = qpnp_wled_ovp_config(wled);
+ if (rc < 0) {
+ pr_err("Error in configuring OVP threshold, rc=%d\n", rc);
return rc;
+ }
- if (wled->disp_type_amoled) {
- /* Configure avdd trim register */
- rc = qpnp_wled_sec_access(wled, wled->ctrl_base);
- if (rc)
- return rc;
-
- /* Check if wled->avdd_trim_steps_from_center is negative */
- if ((s32)wled->avdd_trim_steps_from_center <
- QPNP_WLED_AVDD_MIN_TRIM_VALUE) {
- wled->avdd_trim_steps_from_center =
- QPNP_WLED_AVDD_MIN_TRIM_VALUE;
- } else if ((s32)wled->avdd_trim_steps_from_center >
- QPNP_WLED_AVDD_MAX_TRIM_VALUE) {
- wled->avdd_trim_steps_from_center =
- QPNP_WLED_AVDD_MAX_TRIM_VALUE;
- }
- reg = wled->avdd_trim_steps_from_center +
- QPNP_WLED_AVDD_TRIM_CENTER_VALUE;
-
- rc = qpnp_wled_write_reg(wled, &reg,
- QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base));
- if (rc)
+ if (is_avdd_trim_adjustment_required(wled)) {
+ rc = qpnp_wled_avdd_trim_config(wled);
+ if (rc < 0)
return rc;
}
+ rc = qpnp_wled_avdd_mode_config(wled);
+ if (rc < 0)
+ return rc;
+
/* Configure the MODULATION register */
if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_1200_KHZ) {
wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_1200_KHZ;
@@ -1166,7 +1397,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
reg |= wled->dim_mode;
}
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_MOD_REG(wled->sink_base));
if (rc)
return rc;
@@ -1184,7 +1415,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
reg &= QPNP_WLED_HYB_THRES_MASK;
temp = fls(wled->hyb_thres / QPNP_WLED_HYB_THRES_MIN) - 1;
reg |= temp;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_HYB_THRES_REG(wled->sink_base));
if (rc)
return rc;
@@ -1195,17 +1426,14 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
else
reg = QPNP_WLED_SINK_TEST5_HYB;
- rc = qpnp_wled_sec_access(wled, wled->sink_base);
- if (rc)
- return rc;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_sec_write_reg(wled, reg,
QPNP_WLED_SINK_TEST5_REG(wled->sink_base));
if (rc)
return rc;
/* disable all current sinks and enable selected strings */
reg = 0x00;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_CURR_SINK_REG(wled->sink_base));
for (i = 0; i < wled->num_strings; i++) {
@@ -1228,7 +1456,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
else
reg |= ~QPNP_WLED_GATE_DRV_MASK;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_MOD_EN_REG(wled->sink_base,
wled->strings[i]));
if (rc)
@@ -1246,7 +1474,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
reg &= QPNP_WLED_SYNC_DLY_MASK;
temp = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US;
reg |= temp;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_SYNC_DLY_REG(wled->sink_base,
wled->strings[i]));
if (rc)
@@ -1264,7 +1492,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
reg &= QPNP_WLED_FS_CURR_MASK;
temp = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA;
reg |= temp;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_FS_CURR_REG(wled->sink_base,
wled->strings[i]));
if (rc)
@@ -1278,7 +1506,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
reg &= QPNP_WLED_CABC_MASK;
reg |= (wled->en_cabc << QPNP_WLED_CABC_SHIFT);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_CABC_REG(wled->sink_base,
wled->strings[i]));
if (rc)
@@ -1291,7 +1519,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT;
reg |= (1 << temp);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_CURR_SINK_REG(wled->sink_base));
if (rc)
return rc;
@@ -1348,18 +1576,14 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
if (wled->disp_type_amoled)
reg |= QPNP_WLED_SC_PRO_EN_DSCHGR;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_SC_PRO_REG(wled->ctrl_base));
if (rc)
return rc;
if (wled->en_ext_pfet_sc_pro) {
- rc = qpnp_wled_sec_access(wled, wled->ctrl_base);
- if (rc)
- return rc;
-
reg = QPNP_WLED_EXT_FET_DTEST2;
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_sec_write_reg(wled, reg,
QPNP_WLED_TEST1_REG(wled->ctrl_base));
if (rc)
return rc;
@@ -1378,7 +1602,7 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
temp = fls(wled->sc_deb_cycles) - QPNP_WLED_SC_DEB_CYCLES_SUB;
reg |= (temp << 1);
- rc = qpnp_wled_write_reg(wled, &reg,
+ rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_SC_PRO_REG(wled->ctrl_base));
if (rc)
return rc;
@@ -1447,13 +1671,16 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
return rc;
}
- wled->avdd_trim_steps_from_center = 0;
+ wled->avdd_mode_spmi = of_property_read_bool(pdev->dev.of_node,
+ "qcom,avdd-mode-spmi");
+
+ wled->avdd_target_voltage_mv = QPNP_WLED_DFLT_AVDD_MV;
rc = of_property_read_u32(pdev->dev.of_node,
- "qcom,avdd-trim-steps-from-center", &temp_val);
+ "qcom,avdd-target-voltage-mv", &temp_val);
if (!rc) {
- wled->avdd_trim_steps_from_center = temp_val;
+ wled->avdd_target_voltage_mv = temp_val;
} else if (rc != -EINVAL) {
- dev_err(&pdev->dev, "Unable to read avdd trim steps from center value\n");
+ dev_err(&pdev->dev, "Unable to read avdd target voltage\n");
return rc;
}
}
@@ -1507,17 +1734,33 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
return rc;
}
- wled->ovp_mv = QPNP_WLED_OVP_29500_MV;
+ if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE)
+ wled->ovp_mv = 29600;
+ else
+ wled->ovp_mv = 29500;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,ovp-mv", &temp_val);
if (!rc) {
wled->ovp_mv = temp_val;
} else if (rc != -EINVAL) {
- dev_err(&pdev->dev, "Unable to read vref\n");
+ dev_err(&pdev->dev, "Unable to read ovp\n");
return rc;
}
- wled->ilim_ma = QPNP_WLED_DFLT_ILIM_MA;
+ if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE) {
+ if (wled->disp_type_amoled)
+ wled->ilim_ma = PMICOBALT_AMOLED_DFLT_ILIM_MA;
+ else
+ wled->ilim_ma = PMICOBALT_WLED_DFLT_ILIM_MA;
+ } else {
+ if (wled->disp_type_amoled)
+ wled->ilim_ma = PMI8994_AMOLED_DFLT_ILIM_MA;
+ else
+ wled->ilim_ma = PMI8994_WLED_DFLT_ILIM_MA;
+ }
+
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,ilim-ma", &temp_val);
if (!rc) {
@@ -1638,6 +1881,7 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
static int qpnp_wled_probe(struct platform_device *pdev)
{
struct qpnp_wled *wled;
+ struct device_node *revid_node;
int rc = 0, i;
const __be32 *prop;
@@ -1652,6 +1896,27 @@ static int qpnp_wled_probe(struct platform_device *pdev)
wled->pdev = pdev;
+ revid_node = of_parse_phandle(pdev->dev.of_node, "qcom,pmic-revid", 0);
+ if (!revid_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ wled->pmic_rev_id = get_revid_data(revid_node);
+ if (IS_ERR_OR_NULL(wled->pmic_rev_id)) {
+ pr_err("Unable to get pmic_revid rc=%ld\n",
+ PTR_ERR(wled->pmic_rev_id));
+ /*
+ * The revid peripheral must be registered; a failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ pr_debug("PMIC subtype %d Digital major %d\n",
+ wled->pmic_rev_id->pmic_subtype, wled->pmic_rev_id->rev4);
+
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_SINK_BASE,
0, 0);
if (!prop) {
@@ -1676,6 +1941,7 @@ static int qpnp_wled_probe(struct platform_device *pdev)
return rc;
}
+ mutex_init(&wled->bus_lock);
rc = qpnp_wled_config(wled);
if (rc) {
dev_err(&pdev->dev, "wled config failed\n");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 998bd1ec0415..e562bdedfb07 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -517,4 +517,20 @@ config DM_LOG_WRITES
If unsure, say N.
+config DM_ANDROID_VERITY
+ bool "Android verity target support"
+ depends on DM_VERITY
+ depends on X509_CERTIFICATE_PARSER
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on PUBLIC_KEY_ALGO_RSA
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ ---help---
+ This device-mapper target is essentially a verity target. The
+ target is set up by reading the metadata contents piggybacked
+ onto the actual data blocks in the block device. The signature
+ of the metadata contents is verified against the key included
+ in the system keyring. Upon success, the underlying verity
+ target is set up.
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d470143dcf40..ce7cf06d0e8a 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -69,3 +69,7 @@ endif
ifeq ($(CONFIG_DM_VERITY_FEC),y)
dm-verity-objs += dm-verity-fec.o
endif
+
+ifeq ($(CONFIG_DM_ANDROID_VERITY),y)
+dm-verity-objs += dm-android-verity.o
+endif
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
new file mode 100644
index 000000000000..bb6c1285e499
--- /dev/null
+++ b/drivers/md/dm-android-verity.c
@@ -0,0 +1,925 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/device-mapper.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include <asm/setup.h>
+#include <crypto/hash.h>
+#include <crypto/public_key.h>
+#include <crypto/sha.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "dm-verity.h"
+#include "dm-android-verity.h"
+
+static char verifiedbootstate[VERITY_COMMANDLINE_PARAM_LENGTH];
+static char veritymode[VERITY_COMMANDLINE_PARAM_LENGTH];
+static char veritykeyid[VERITY_DEFAULT_KEY_ID_LENGTH];
+static char buildvariant[BUILD_VARIANT];
+
+static bool target_added;
+static bool verity_enabled = true;
+struct dentry *debug_dir;
+static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
+
+static struct target_type android_verity_target = {
+ .name = "android-verity",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = android_verity_ctr,
+ .dtr = verity_dtr,
+ .map = verity_map,
+ .status = verity_status,
+ .prepare_ioctl = verity_prepare_ioctl,
+ .iterate_devices = verity_iterate_devices,
+ .io_hints = verity_io_hints,
+};
+
+static int __init verified_boot_state_param(char *line)
+{
+ strlcpy(verifiedbootstate, line, sizeof(verifiedbootstate));
+ return 1;
+}
+
+__setup("androidboot.verifiedbootstate=", verified_boot_state_param);
+
+static int __init verity_mode_param(char *line)
+{
+ strlcpy(veritymode, line, sizeof(veritymode));
+ return 1;
+}
+
+__setup("androidboot.veritymode=", verity_mode_param);
+
+static int __init verity_keyid_param(char *line)
+{
+ strlcpy(veritykeyid, line, sizeof(veritykeyid));
+ return 1;
+}
+
+__setup("veritykeyid=", verity_keyid_param);
+
+static int __init verity_buildvariant(char *line)
+{
+ strlcpy(buildvariant, line, sizeof(buildvariant));
+ return 1;
+}
+
+__setup("buildvariant=", verity_buildvariant);
+
+static inline bool default_verity_key_id(void)
+{
+ return veritykeyid[0] != '\0';
+}
+
+static inline bool is_eng(void)
+{
+ static const char typeeng[] = "eng";
+
+ return !strncmp(buildvariant, typeeng, sizeof(typeeng));
+}
+
+static inline bool is_userdebug(void)
+{
+ static const char typeuserdebug[] = "userdebug";
+
+ return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug));
+}
+
+static int table_extract_mpi_array(struct public_key_signature *pks,
+ const void *data, size_t len)
+{
+ MPI mpi = mpi_read_raw_data(data, len);
+
+ if (!mpi) {
+ DMERR("Error while allocating mpi array");
+ return -ENOMEM;
+ }
+
+ pks->mpi[0] = mpi;
+ pks->nr_mpi = 1;
+ return 0;
+}
+
+static struct public_key_signature *table_make_digest(
+ enum hash_algo hash,
+ const void *table,
+ unsigned long table_len)
+{
+ struct public_key_signature *pks = NULL;
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ size_t digest_size, desc_size;
+ int ret;
+
+ /* Allocate the hashing algorithm we're going to need and find out how
+ * big the hash operational data will be.
+ */
+ tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+ digest_size = crypto_shash_digestsize(tfm);
+
+ /* We allocate the hash operational data storage on the end of our
+ * context data and the digest output buffer on the end of that.
+ */
+ ret = -ENOMEM;
+ pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
+ if (!pks)
+ goto error;
+
+ pks->pkey_hash_algo = hash;
+ pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
+ pks->digest_size = digest_size;
+
+ desc = (struct shash_desc *)(pks + 1);
+ desc->tfm = tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ ret = crypto_shash_finup(desc, table, table_len, pks->digest);
+ if (ret < 0)
+ goto error;
+
+ crypto_free_shash(tfm);
+ return pks;
+
+error:
+ kfree(pks);
+ crypto_free_shash(tfm);
+ return ERR_PTR(ret);
+}
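table_make_digest() packs everything it needs into the single kzalloc() region: the public_key_signature first, then the shash_desc plus its hash context, then the digest buffer, which is why desc is taken as (pks + 1) and pks->digest as pks + sizeof(*pks) + desc_size. Roughly:

    kzalloc(digest_size + sizeof(*pks) + desc_size):

      [ struct public_key_signature ][ shash_desc + hash ctx ][ digest ]
      ^ pks                          ^ desc = (pks + 1)       ^ pks->digest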
+
+static int read_block_dev(struct bio_read *payload, struct block_device *bdev,
+ sector_t offset, int length)
+{
+ struct bio *bio;
+ int err = 0, i;
+
+ payload->number_of_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+
+ bio = bio_alloc(GFP_KERNEL, payload->number_of_pages);
+ if (!bio) {
+ DMERR("Error while allocating bio");
+ return -ENOMEM;
+ }
+
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = offset;
+
+ payload->page_io = kzalloc(sizeof(struct page *) *
+ payload->number_of_pages, GFP_KERNEL);
+ if (!payload->page_io) {
+ DMERR("page_io array alloc failed");
+ err = -ENOMEM;
+ goto free_bio;
+ }
+
+ for (i = 0; i < payload->number_of_pages; i++) {
+ payload->page_io[i] = alloc_page(GFP_KERNEL);
+ if (!payload->page_io[i]) {
+ DMERR("alloc_page failed");
+ err = -ENOMEM;
+ goto free_pages;
+ }
+ if (!bio_add_page(bio, payload->page_io[i], PAGE_SIZE, 0)) {
+ DMERR("bio_add_page error");
+ err = -EIO;
+ goto free_pages;
+ }
+ }
+
+ if (!submit_bio_wait(READ, bio))
+ /* success */
+ goto free_bio;
+ DMERR("bio read failed");
+ err = -EIO;
+
+free_pages:
+ for (i = 0; i < payload->number_of_pages; i++)
+ if (payload->page_io[i])
+ __free_page(payload->page_io[i]);
+ kfree(payload->page_io);
+free_bio:
+ bio_put(bio);
+ return err;
+}
+
+static inline u64 fec_div_round_up(u64 x, u64 y)
+{
+ u64 remainder;
+
+ return div64_u64_rem(x, y, &remainder) +
+ (remainder > 0 ? 1 : 0);
+}
+
+static inline void populate_fec_metadata(struct fec_header *header,
+ struct fec_ecc_metadata *ecc)
+{
+ ecc->blocks = fec_div_round_up(le64_to_cpu(header->inp_size),
+ FEC_BLOCK_SIZE);
+ ecc->roots = le32_to_cpu(header->roots);
+ ecc->start = le64_to_cpu(header->inp_size);
+}
+
+static inline int validate_fec_header(struct fec_header *header, u64 offset)
+{
+ /* Move offset so the sanity check also works for the
+ * backup header. */
+ offset -= offset % FEC_BLOCK_SIZE;
+ if (le32_to_cpu(header->magic) != FEC_MAGIC ||
+ le32_to_cpu(header->version) != FEC_VERSION ||
+ le32_to_cpu(header->size) != sizeof(struct fec_header) ||
+ le32_to_cpu(header->roots) == 0 ||
+ le32_to_cpu(header->roots) >= FEC_RSM)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int extract_fec_header(dev_t dev, struct fec_header *fec,
+ struct fec_ecc_metadata *ecc)
+{
+ u64 device_size;
+ struct bio_read payload;
+ int i, err = 0;
+ struct block_device *bdev;
+
+ bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+
+ if (IS_ERR_OR_NULL(bdev)) {
+ DMERR("bdev get error");
+ return PTR_ERR(bdev);
+ }
+
+ device_size = i_size_read(bdev->bd_inode);
+
+ /* fec metadata size is a power of 2 and PAGE_SIZE
+ * is a power of 2 as well.
+ */
+ BUG_ON(FEC_BLOCK_SIZE > PAGE_SIZE);
+ /* 512 byte sector alignment */
+ BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0);
+
+ err = read_block_dev(&payload, bdev, (device_size -
+ FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE);
+ if (err) {
+ DMERR("Error while reading verity metadata");
+ goto error;
+ }
+
+ BUG_ON(sizeof(struct fec_header) > PAGE_SIZE);
+ memcpy(fec, page_address(payload.page_io[0]),
+ sizeof(*fec));
+
+ ecc->valid = true;
+ if (validate_fec_header(fec, device_size - FEC_BLOCK_SIZE)) {
+ /* Try the backup header */
+ memcpy(fec, page_address(payload.page_io[0]) + FEC_BLOCK_SIZE
+ - sizeof(*fec),
+ sizeof(*fec));
+ if (validate_fec_header(fec, device_size -
+ sizeof(struct fec_header)))
+ ecc->valid = false;
+ }
+
+ if (ecc->valid)
+ populate_fec_metadata(fec, ecc);
+
+ for (i = 0; i < payload.number_of_pages; i++)
+ __free_page(payload.page_io[i]);
+ kfree(payload.page_io);
+
+error:
+ blkdev_put(bdev, FMODE_READ);
+ return err;
+}
+static void find_metadata_offset(struct fec_header *fec,
+ struct block_device *bdev, u64 *metadata_offset)
+{
+ u64 device_size;
+
+ device_size = i_size_read(bdev->bd_inode);
+
+ if (le32_to_cpu(fec->magic) == FEC_MAGIC)
+ *metadata_offset = le64_to_cpu(fec->inp_size) -
+ VERITY_METADATA_SIZE;
+ else
+ *metadata_offset = device_size - VERITY_METADATA_SIZE;
+}
+
+static int find_size(dev_t dev, u64 *device_size)
+{
+ struct block_device *bdev;
+
+ bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+ if (IS_ERR_OR_NULL(bdev)) {
+ DMERR("blkdev_get_by_dev failed");
+ return PTR_ERR(bdev);
+ }
+
+ *device_size = i_size_read(bdev->bd_inode);
+ *device_size >>= SECTOR_SHIFT;
+
+ DMINFO("blkdev size in sectors: %llu", *device_size);
+ blkdev_put(bdev, FMODE_READ);
+ return 0;
+}
+
+static int verify_header(struct android_metadata_header *header)
+{
+ int retval = -EINVAL;
+
+ if (is_userdebug() && le32_to_cpu(header->magic_number) ==
+ VERITY_METADATA_MAGIC_DISABLE)
+ return VERITY_STATE_DISABLE;
+
+ if (!(le32_to_cpu(header->magic_number) ==
+ VERITY_METADATA_MAGIC_NUMBER) ||
+ (le32_to_cpu(header->magic_number) ==
+ VERITY_METADATA_MAGIC_DISABLE)) {
+ DMERR("Incorrect magic number");
+ return retval;
+ }
+
+ if (le32_to_cpu(header->protocol_version) !=
+ VERITY_METADATA_VERSION) {
+ DMERR("Unsupported version %u",
+ le32_to_cpu(header->protocol_version));
+ return retval;
+ }
+
+ return 0;
+}
+
+static int extract_metadata(dev_t dev, struct fec_header *fec,
+ struct android_metadata **metadata,
+ bool *verity_enabled)
+{
+ struct block_device *bdev;
+ struct android_metadata_header *header;
+ int i;
+ u32 table_length, copy_length, offset;
+ u64 metadata_offset;
+ struct bio_read payload;
+ int err = 0;
+
+ bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+
+ if (IS_ERR_OR_NULL(bdev)) {
+ DMERR("blkdev_get_by_dev failed");
+ return -ENODEV;
+ }
+
+ find_metadata_offset(fec, bdev, &metadata_offset);
+
+ /* Verity metadata size is a power of 2 and PAGE_SIZE
+ * is a power of 2 as well.
+ * PAGE_SIZE is also a multiple of 512 bytes.
+ */
+ if (VERITY_METADATA_SIZE > PAGE_SIZE)
+ BUG_ON(VERITY_METADATA_SIZE % PAGE_SIZE != 0);
+ /* 512 byte sector alignment */
+ BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0);
+
+ err = read_block_dev(&payload, bdev, metadata_offset /
+ (1 << SECTOR_SHIFT), VERITY_METADATA_SIZE);
+ if (err) {
+ DMERR("Error while reading verity metadata");
+ goto blkdev_release;
+ }
+
+ header = kzalloc(sizeof(*header), GFP_KERNEL);
+ if (!header) {
+ DMERR("kzalloc failed for header");
+ err = -ENOMEM;
+ goto free_payload;
+ }
+
+ memcpy(header, page_address(payload.page_io[0]),
+ sizeof(*header));
+
+ DMINFO("bio magic_number:%u protocol_version:%d table_length:%u",
+ le32_to_cpu(header->magic_number),
+ le32_to_cpu(header->protocol_version),
+ le32_to_cpu(header->table_length));
+
+ err = verify_header(header);
+
+ if (err == VERITY_STATE_DISABLE) {
+ DMERR("Mounting root with verity disabled");
+ *verity_enabled = false;
+ /* We would still have to read the metadata to figure out
+ * the data block size. Or maybe we could map the entire
+ * partition, similar to mounting the device.
+ *
+ * Reset the error as well, since the verity_enabled flag
+ * has changed.
+ */
+ err = 0;
+ } else if (err)
+ goto free_header;
+
+ *metadata = kzalloc(sizeof(**metadata), GFP_KERNEL);
+ if (!*metadata) {
+ DMERR("kzalloc for metadata failed");
+ err = -ENOMEM;
+ goto free_header;
+ }
+
+ (*metadata)->header = header;
+ table_length = le32_to_cpu(header->table_length);
+
+ if (table_length == 0 ||
+ table_length > (VERITY_METADATA_SIZE -
+ sizeof(struct android_metadata_header))) {
+ DMERR("table_length too long");
+ err = -EINVAL;
+ goto free_metadata;
+ }
+
+ (*metadata)->verity_table = kzalloc(table_length + 1, GFP_KERNEL);
+
+ if (!(*metadata)->verity_table) {
+ DMERR("kzalloc verity_table failed");
+ err = -ENOMEM;
+ goto free_metadata;
+ }
+
+ if (sizeof(struct android_metadata_header) +
+ table_length <= PAGE_SIZE) {
+ memcpy((*metadata)->verity_table,
+ page_address(payload.page_io[0])
+ + sizeof(struct android_metadata_header),
+ table_length);
+ } else {
+ copy_length = PAGE_SIZE -
+ sizeof(struct android_metadata_header);
+ memcpy((*metadata)->verity_table,
+ page_address(payload.page_io[0])
+ + sizeof(struct android_metadata_header),
+ copy_length);
+ table_length -= copy_length;
+ offset = copy_length;
+ i = 1;
+ while (table_length != 0) {
+ if (table_length > PAGE_SIZE) {
+ memcpy((*metadata)->verity_table + offset,
+ page_address(payload.page_io[i]),
+ PAGE_SIZE);
+ offset += PAGE_SIZE;
+ table_length -= PAGE_SIZE;
+ } else {
+ memcpy((*metadata)->verity_table + offset,
+ page_address(payload.page_io[i]),
+ table_length);
+ table_length = 0;
+ }
+ i++;
+ }
+ }
+ (*metadata)->verity_table[table_length] = '\0';
+
+ DMINFO("verity_table: %s", (*metadata)->verity_table);
+ goto free_payload;
+
+free_metadata:
+ kfree(*metadata);
+free_header:
+ kfree(header);
+free_payload:
+ for (i = 0; i < payload.number_of_pages; i++)
+ if (payload.page_io[i])
+ __free_page(payload.page_io[i]);
+ kfree(payload.page_io);
+blkdev_release:
+ blkdev_put(bdev, FMODE_READ);
+ return err;
+}
+
+/* helper functions to extract properties from dts */
+const char *find_dt_value(const char *name)
+{
+ struct device_node *firmware;
+ const char *value;
+
+ firmware = of_find_node_by_path("/firmware/android");
+ if (!firmware)
+ return NULL;
+ value = of_get_property(firmware, name, NULL);
+ of_node_put(firmware);
+
+ return value;
+}
+
+static int verity_mode(void)
+{
+ static const char enforcing[] = "enforcing";
+ static const char verified_mode_prop[] = "veritymode";
+ const char *value;
+
+ value = find_dt_value(verified_mode_prop);
+ if (!value)
+ value = veritymode;
+ if (!strncmp(value, enforcing, sizeof(enforcing) - 1))
+ return DM_VERITY_MODE_RESTART;
+
+ return DM_VERITY_MODE_EIO;
+}
+
+static int verify_verity_signature(char *key_id,
+ struct android_metadata *metadata)
+{
+ key_ref_t key_ref;
+ struct key *key;
+ struct public_key_signature *pks = NULL;
+ int retval = -EINVAL;
+
+ key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1),
+ &key_type_asymmetric, key_id);
+
+ if (IS_ERR(key_ref)) {
+ DMERR("keyring: key not found");
+ return -ENOKEY;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ pks = table_make_digest(HASH_ALGO_SHA256,
+ (const void *)metadata->verity_table,
+ le32_to_cpu(metadata->header->table_length));
+
+ if (IS_ERR(pks)) {
+ DMERR("hashing failed");
+ goto error;
+ }
+
+ retval = table_extract_mpi_array(pks, &metadata->header->signature[0],
+ RSANUMBYTES);
+ if (retval < 0) {
+ DMERR("Error extracting mpi %d", retval);
+ goto error;
+ }
+
+ retval = verify_signature(key, pks);
+ mpi_free(pks->rsa.s);
+error:
+ kfree(pks);
+ key_put(key);
+
+ return retval;
+}
+
+static void handle_error(void)
+{
+ int mode = verity_mode();
+ if (mode == DM_VERITY_MODE_RESTART) {
+ DMERR("triggering restart");
+ kernel_restart("dm-verity device corrupted");
+ } else {
+ DMERR("Mounting verity root failed");
+ }
+}
+
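+/*
+ * Helper: returns true if a * b would overflow sector_t. It divides the
+ * all-ones sector value by b and checks whether a exceeds that quotient.
+ */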
+static inline bool test_mult_overflow(sector_t a, u32 b)
+{
+ sector_t r = (sector_t)~0ULL;
+
+ sector_div(r, b);
+ return a > r;
+}
+
+static int add_as_linear_device(struct dm_target *ti, char *dev)
+{
+ /* Move to linear mapping defines */
+ char *linear_table_args[DM_LINEAR_ARGS] = {dev,
+ DM_LINEAR_TARGET_OFFSET};
+ int err = 0;
+
+ android_verity_target.dtr = dm_linear_dtr,
+ android_verity_target.map = dm_linear_map,
+ android_verity_target.status = dm_linear_status,
+ android_verity_target.prepare_ioctl = dm_linear_prepare_ioctl,
+ android_verity_target.iterate_devices = dm_linear_iterate_devices,
+ android_verity_target.io_hints = NULL;
+
+ err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args);
+
+ if (!err) {
+ DMINFO("Added android-verity as a linear target");
+ target_added = true;
+ } else
+ DMERR("Failed to add android-verity as linear target");
+
+ return err;
+}
+
+/*
+ * Target parameters:
+ * <key id> Key id of the public key in the system keyring.
+ * Verity metadata's signature would be verified against
+ * this. If the key id contains spaces, replace them
+ * with '#'.
+ * <block device> The block device for which dm-verity is being setup.
+ */
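+/*
+ * Usage sketch (the device path and key id below are placeholders, shown
+ * only for illustration): a dm table line of the form
+ *
+ *   0 <device_size_sectors> android-verity <block device> <key id with '#'>
+ *
+ * reaches this constructor with argv[0] as the block device and argv[1] as
+ * the key id; strreplace() below converts the '#' characters back to
+ * spaces before the keyring lookup.
+ */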
+static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ dev_t uninitialized_var(dev);
+ struct android_metadata *metadata = NULL;
+ int err = 0, i, mode;
+ char *key_id, *table_ptr, dummy, *target_device,
+ *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
+ /* One for specifying number of opt args and one for mode */
+ sector_t data_sectors;
+ u32 data_block_size;
+ unsigned int no_of_args = VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS;
+ struct fec_header uninitialized_var(fec);
+ struct fec_ecc_metadata uninitialized_var(ecc);
+ char buf[FEC_ARG_LENGTH], *buf_ptr;
+ unsigned long long tmpll;
+ u64 uninitialized_var(device_size);
+
+ if (argc == 1) {
+ /* Use the default keyid */
+ if (default_verity_key_id())
+ key_id = veritykeyid;
+ else if (!is_eng()) {
+ DMERR("veritykeyid= is not set");
+ handle_error();
+ return -EINVAL;
+ }
+ } else if (argc == 2)
+ key_id = argv[1];
+ else {
+ DMERR("Incorrect number of arguments");
+ handle_error();
+ return -EINVAL;
+ }
+
+ target_device = argv[0];
+
+ dev = name_to_dev_t(target_device);
+ if (!dev) {
+ DMERR("no dev found for %s", target_device);
+ handle_error();
+ return -EINVAL;
+ }
+
+ if (is_eng()) {
+ err = find_size(dev, &device_size);
+ if (err) {
+ DMERR("error finding bdev size");
+ handle_error();
+ return err;
+ }
+
+ ti->len = device_size;
+ err = add_as_linear_device(ti, target_device);
+ if (err) {
+ handle_error();
+ return err;
+ }
+ verity_enabled = false;
+ return 0;
+ }
+
+ strreplace(key_id, '#', ' ');
+
+ DMINFO("key:%s dev:%s", key_id, target_device);
+
+ if (extract_fec_header(dev, &fec, &ecc)) {
+ DMERR("Error while extracting fec header");
+ handle_error();
+ return -EINVAL;
+ }
+
+ err = extract_metadata(dev, &fec, &metadata, &verity_enabled);
+
+ if (err) {
+ DMERR("Error while extracting metadata");
+ handle_error();
+ goto free_metadata;
+ }
+
+ if (verity_enabled) {
+ err = verify_verity_signature(key_id, metadata);
+
+ if (err) {
+ DMERR("Signature verification failed");
+ handle_error();
+ goto free_metadata;
+ } else
+ DMINFO("Signature verification success");
+ }
+
+ table_ptr = metadata->verity_table;
+
+ for (i = 0; i < VERITY_TABLE_ARGS; i++) {
+ verity_table_args[i] = strsep(&table_ptr, " ");
+ if (verity_table_args[i] == NULL)
+ break;
+ }
+
+ if (i != VERITY_TABLE_ARGS) {
+ DMERR("Verity table not in the expected format");
+ err = -EINVAL;
+ handle_error();
+ goto free_metadata;
+ }
+
+ if (sscanf(verity_table_args[5], "%llu%c", &tmpll, &dummy)
+ != 1) {
+ DMERR("Verity table not in the expected format");
+ handle_error();
+ err = -EINVAL;
+ goto free_metadata;
+ }
+
+ if (tmpll > ULONG_MAX) {
+ DMERR("<num_data_blocks> too large. Forgot to turn on CONFIG_LBDAF?");
+ handle_error();
+ err = -EINVAL;
+ goto free_metadata;
+ }
+
+ data_sectors = tmpll;
+
+ if (sscanf(verity_table_args[3], "%u%c", &data_block_size, &dummy)
+ != 1) {
+ DMERR("Verity table not in the expected format");
+ handle_error();
+ err = -EINVAL;
+ goto free_metadata;
+ }
+
+ if (test_mult_overflow(data_sectors, data_block_size >>
+ SECTOR_SHIFT)) {
+ DMERR("data_sectors too large");
+ handle_error();
+ err = -EOVERFLOW;
+ goto free_metadata;
+ }
+
+ data_sectors *= data_block_size >> SECTOR_SHIFT;
+ DMINFO("Data sectors %llu", (unsigned long long)data_sectors);
+
+ /* update target length */
+ ti->len = data_sectors;
+
+ /* Setup linear target and free */
+ if (!verity_enabled) {
+ err = add_as_linear_device(ti, target_device);
+ goto free_metadata;
+ }
+
+ /* Substitute data_dev and hash_dev */
+ verity_table_args[1] = target_device;
+ verity_table_args[2] = target_device;
+
+ mode = verity_mode();
+
+ if (ecc.valid && IS_BUILTIN(CONFIG_DM_VERITY_FEC)) {
+ if (mode) {
+ err = snprintf(buf, FEC_ARG_LENGTH,
+ "%u %s " VERITY_TABLE_OPT_FEC_FORMAT,
+ 1 + VERITY_TABLE_OPT_FEC_ARGS,
+ mode == DM_VERITY_MODE_RESTART ?
+ VERITY_TABLE_OPT_RESTART :
+ VERITY_TABLE_OPT_LOGGING,
+ target_device,
+ ecc.start / FEC_BLOCK_SIZE, ecc.blocks,
+ ecc.roots);
+ } else {
+ err = snprintf(buf, FEC_ARG_LENGTH,
+ "%u " VERITY_TABLE_OPT_FEC_FORMAT,
+ VERITY_TABLE_OPT_FEC_ARGS, target_device,
+ ecc.start / FEC_BLOCK_SIZE, ecc.blocks,
+ ecc.roots);
+ }
+ } else if (mode) {
+ err = snprintf(buf, FEC_ARG_LENGTH,
+ "2 " VERITY_TABLE_OPT_IGNZERO " %s",
+ mode == DM_VERITY_MODE_RESTART ?
+ VERITY_TABLE_OPT_RESTART : VERITY_TABLE_OPT_LOGGING);
+ } else {
+ err = snprintf(buf, FEC_ARG_LENGTH, "1 %s",
+ "ignore_zero_blocks");
+ }
+
+ if (err < 0 || err >= FEC_ARG_LENGTH)
+ goto free_metadata;
+
+ buf_ptr = buf;
+
+ for (i = VERITY_TABLE_ARGS; i < (VERITY_TABLE_ARGS +
+ VERITY_TABLE_OPT_FEC_ARGS + 2); i++) {
+ verity_table_args[i] = strsep(&buf_ptr, " ");
+ if (verity_table_args[i] == NULL) {
+ no_of_args = i;
+ break;
+ }
+ }
+
+ err = verity_ctr(ti, no_of_args, verity_table_args);
+
+ if (err)
+ DMERR("android-verity failed to mount as verity target");
+ else {
+ target_added = true;
+ DMINFO("android-verity mounted as verity target");
+ }
+
+free_metadata:
+ if (metadata) {
+ kfree(metadata->header);
+ kfree(metadata->verity_table);
+ }
+ kfree(metadata);
+ return err;
+}
+
+static int __init dm_android_verity_init(void)
+{
+ int r;
+ struct dentry *file;
+
+ r = dm_register_target(&android_verity_target);
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ /* Tracks the status of the last added target */
+ debug_dir = debugfs_create_dir("android_verity", NULL);
+
+ if (IS_ERR_OR_NULL(debug_dir)) {
+ DMERR("Cannot create android_verity debugfs directory: %ld",
+ PTR_ERR(debug_dir));
+ goto end;
+ }
+
+ file = debugfs_create_bool("target_added", S_IRUGO, debug_dir,
+ &target_added);
+
+ if (IS_ERR_OR_NULL(file)) {
+ DMERR("Cannot create android_verity debugfs directory: %ld",
+ PTR_ERR(debug_dir));
+ debugfs_remove_recursive(debug_dir);
+ goto end;
+ }
+
+ file = debugfs_create_bool("verity_enabled", S_IRUGO, debug_dir,
+ &verity_enabled);
+
+ if (IS_ERR_OR_NULL(file)) {
+ DMERR("Cannot create android_verity debugfs directory: %ld",
+ PTR_ERR(debug_dir));
+ debugfs_remove_recursive(debug_dir);
+ }
+
+end:
+ return r;
+}
+
+static void __exit dm_android_verity_exit(void)
+{
+ if (!IS_ERR_OR_NULL(debug_dir))
+ debugfs_remove_recursive(debug_dir);
+
+ dm_unregister_target(&android_verity_target);
+}
+
+module_init(dm_android_verity_init);
+module_exit(dm_android_verity_exit);
diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h
new file mode 100644
index 000000000000..0fcd54aaf5f6
--- /dev/null
+++ b/drivers/md/dm-android-verity.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef DM_ANDROID_VERITY_H
+#define DM_ANDROID_VERITY_H
+
+#include <crypto/sha.h>
+
+#define RSANUMBYTES 256
+#define VERITY_METADATA_MAGIC_NUMBER 0xb001b001
+#define VERITY_METADATA_MAGIC_DISABLE 0x46464f56
+#define VERITY_METADATA_VERSION 0
+#define VERITY_STATE_DISABLE 1
+#define DATA_BLOCK_SIZE (4 * 1024)
+#define VERITY_METADATA_SIZE (8 * DATA_BLOCK_SIZE)
+#define VERITY_TABLE_ARGS 10
+#define VERITY_COMMANDLINE_PARAM_LENGTH 20
+#define BUILD_VARIANT 20
+
+/*
+ * <subject>:<sha1-id> is the format for the identifier.
+ * The subject can either be the Common Name (CN) + Organization Name (O),
+ * or just the CN if it is prefixed with the O.
+ * From https://tools.ietf.org/html/rfc5280#appendix-A:
+ * ub-organization-name-length INTEGER ::= 64
+ * ub-common-name-length INTEGER ::= 64
+ *
+ * http://lxr.free-electrons.com/source/crypto/asymmetric_keys/x509_cert_parser.c?v=3.9#L278
+ * ctx->o_size + 2 + ctx->cn_size + 1
+ * + 41 characters for ":" and sha1 id
+ * 64 + 2 + 64 + 1 + 1 + 40 (172)
+ * setting VERITY_DEFAULT_KEY_ID_LENGTH to 200 characters.
+ */
+#define VERITY_DEFAULT_KEY_ID_LENGTH 200
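+/*
+ * Illustrative identifier matching the format described above (the values
+ * are made up): "SomeSubject:<40-hex-char sha1>", well under the
+ * 200-character bound.
+ */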
+
+#define FEC_MAGIC 0xFECFECFE
+#define FEC_BLOCK_SIZE (4 * 1024)
+#define FEC_VERSION 0
+#define FEC_RSM 255
+#define FEC_ARG_LENGTH 300
+
+#define VERITY_TABLE_OPT_RESTART "restart_on_corruption"
+#define VERITY_TABLE_OPT_LOGGING "ignore_corruption"
+#define VERITY_TABLE_OPT_IGNZERO "ignore_zero_blocks"
+
+#define VERITY_TABLE_OPT_FEC_FORMAT \
+ "use_fec_from_device %s fec_start %llu fec_blocks %llu fec_roots %u ignore_zero_blocks"
+#define VERITY_TABLE_OPT_FEC_ARGS 9
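+/* 9 == number of space-separated tokens in VERITY_TABLE_OPT_FEC_FORMAT */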
+
+#define VERITY_DEBUG 0
+
+#define DM_MSG_PREFIX "android-verity"
+
+#define DM_LINEAR_ARGS 2
+#define DM_LINEAR_TARGET_OFFSET "0"
+
+/*
+ * There can be two formats.
+ * if fec is present
+ * <data_blocks> <verity_tree> <verity_metadata_32K><fec_data><fec_data_4K>
+ * if fec is not present
+ * <data_blocks> <verity_tree> <verity_metadata_32K>
+ */
+/* TODO: rearrange the structure to reduce memory holes;
+ * depends on a userspace change.
+ */
+struct fec_header {
+ __le32 magic;
+ __le32 version;
+ __le32 size;
+ __le32 roots;
+ __le32 fec_size;
+ __le64 inp_size;
+ u8 hash[SHA256_DIGEST_SIZE];
+};
+
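+/*
+ * On-disk verity metadata header: magic, version, RSA signature and the
+ * length of the verity table that follows it.
+ */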
+struct android_metadata_header {
+ __le32 magic_number;
+ __le32 protocol_version;
+ char signature[RSANUMBYTES];
+ __le32 table_length;
+};
+
+struct android_metadata {
+ struct android_metadata_header *header;
+ char *verity_table;
+};
+
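+/*
+ * FEC parameters extracted from the device; valid is false when no usable
+ * FEC data is found.
+ */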
+struct fec_ecc_metadata {
+ bool valid;
+ u32 roots;
+ u64 blocks;
+ u64 rounds;
+ u64 start;
+};
+
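+/*
+ * Pages (and their count) filled in by read_block_dev() when reading the
+ * verity metadata from the block device.
+ */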
+struct bio_read {
+ struct page **page_io;
+ int number_of_pages;
+};
+
+extern struct target_type linear_target;
+
+extern void dm_linear_dtr(struct dm_target *ti);
+extern int dm_linear_map(struct dm_target *ti, struct bio *bio);
+extern void dm_linear_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen);
+extern int dm_linear_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode);
+extern int dm_linear_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data);
+extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv);
+#endif /* DM_ANDROID_VERITY_H */
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 27f2ef300f8b..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+ down_write(&cmd->root_lock);
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+ up_write(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define WRITE_LOCK_VOID(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return; \
- }
+#define WRITE_LOCK(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return -EINVAL; \
+ } while (0)
+
+#define WRITE_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return; \
+ } while (0)
#define WRITE_UNLOCK(cmd) \
- up_write(&cmd->root_lock)
+ up_write(&(cmd)->root_lock)
-#define READ_LOCK(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ if (cmd->fail_io) {
+ up_read(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define READ_LOCK_VOID(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return; \
- }
+#define READ_LOCK(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return -EINVAL; \
+ } while (0)
+
+#define READ_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return; \
+ } while (0)
#define READ_UNLOCK(cmd) \
- up_read(&cmd->root_lock)
+ up_read(&(cmd)->root_lock)
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 05c35aacb3aa..8505a771de42 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -25,7 +25,7 @@ struct linear_c {
/*
* Construct a linear mapping: <dev_path> <offset>
*/
-static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct linear_c *lc;
unsigned long long tmp;
@@ -67,7 +67,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return ret;
}
-static void linear_dtr(struct dm_target *ti)
+void dm_linear_dtr(struct dm_target *ti)
{
struct linear_c *lc = (struct linear_c *) ti->private;
@@ -92,14 +92,14 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
linear_map_sector(ti, bio->bi_iter.bi_sector);
}
-static int linear_map(struct dm_target *ti, struct bio *bio)
+int dm_linear_map(struct dm_target *ti, struct bio *bio)
{
linear_map_bio(ti, bio);
return DM_MAPIO_REMAPPED;
}
-static void linear_status(struct dm_target *ti, status_type_t type,
+void dm_linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
@@ -116,7 +116,7 @@ static void linear_status(struct dm_target *ti, status_type_t type,
}
}
-static int linear_prepare_ioctl(struct dm_target *ti,
+int dm_linear_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
{
struct linear_c *lc = (struct linear_c *) ti->private;
@@ -133,7 +133,7 @@ static int linear_prepare_ioctl(struct dm_target *ti,
return 0;
}
-static int linear_iterate_devices(struct dm_target *ti,
+int dm_linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct linear_c *lc = ti->private;
@@ -145,12 +145,12 @@ static struct target_type linear_target = {
.name = "linear",
.version = {1, 2, 1},
.module = THIS_MODULE,
- .ctr = linear_ctr,
- .dtr = linear_dtr,
- .map = linear_map,
- .status = linear_status,
- .prepare_ioctl = linear_prepare_ioctl,
- .iterate_devices = linear_iterate_devices,
+ .ctr = dm_linear_ctr,
+ .dtr = dm_linear_dtr,
+ .map = dm_linear_map,
+ .status = dm_linear_status,
+ .prepare_ioctl = dm_linear_prepare_ioctl,
+ .iterate_devices = dm_linear_iterate_devices,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index ad10d6d8ed28..1dd667b97530 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -442,6 +442,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
if (!verity_fec_is_enabled(v))
return -EOPNOTSUPP;
+ if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+ DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+ return -EIO;
+ }
+
+ fio->level++;
+
if (type == DM_VERITY_BLOCK_TYPE_METADATA)
block += v->data_blocks;
@@ -456,9 +463,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
*/
offset = block << v->data_dev_block_bits;
-
- res = offset;
- div64_u64(res, v->fec->rounds << v->data_dev_block_bits);
+ res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits);
/*
* The base RS block we can feed to the interleaver to find out all
@@ -475,7 +480,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
if (r < 0) {
r = fec_decode_rsb(v, io, fio, rsb, offset, true);
if (r < 0)
- return r;
+ goto done;
}
if (dest)
@@ -485,6 +490,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
r = verity_for_bv_block(v, io, iter, fec_bv_copy);
}
+done:
+ fio->level--;
return r;
}
@@ -525,6 +532,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
memset(fio->bufs, 0, sizeof(fio->bufs));
fio->nbufs = 0;
fio->output = NULL;
+ fio->level = 0;
}
/*
@@ -680,7 +688,8 @@ static struct attribute *fec_attrs[] = {
static struct kobj_type fec_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .default_attrs = fec_attrs
+ .default_attrs = fec_attrs,
+ .release = dm_kobject_release
};
/*
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 8c4bee052a73..b8e21cef3ad1 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -28,6 +28,9 @@
#define DM_VERITY_FEC_BUF_MAX \
(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION 4
+
#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
#define DM_VERITY_OPT_FEC_START "fec_start"
@@ -61,6 +64,7 @@ struct dm_verity_fec_io {
unsigned nbufs; /* number of buffers allocated */
u8 *output; /* buffer for corrected output */
size_t output_pos;
+ unsigned level; /* recursion level */
};
#ifdef CONFIG_DM_VERITY_FEC
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 5c5d30cb6ec5..5214ed2c7507 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -551,7 +551,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
* Bio map function. It allocates dm_verity_io structure and bio vector and
* fills them. Then it issues prefetches and the I/O.
*/
-static int verity_map(struct dm_target *ti, struct bio *bio)
+int verity_map(struct dm_target *ti, struct bio *bio)
{
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
@@ -596,7 +596,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
/*
* Status: V (valid) or C (corruption found)
*/
-static void verity_status(struct dm_target *ti, status_type_t type,
+void verity_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_verity *v = ti->private;
@@ -656,7 +656,7 @@ static void verity_status(struct dm_target *ti, status_type_t type,
}
}
-static int verity_prepare_ioctl(struct dm_target *ti,
+int verity_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
{
struct dm_verity *v = ti->private;
@@ -669,7 +669,7 @@ static int verity_prepare_ioctl(struct dm_target *ti,
return 0;
}
-static int verity_iterate_devices(struct dm_target *ti,
+int verity_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dm_verity *v = ti->private;
@@ -677,7 +677,7 @@ static int verity_iterate_devices(struct dm_target *ti,
return fn(ti, v->data_dev, v->data_start, ti->len, data);
}
-static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
+void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dm_verity *v = ti->private;
@@ -690,7 +690,7 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, limits->logical_block_size);
}
-static void verity_dtr(struct dm_target *ti)
+void verity_dtr(struct dm_target *ti)
{
struct dm_verity *v = ti->private;
@@ -817,7 +817,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
* <digest>
* <salt> Hex string or "-" if no salt.
*/
-static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
struct dm_verity *v;
struct dm_arg_set as;
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index fb419f422d73..75effca400a3 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -126,4 +126,14 @@ extern int verity_hash(struct dm_verity *v, struct shash_desc *desc,
extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
+extern void verity_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen);
+extern int verity_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode);
+extern int verity_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data);
+extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits);
+extern void verity_dtr(struct dm_target *ti);
+extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
+extern int verity_map(struct dm_target *ti, struct bio *bio);
#endif /* DM_VERITY_H */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b1e1f6b95782..c57fdf847b47 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -293,6 +293,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
* go away inside make_request
*/
sectors = bio_sectors(bio);
+ /* bio could be mergeable after passing to underlayer */
+ bio->bi_rw &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 98b29cc3a9e3..9e68af87b86c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -1747,7 +1747,7 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
- msm_isp_get_timestamp(&ts);
+ msm_isp_get_timestamp(&ts, vfe_dev);
/* if any stream is waiting for update, signal complete */
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
msm_isp_axi_stream_update(vfe_dev, i, &ts);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 0e14e0957a2a..fb4f7a1dcc92 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -1377,7 +1377,7 @@ static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
}
- msm_isp_get_timestamp(&ts);
+ msm_isp_get_timestamp(&ts, vfe_dev);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
/* if any stream is waiting for update, signal complete */
msm_isp_axi_stream_update(vfe_dev, i, &ts);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 5237b84e9477..d45b6ff0a7d0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -1464,7 +1464,7 @@ static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x374);
}
- msm_isp_get_timestamp(&ts);
+ msm_isp_get_timestamp(&ts, vfe_dev);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
msm_isp_axi_stream_update(vfe_dev, i, &ts);
msm_isp_axi_stream_update(vfe_dev, i, &ts);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index df7e2a88e7ca..6d1ad8ef6804 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -1798,7 +1798,7 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
}
- msm_isp_get_timestamp(&ts);
+ msm_isp_get_timestamp(&ts, vfe_dev);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
/* if any stream is waiting for update, signal fake completes */
msm_isp_axi_stream_update(vfe_dev, i, &ts);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 7488f371545b..fbc2fee5a51d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1060,7 +1060,7 @@ void msm_isp_start_avtimer(void)
avcs_core_disable_power_collapse(1);
}
-static inline void msm_isp_get_avtimer_ts(
+void msm_isp_get_avtimer_ts(
struct msm_isp_timestamp *time_stamp)
{
int rc = 0;
@@ -1088,7 +1088,7 @@ void msm_isp_start_avtimer(void)
pr_err("AV Timer is not supported\n");
}
-static inline void msm_isp_get_avtimer_ts(
+void msm_isp_get_avtimer_ts(
struct msm_isp_timestamp *time_stamp)
{
pr_err_ratelimited("%s: Error: AVTimer driver not available\n",
@@ -2282,7 +2282,7 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
update_vfes[vfe_dev->pdev->id] = vfe_dev;
}
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
for (k = 0; k < MAX_VFE; k++) {
vfe_dev = update_vfes[k];
@@ -2700,8 +2700,8 @@ static int __msm_isp_check_stream_state(struct msm_vfe_axi_stream *stream_info,
}
-static void __msm_isp_stop_axi_streams(struct msm_vfe_axi_stream **streams,
- int num_streams, int cmd_type)
+static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream **streams, int num_streams, int cmd_type)
{
int i;
struct msm_vfe_axi_shared_data *axi_data;
@@ -2712,11 +2712,10 @@ static void __msm_isp_stop_axi_streams(struct msm_vfe_axi_stream **streams,
unsigned long flags;
uint32_t intf;
int rc;
- struct vfe_device *vfe_dev;
struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
int k;
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
for (i = 0; i < num_streams; i++) {
stream_info = streams[i];
@@ -2881,7 +2880,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
return -EINVAL;
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev_ioctl);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
if (stream_cfg_cmd->stream_handle[i] == 0)
@@ -3005,7 +3004,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
return 0;
error:
- __msm_isp_stop_axi_streams(streams, num_streams,
+ __msm_isp_stop_axi_streams(vfe_dev_ioctl, streams, num_streams,
STOP_STREAM);
return rc;
@@ -3043,7 +3042,7 @@ static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev_ioctl,
}
streams[num_streams++] = stream_info;
}
- __msm_isp_stop_axi_streams(streams, num_streams,
+ __msm_isp_stop_axi_streams(vfe_dev_ioctl, streams, num_streams,
stream_cfg_cmd->cmd);
return rc;
@@ -3172,7 +3171,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
return rc;
}
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
buf->buf_debug.put_state[buf->buf_debug.put_state_last] =
MSM_ISP_BUFFER_STATE_DROP_REG;
buf->buf_debug.put_state_last ^= 1;
@@ -3576,7 +3575,7 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
stream_info = msm_isp_get_stream_common_data(vfe_dev,
HANDLE_TO_IDX(update_info->stream_handle));
stream_info->buf_divert = 0;
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
frame_id = vfe_dev->axi_data.src_info[
SRC_TO_INTF(stream_info->stream_src)].frame_id;
/* set ping pong address to scratch before flush */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index 84720f3d8625..9c642370b1a1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -27,6 +27,7 @@ void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_get_avtimer_ts(struct msm_isp_timestamp *time_stamp);
int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index afa498f80928..f1103183c326 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -852,7 +852,7 @@ int msm_isp_stats_reset(struct vfe_device *vfe_dev)
unsigned long flags;
int k;
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
if (vfe_dev->is_split) {
for (i = 0; i < MAX_VFE; i++)
@@ -1077,7 +1077,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
uint32_t num_active_streams[MAX_VFE] = {0, 0};
struct vfe_device *vfe_dev;
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev_ioctl);
num_stats_comp_mask =
vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask;
@@ -1164,7 +1164,7 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *streams[MSM_ISP_STATS_MAX];
unsigned long flags;
- msm_isp_get_timestamp(&timestamp);
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
num_stats_comp_mask =
vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 71c907f2b381..b0789ce4a71c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -198,13 +198,21 @@ uint32_t msm_isp_get_framedrop_period(
return 1;
}
-void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp)
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+ struct vfe_device *vfe_dev)
{
struct timespec ts;
- get_monotonic_boottime(&ts);
- time_stamp->buf_time.tv_sec = ts.tv_sec;
- time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
do_gettimeofday(&(time_stamp->event_time));
+ if (vfe_dev->vt_enable) {
+ msm_isp_get_avtimer_ts(time_stamp);
+ time_stamp->buf_time.tv_sec = time_stamp->vt_time.tv_sec;
+ time_stamp->buf_time.tv_usec = time_stamp->vt_time.tv_usec;
+ } else {
+ get_monotonic_boottime(&ts);
+ time_stamp->buf_time.tv_sec = ts.tv_sec;
+ time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
+ }
+
}
static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
@@ -1820,7 +1828,8 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
}
queue_cmd->vfeInterruptStatus0 = irq_status0;
queue_cmd->vfeInterruptStatus1 = irq_status1;
- msm_isp_get_timestamp(&queue_cmd->ts);
+ msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
+
queue_cmd->cmd_used = 1;
vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) %
MSM_VFE_TASKLETQ_SIZE;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
index 16e3198f35b7..f4280581a730 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
@@ -70,5 +70,6 @@ void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
struct msm_vfe_fetch_engine_info *fetch_engine_info);
void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
void msm_isp_flush_tasklet(struct vfe_device *vfe_dev);
-void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp);
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+ struct vfe_device *vfe_dev);
#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index e0d6977b24a6..106d76aae3bb 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -2344,21 +2344,19 @@ static int msm_cpp_cfg_frame(struct cpp_device *cpp_dev,
return -EINVAL;
}
- if (!new_frame->partial_frame_indicator) {
- if (cpp_frame_msg[new_frame->msg_len - 1] !=
- MSM_CPP_MSG_ID_TRAILER) {
- pr_err("Invalid frame message\n");
- return -EINVAL;
- }
+ if (cpp_frame_msg[new_frame->msg_len - 1] !=
+ MSM_CPP_MSG_ID_TRAILER) {
+ pr_err("Invalid frame message\n");
+ return -EINVAL;
+ }
- if ((stripe_base + new_frame->num_strips * stripe_size + 1) !=
- new_frame->msg_len) {
- pr_err("Invalid frame message,len=%d,expected=%d\n",
- new_frame->msg_len,
- (stripe_base +
- new_frame->num_strips * stripe_size + 1));
- return -EINVAL;
- }
+ if ((stripe_base + new_frame->num_strips * stripe_size + 1) !=
+ new_frame->msg_len) {
+ pr_err("Invalid frame message,len=%d,expected=%d\n",
+ new_frame->msg_len,
+ (stripe_base +
+ new_frame->num_strips * stripe_size + 1));
+ return -EINVAL;
}
if (cpp_dev->iommu_state != CPP_IOMMU_STATE_ATTACHED) {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index bf3973888573..a700f836061c 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -1559,11 +1559,13 @@ static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("a_ctrl->i2c_client.i2c_func_tbl NULL\n");
return -EINVAL;
}
+ mutex_lock(a_ctrl->actuator_mutex);
rc = msm_actuator_power_down(a_ctrl);
if (rc < 0) {
pr_err("%s:%d Actuator Power down failed\n",
__func__, __LINE__);
}
+ mutex_unlock(a_ctrl->actuator_mutex);
return msm_actuator_close(sd, NULL);
default:
return -ENOIOCTLCMD;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
index bba0b2bc9cdb..07d522cb01bb 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
@@ -105,11 +105,11 @@ struct csiphy_settings_t csiphy_combo_mode_v5_0_1 = {
{
{0x818, 0x1},
{0x81c, 0x2},
- {0x004, 0x08},
- {0x704, 0x08},
- {0x204, 0x08},
- {0x404, 0x08},
- {0x604, 0x08},
+ {0x004, 0x0C},
+ {0x704, 0x0C},
+ {0x204, 0x0C},
+ {0x404, 0x0C},
+ {0x604, 0x0C},
{0x02c, 0x1},
{0x22c, 0x1},
{0x42c, 0x1},
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
index a4ee5041bfff..1a40aa1c78bd 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
@@ -69,6 +69,12 @@ int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client,
|| num_byte == 0)
return rc;
+ if (num_byte > I2C_REG_DATA_MAX) {
+ S_I2C_DBG("%s: Error num_byte:0x%x exceeds max:0x%x\n",
+ __func__, num_byte, I2C_REG_DATA_MAX);
+ return rc;
+ }
+
buf = kzalloc(num_byte, GFP_KERNEL);
if (!buf) {
pr_err("%s:%d no memory\n", __func__, __LINE__);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
index 7a0fb97061d5..62c5cf8c91d9 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
@@ -73,7 +73,7 @@ int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
enum msm_camera_i2c_data_type data_type)
{
int32_t rc = -EFAULT;
- unsigned char buf[client->addr_type+data_type];
+ unsigned char *buf = NULL;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
&& client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
@@ -81,6 +81,17 @@ int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
&& data_type != MSM_CAMERA_I2C_WORD_DATA))
return rc;
+ if (client->addr_type > UINT_MAX - data_type) {
+ S_I2C_DBG("%s: integer overflow prevented\n", __func__);
+ return rc;
+ }
+
+ buf = kzalloc(client->addr_type+data_type, GFP_KERNEL);
+ if (!buf) {
+ S_I2C_DBG("%s:%d no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
buf[0] = addr;
} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
@@ -90,6 +101,8 @@ int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
rc = msm_camera_qup_i2c_rxdata(client, buf, data_type);
if (rc < 0) {
S_I2C_DBG("%s fail\n", __func__);
+ kfree(buf);
+ buf = NULL;
return rc;
}
@@ -99,6 +112,8 @@ int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
*data = buf[0] << 8 | buf[1];
S_I2C_DBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+ kfree(buf);
+ buf = NULL;
return rc;
}
@@ -106,7 +121,7 @@ int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
uint32_t addr, uint8_t *data, uint32_t num_byte)
{
int32_t rc = -EFAULT;
- unsigned char buf[client->addr_type+num_byte];
+ unsigned char *buf = NULL;
int i;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
@@ -114,6 +129,22 @@ int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
|| num_byte == 0)
return rc;
+ if (num_byte > I2C_REG_DATA_MAX) {
+ S_I2C_DBG("%s: Error num_byte:0x%x exceeds max:0x%x\n",
+ __func__, num_byte, I2C_REG_DATA_MAX);
+ return rc;
+ }
+ if (client->addr_type > UINT_MAX - num_byte) {
+ S_I2C_DBG("%s: integer overflow prevented\n", __func__);
+ return rc;
+ }
+
+ buf = kzalloc(client->addr_type+num_byte, GFP_KERNEL);
+ if (!buf) {
+ S_I2C_DBG("%s:%d no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
buf[0] = addr;
} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
@@ -123,6 +154,8 @@ int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
rc = msm_camera_qup_i2c_rxdata(client, buf, num_byte);
if (rc < 0) {
S_I2C_DBG("%s fail\n", __func__);
+ kfree(buf);
+ buf = NULL;
return rc;
}
@@ -132,6 +165,8 @@ int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
S_I2C_DBG("Byte %d: 0x%x\n", i, buf[i]);
S_I2C_DBG("Data: 0x%x\n", data[i]);
}
+ kfree(buf);
+ buf = NULL;
return rc;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 594bac6c5902..0d8c6cb8f3f3 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -266,7 +266,7 @@ static int sde_rotator_update_clk(struct sde_rot_mgr *mgr)
SDEROT_DBG("core_clk %lu\n", total_clk_rate);
ATRACE_INT("core_clk", total_clk_rate);
- sde_rotator_set_clk_rate(mgr, total_clk_rate, mgr->core_clk_idx);
+ sde_rotator_set_clk_rate(mgr, total_clk_rate, SDE_ROTATOR_CLK_ROT_CORE);
return 0;
}
@@ -300,11 +300,34 @@ static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
mgr->regulator_enable = on;
}
-int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
+static int sde_rotator_enable_clk(struct sde_rot_mgr *mgr, int clk_idx)
+{
+ struct clk *clk;
+ int ret = 0;
+
+ clk = sde_rotator_get_clk(mgr, clk_idx);
+ if (clk) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ SDEROT_ERR("enable failed clk_idx %d\n", clk_idx);
+ }
+
+ return ret;
+}
+
+static void sde_rotator_disable_clk(struct sde_rot_mgr *mgr, int clk_idx)
{
struct clk *clk;
+
+ clk = sde_rotator_get_clk(mgr, clk_idx);
+ if (clk)
+ clk_disable_unprepare(clk);
+}
+
+int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
+{
int ret = 0;
- int i, changed = 0;
+ int changed = 0;
if (enable) {
if (mgr->rot_enable_clk_cnt == 0)
@@ -323,32 +346,41 @@ int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
if (changed) {
SDEROT_EVTLOG(enable);
SDEROT_DBG("Rotator clk %s\n", enable ? "enable" : "disable");
- for (i = 0; i < mgr->num_rot_clk; i++) {
- clk = mgr->rot_clk[i].clk;
-
- if (!clk)
- continue;
-
- if (enable) {
- ret = clk_prepare_enable(clk);
- if (ret) {
- SDEROT_ERR(
- "enable failed clk_idx %d\n",
- i);
- goto error;
- }
- } else {
- clk_disable_unprepare(clk);
- }
- }
if (enable) {
+ ret = sde_rotator_enable_clk(mgr,
+ SDE_ROTATOR_CLK_MNOC_AHB);
+ if (ret)
+ goto error_mnoc_ahb;
+ ret = sde_rotator_enable_clk(mgr,
+ SDE_ROTATOR_CLK_MDSS_AHB);
+ if (ret)
+ goto error_mdss_ahb;
+ ret = sde_rotator_enable_clk(mgr,
+ SDE_ROTATOR_CLK_MDSS_AXI);
+ if (ret)
+ goto error_mdss_axi;
+ ret = sde_rotator_enable_clk(mgr,
+ SDE_ROTATOR_CLK_ROT_CORE);
+ if (ret)
+ goto error_rot_core;
+ ret = sde_rotator_enable_clk(mgr,
+ SDE_ROTATOR_CLK_MDSS_ROT);
+ if (ret)
+ goto error_mdss_rot;
+
/* Active+Sleep */
msm_bus_scale_client_update_context(
mgr->data_bus.bus_hdl, false,
mgr->data_bus.curr_bw_uc_idx);
trace_rot_bw_ao_as_context(0);
} else {
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT);
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_ROT_CORE);
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MNOC_AHB);
+
/* Active Only */
msm_bus_scale_client_update_context(
mgr->data_bus.bus_hdl, true,
@@ -358,9 +390,15 @@ int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
}
return ret;
-error:
- for (i--; i >= 0; i--)
- clk_disable_unprepare(mgr->rot_clk[i].clk);
+error_mdss_rot:
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_ROT_CORE);
+error_rot_core:
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
+error_mdss_axi:
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
+error_mdss_ahb:
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MNOC_AHB);
+error_mnoc_ahb:
return ret;
}
@@ -2101,7 +2139,6 @@ static ssize_t sde_rotator_show_state(struct device *dev,
SPRINT("footswitch_cnt=%d\n", mgr->res_ref_cnt);
SPRINT("regulator_enable=%d\n", mgr->regulator_enable);
SPRINT("enable_clk_cnt=%d\n", mgr->rot_enable_clk_cnt);
- SPRINT("core_clk_idx=%d\n", mgr->core_clk_idx);
for (i = 0; i < mgr->num_rot_clk; i++)
if (mgr->rot_clk[i].clk)
SPRINT("%s=%lu\n", mgr->rot_clk[i].clk_name,
@@ -2301,17 +2338,39 @@ static int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
return 0;
}
+static inline int sde_rotator_search_dt_clk(struct platform_device *pdev,
+ struct sde_rot_mgr *mgr, char *clk_name, int clk_idx)
+{
+ struct clk *tmp;
+
+ if (clk_idx >= SDE_ROTATOR_CLK_MAX) {
+ SDEROT_ERR("invalid clk index %d\n", clk_idx);
+ return -EINVAL;
+ }
+
+ tmp = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(tmp)) {
+ SDEROT_ERR("unable to get clk: %s\n", clk_name);
+ return PTR_ERR(tmp);
+ }
+
+ strlcpy(mgr->rot_clk[clk_idx].clk_name, clk_name,
+ sizeof(mgr->rot_clk[clk_idx].clk_name));
+
+ mgr->rot_clk[clk_idx].clk = tmp;
+ return 0;
+}
+
static int sde_rotator_parse_dt_clk(struct platform_device *pdev,
struct sde_rot_mgr *mgr)
{
- u32 i = 0, rc = 0;
- const char *clock_name;
+ u32 rc = 0;
int num_clk;
num_clk = of_property_count_strings(pdev->dev.of_node,
"clock-names");
- if (num_clk <= 0) {
- SDEROT_ERR("clocks are not defined\n");
+ if ((num_clk <= 0) || (num_clk > SDE_ROTATOR_CLK_MAX)) {
+ SDEROT_ERR("Number of clocks are out of range: %d\n", num_clk);
goto clk_err;
}
@@ -2325,19 +2384,17 @@ static int sde_rotator_parse_dt_clk(struct platform_device *pdev,
goto clk_err;
}
- for (i = 0; i < mgr->num_rot_clk; i++) {
- u32 clock_rate = 0;
-
- of_property_read_string_index(pdev->dev.of_node, "clock-names",
- i, &clock_name);
- strlcpy(mgr->rot_clk[i].clk_name, clock_name,
- sizeof(mgr->rot_clk[i].clk_name));
-
- of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
- i, &clock_rate);
- mgr->rot_clk[i].rate = clock_rate;
- }
-
+ if (sde_rotator_search_dt_clk(pdev, mgr, "mnoc_clk",
+ SDE_ROTATOR_CLK_MNOC_AHB) ||
+ sde_rotator_search_dt_clk(pdev, mgr, "iface_clk",
+ SDE_ROTATOR_CLK_MDSS_AHB) ||
+ sde_rotator_search_dt_clk(pdev, mgr, "axi_clk",
+ SDE_ROTATOR_CLK_MDSS_AXI) ||
+ sde_rotator_search_dt_clk(pdev, mgr, "rot_core_clk",
+ SDE_ROTATOR_CLK_ROT_CORE) ||
+ sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
+ SDE_ROTATOR_CLK_MDSS_ROT))
+ rc = -EINVAL;
clk_err:
return rc;
}
@@ -2345,10 +2402,7 @@ clk_err:
static int sde_rotator_register_clk(struct platform_device *pdev,
struct sde_rot_mgr *mgr)
{
- int i, ret;
- struct clk *clk;
- struct sde_rot_clk *rot_clk;
- int core_clk_idx = -1;
+ int ret;
ret = sde_rotator_parse_dt_clk(pdev, mgr);
if (ret) {
@@ -2356,28 +2410,6 @@ static int sde_rotator_register_clk(struct platform_device *pdev,
return -EINVAL;
}
- for (i = 0; i < mgr->num_rot_clk; i++) {
- rot_clk = &mgr->rot_clk[i];
-
- clk = devm_clk_get(&pdev->dev, rot_clk->clk_name);
- if (IS_ERR(clk)) {
- SDEROT_ERR("unable to get clk: %s\n",
- rot_clk->clk_name);
- return PTR_ERR(clk);
- }
- rot_clk->clk = clk;
-
- if (strcmp(rot_clk->clk_name, "rot_core_clk") == 0)
- core_clk_idx = i;
- }
-
- if (core_clk_idx < 0) {
- SDEROT_ERR("undefined core clk\n");
- return -ENXIO;
- }
-
- mgr->core_clk_idx = core_clk_idx;
-
return 0;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 781b03e1b974..e1b326b8eb1c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -92,6 +92,15 @@ enum sde_rotator_ts {
SDE_ROTATOR_TS_MAX
};
+enum sde_rotator_clk_type {
+ SDE_ROTATOR_CLK_MDSS_AHB,
+ SDE_ROTATOR_CLK_MDSS_AXI,
+ SDE_ROTATOR_CLK_ROT_CORE,
+ SDE_ROTATOR_CLK_MDSS_ROT,
+ SDE_ROTATOR_CLK_MNOC_AHB,
+ SDE_ROTATOR_CLK_MAX
+};
+
struct sde_rotation_item {
/* rotation request flag */
uint32_t flags;
@@ -275,7 +284,6 @@ struct sde_rot_mgr {
int rot_enable_clk_cnt;
struct sde_rot_clk *rot_clk;
int num_rot_clk;
- int core_clk_idx;
u32 rdot_limit;
u32 wrot_limit;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index d2bc76874c48..925b8497273a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -2337,9 +2337,9 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
goto error_hw_rev_init;
/* set rotator CBCR to shutoff memory/periphery on clock off.*/
- clk_set_flags(mgr->rot_clk[mgr->core_clk_idx].clk,
+ clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
CLKFLAG_NORETAIN_MEM);
- clk_set_flags(mgr->rot_clk[mgr->core_clk_idx].clk,
+ clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
CLKFLAG_NORETAIN_PERIPH);
mdata->sde_rot_hw = rot;
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index a18840b1a1a4..88a3b4b6f7ba 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -112,6 +112,7 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id,
u8 *data_ptr;
int prop_id;
enum msm_vidc_pixel_depth luma_bit_depth, chroma_bit_depth;
+ struct hfi_colour_space *colour_info;
if (sizeof(struct hfi_msg_event_notify_packet) > pkt->size) {
dprintk(VIDC_ERR,
@@ -205,6 +206,18 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id,
data_ptr +=
sizeof(struct hfi_pic_struct);
break;
+ case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+ data_ptr = data_ptr + sizeof(u32);
+ colour_info =
+ (struct hfi_colour_space *) data_ptr;
+ event_notify.colour_space =
+ colour_info->colour_space;
+ dprintk(VIDC_DBG,
+ "Colour space value is: %d\n",
+ colour_info->colour_space);
+ data_ptr +=
+ sizeof(struct hfi_colour_space);
+ break;
default:
dprintk(VIDC_ERR,
"%s cmd: %#x not supported\n",
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index bb2715654f45..90047a608984 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -475,7 +475,7 @@ bool msm_smem_compare_buffers(void *clt, int fd, void *priv)
}
handle = ion_import_dma_buf(client->clnt, fd);
ret = handle == priv;
- handle ? ion_free(client->clnt, handle) : 0;
+ (!IS_ERR_OR_NULL(handle)) ? ion_free(client->clnt, handle) : 0;
return ret;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index b12eeddc678f..93e32ef4ac35 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1172,6 +1172,7 @@ void *msm_vidc_open(int core_id, int session_type)
inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
inst->instant_bitrate = 0;
inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
+ inst->colour_space = MSM_VIDC_BT601_6_525;
for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index d1cc08d53017..3671d5fa6479 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1200,29 +1200,46 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
* ptr[2] = flag to indicate bit depth or/and pic struct changed
* ptr[3] = bit depth
* ptr[4] = pic struct (progressive or interlaced)
+ * ptr[5] = colour space
*/
ptr = (u32 *)seq_changed_event.u.data;
- ptr[2] = 0x0;
- ptr[3] = inst->bit_depth;
- ptr[4] = inst->pic_struct;
- if (inst->bit_depth != event_notify->bit_depth) {
- inst->bit_depth = event_notify->bit_depth;
- ptr[2] |= V4L2_EVENT_BITDEPTH_FLAG;
+ if (ptr != NULL) {
+ ptr[2] = 0x0;
ptr[3] = inst->bit_depth;
- event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
- dprintk(VIDC_DBG,
- "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to bit-depth change\n");
- }
-
- if (inst->pic_struct != event_notify->pic_struct) {
- inst->pic_struct = event_notify->pic_struct;
- event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
- ptr[2] |= V4L2_EVENT_PICSTRUCT_FLAG;
ptr[4] = inst->pic_struct;
- dprintk(VIDC_DBG,
- "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to pic-struct change\n");
+ ptr[5] = inst->colour_space;
+
+ if (inst->bit_depth != event_notify->bit_depth) {
+ inst->bit_depth = event_notify->bit_depth;
+ ptr[2] |= V4L2_EVENT_BITDEPTH_FLAG;
+ ptr[3] = inst->bit_depth;
+ event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+ dprintk(VIDC_DBG,
+ "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to bit-depth change\n");
+ }
+
+ if (inst->pic_struct != event_notify->pic_struct) {
+ inst->pic_struct = event_notify->pic_struct;
+ event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+ ptr[2] |= V4L2_EVENT_PICSTRUCT_FLAG;
+ ptr[4] = inst->pic_struct;
+ dprintk(VIDC_DBG,
+ "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to pic-struct change\n");
+ }
+
+ if (inst->bit_depth == MSM_VIDC_BIT_DEPTH_10
+ && inst->colour_space !=
+ event_notify->colour_space) {
+ inst->colour_space = event_notify->colour_space;
+ event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+ ptr[2] |= V4L2_EVENT_COLOUR_SPACE_FLAG;
+ ptr[5] = inst->colour_space;
+ dprintk(VIDC_DBG,
+ "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to colour space change\n");
+ }
+
}
if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index efb90c69881f..011941c6d4eb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -157,7 +157,7 @@ struct dentry *msm_vidc_debugfs_init_drv(void)
struct dentry *f = debugfs_create_##__type(__name, S_IRUGO | S_IWUSR, \
dir, __value); \
if (IS_ERR_OR_NULL(f)) { \
- dprintk(VIDC_ERR, "Failed creating debugfs file '%pKd/%s'\n", \
+ dprintk(VIDC_ERR, "Failed creating debugfs file '%pd/%s'\n", \
dir, __name); \
f = NULL; \
} \
@@ -349,7 +349,7 @@ struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst,
dprintk(VIDC_ERR, "Invalid params, inst: %pK\n", inst);
goto failed_create_dir;
}
- snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%pK", inst);
+ snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%p", inst);
dir = debugfs_create_dir(debugfs_name, parent);
if (!dir) {
dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 161e94f99040..ffe4456570e3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -297,6 +297,7 @@ struct msm_vidc_inst {
u32 buffers_held_in_driver;
atomic_t in_flush;
u32 pic_struct;
+ u32 colour_space;
};
extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 8332c7f4db43..c87b6fc585c7 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -4263,7 +4263,7 @@ static inline int __suspend(struct venus_hfi_device *device)
return 0;
}
- dprintk(VIDC_DBG, "Entering power collapse\n");
+ dprintk(VIDC_PROF, "Entering power collapse\n");
if (device->res->pm_qos_latency_us &&
pm_qos_request_active(&device->qos))
@@ -4276,7 +4276,7 @@ static inline int __suspend(struct venus_hfi_device *device)
}
__venus_power_off(device, true);
- dprintk(VIDC_INFO, "Venus power collapsed\n");
+ dprintk(VIDC_PROF, "Venus power collapsed\n");
return rc;
err_tzbsp_suspend:
@@ -4298,7 +4298,7 @@ static inline int __resume(struct venus_hfi_device *device)
return -EINVAL;
}
- dprintk(VIDC_DBG, "Resuming from power collapse\n");
+ dprintk(VIDC_PROF, "Resuming from power collapse\n");
rc = __venus_power_on(device);
if (rc) {
dprintk(VIDC_ERR, "Failed to power on venus\n");
@@ -4334,7 +4334,7 @@ static inline int __resume(struct venus_hfi_device *device)
pm_qos_add_request(&device->qos, PM_QOS_CPU_DMA_LATENCY,
device->res->pm_qos_latency_us);
}
- dprintk(VIDC_INFO, "Resumed from power collapse\n");
+ dprintk(VIDC_PROF, "Resumed from power collapse\n");
exit:
device->skip_pc_count = 0;
return rc;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 34ab36a4647b..aa566159c393 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1357,6 +1357,7 @@ struct msm_vidc_cb_event {
ion_phys_addr_t packet_buffer;
ion_phys_addr_t extra_data_buffer;
u32 pic_struct;
+ u32 colour_space;
};
struct msm_vidc_cb_data_done {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 23240746baf1..5e5ef6abc303 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -293,6 +293,8 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x007)
#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT \
(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE \
+ (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x00A)
#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START \
@@ -435,6 +437,10 @@ struct hfi_bitrate {
u32 layer_id;
};
+struct hfi_colour_space {
+ u32 colour_space;
+};
+
#define HFI_CAPABILITY_FRAME_WIDTH (HFI_COMMON_BASE + 0x1)
#define HFI_CAPABILITY_FRAME_HEIGHT (HFI_COMMON_BASE + 0x2)
#define HFI_CAPABILITY_MBS_PER_FRAME (HFI_COMMON_BASE + 0x3)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 33bdd81065e8..11f39791ec33 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- int nonblocking)
+ void *pb, int nonblocking)
{
unsigned long flags;
int ret;
@@ -1523,10 +1523,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
- * Verifying planes is NOT necessary since it already has been checked
- * before the buffer is queued/prepared. So it can never fail.
*/
- list_del(&(*vb)->done_entry);
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
@@ -1604,7 +1604,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
struct vb2_buffer *vb = NULL;
int ret;
- ret = __vb2_get_done_vb(q, &vb, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
if (ret < 0)
return ret;
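
The videobuf2 hunks above make the dqbuf path re-validate the user-supplied plane array before the buffer is unlinked from the done list, instead of trusting that earlier checks still hold. A minimal standalone sketch of that verify-before-detach pattern, using a hypothetical verify callback and a bare doubly linked list rather than the real vb2 structures:

    struct node { struct node *prev, *next; };

    /* Hypothetical stand-ins for vb2_buffer and the verify_planes_array bufop. */
    struct buffer { struct node done_entry; unsigned int num_planes; };
    typedef int (*verify_planes_fn)(struct buffer *b, const void *pb);

    /*
     * Unlink a completed buffer only if the consumer-supplied descriptor
     * 'pb' can describe all of its planes; on failure the buffer stays on
     * the done list and the error is returned, as in the patch above.
     */
    static int get_done_buffer(struct buffer *b, const void *pb,
                               verify_planes_fn verify)
    {
        int ret = verify ? verify(b, pb) : 0;

        if (!ret) {
            b->done_entry.prev->next = b->done_entry.next;
            b->done_entry.next->prev = b->done_entry.prev;
        }
        return ret;
    }
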
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start, nr, write, 1, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 6255513f54c7..68aa31ae553a 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -445,6 +445,7 @@ int intel_lpss_probe(struct device *dev,
err_remove_ltr:
intel_lpss_debugfs_remove(lpss);
intel_lpss_ltr_hide(lpss);
+ intel_lpss_unregister_clock(lpss);
err_clk_register:
ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index e07350a1e2ce..fbaf05e58aff 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1904,6 +1904,9 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
(reg <= WCD934X_CDC_ANC1_FB_GAIN_CTL))
return true;
+ if ((reg >= WCD934X_CODEC_CPR_WR_DATA_0) &&
+ (reg <= WCD934X_CODEC_CPR_RD_DATA_3))
+ return true;
/*
* Need to mark volatile for registers that are writable but
@@ -1919,6 +1922,10 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
case WCD934X_SIDO_NEW_VOUT_A_STARTUP:
case WCD934X_SIDO_NEW_VOUT_D_STARTUP:
case WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL:
+ case WCD934X_ANA_MBHC_MECH:
+ case WCD934X_ANA_MBHC_ELECT:
+ case WCD934X_ANA_MBHC_ZDET:
+ case WCD934X_ANA_MICB2:
return true;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8a08ca61062a..9f0d9b7b7e17 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -439,7 +439,7 @@ config ARM_CHARLCD
still useful.
config BMP085
- bool
+ tristate
depends on SYSFS
config BMP085_I2C
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 15e88078ba1e..f1a0b99f5a9a 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
*/
value = swab16(value);
- if (dpot->uid == DPOT_UID(AD5271_ID))
+ if (dpot->uid == DPOT_UID(AD5274_ID))
value = value >> 2;
return value;
default:
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 09a406058c46..efbb6945eb18 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
- irq_dispose_mapping(virq);
}
static int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 8310b4dbff06..6a451bd65bf3 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (offset + (off_t)len < offset)))
+ (len > LONG_MAX - offset)))
return -EINVAL;
might_sleep();
@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (offset + (off_t)len < offset)))
+ (len > LONG_MAX - offset)))
return -EINVAL;
/* Unsupported protection requested */
@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
/* Offset is not page aligned or offset+len wraps around */
if ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset + (off_t)len < offset))
+ (offset < 0) ||
+ (len > LONG_MAX - offset))
return -EINVAL;
err = scif_verify_epd(ep);
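
The scif checks above replace "offset + (off_t)len < offset" with "len > LONG_MAX - offset": the old form relies on signed overflow, which is undefined behaviour in C, while the new form rejects a wrapping range without ever computing the overflowing sum (scif_unregister also gains the missing offset < 0 check). A small sketch of the same idiom, assuming offset is already known to be non-negative as in the patched code:

    #include <limits.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/types.h>   /* off_t; a long in the kernel code above */

    /* True if [offset, offset + len) fits without wrapping past LONG_MAX. */
    static bool range_fits(off_t offset, size_t len)
    {
        return len <= (size_t)(LONG_MAX - offset);
    }

For example, range_fits(4096, (size_t)LONG_MAX) is false, while the old expression would have to overflow before the comparison was ever made.
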
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 644178a0cdfc..ff838ebefba6 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1065,6 +1065,10 @@ static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
}
/* Populate the structure for sending scm call to load image */
svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
+ if (IS_ERR_OR_NULL(svc->sb_virt)) {
+ pr_err("ION memory mapping for listener shared buffer failed\n");
+ return -ENOMEM;
+ }
svc->sb_phys = (phys_addr_t)pa;
if (qseecom.qsee_version < QSEE_VERSION_40) {
@@ -1522,6 +1526,10 @@ static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
/* Populate the structure for sending scm call to load image */
data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
data->client.ihandle);
+ if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+ pr_err("ION memory mapping for client shared buf failed\n");
+ return -ENOMEM;
+ }
data->client.sb_phys = (phys_addr_t)pa;
data->client.sb_length = req.sb_len;
data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
@@ -3591,7 +3599,7 @@ static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
return true;
}
-static int __qseecom_get_fw_size(char *appname, uint32_t *fw_size,
+static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
uint32_t *app_arch)
{
int ret = -1;
@@ -3629,14 +3637,21 @@ static int __qseecom_get_fw_size(char *appname, uint32_t *fw_size,
}
pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
release_firmware(fw_entry);
+ fw_entry = NULL;
for (i = 0; i < num_images; i++) {
memset(fw_name, 0, sizeof(fw_name));
snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
if (ret)
goto err;
+ if (*fw_size > U32_MAX - fw_entry->size) {
+ pr_err("QSEE %s app file size overflow\n", appname);
+ ret = -EINVAL;
+ goto err;
+ }
*fw_size += fw_entry->size;
release_firmware(fw_entry);
+ fw_entry = NULL;
}
return ret;
@@ -3647,8 +3662,9 @@ err:
return ret;
}
-static int __qseecom_get_fw_data(char *appname, u8 *img_data,
- struct qseecom_load_app_ireq *load_req)
+static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
+ uint32_t fw_size,
+ struct qseecom_load_app_ireq *load_req)
{
int ret = -1;
int i = 0, rc = 0;
@@ -3668,6 +3684,12 @@ static int __qseecom_get_fw_data(char *appname, u8 *img_data,
}
load_req->img_len = fw_entry->size;
+ if (load_req->img_len > fw_size) {
+ pr_err("app %s size %zu is larger than buf size %u\n",
+ appname, fw_entry->size, fw_size);
+ ret = -EINVAL;
+ goto err;
+ }
memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
img_data_ptr = img_data_ptr + fw_entry->size;
load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
@@ -3686,6 +3708,7 @@ static int __qseecom_get_fw_data(char *appname, u8 *img_data,
goto err;
}
release_firmware(fw_entry);
+ fw_entry = NULL;
for (i = 0; i < num_images; i++) {
snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
@@ -3693,10 +3716,17 @@ static int __qseecom_get_fw_data(char *appname, u8 *img_data,
pr_err("Failed to locate blob %s\n", fw_name);
goto err;
}
+ if ((fw_entry->size > U32_MAX - load_req->img_len) ||
+ (fw_entry->size + load_req->img_len > fw_size)) {
+ pr_err("Invalid file size for %s\n", fw_name);
+ ret = -EINVAL;
+ goto err;
+ }
memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
img_data_ptr = img_data_ptr + fw_entry->size;
load_req->img_len += fw_entry->size;
release_firmware(fw_entry);
+ fw_entry = NULL;
}
return ret;
err:
@@ -3801,7 +3831,7 @@ static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname)
if (ret)
return ret;
- ret = __qseecom_get_fw_data(appname, img_data, &load_req);
+ ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
if (ret) {
ret = -EIO;
goto exit_free_img_data;
@@ -3922,7 +3952,7 @@ static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
if (ret)
return -EIO;
- ret = __qseecom_get_fw_data(cmnlib_name, img_data, &load_req);
+ ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
if (ret) {
ret = -EIO;
goto exit_free_img_data;
@@ -4181,6 +4211,11 @@ int qseecom_start_app(struct qseecom_handle **handle,
/* Populate the structure for sending scm call to load image */
data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
data->client.ihandle);
+ if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+ pr_err("ION memory mapping for client shared buf failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
data->client.sb_phys = (phys_addr_t)pa;
(*handle)->dev = (void *)data;
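
Three related hardening changes appear in qseecom above: ion_map_kernel() results are checked with IS_ERR_OR_NULL() before the mapping is used, fw_entry is set to NULL after every release_firmware() so the shared error path can never release it twice, and the accumulated firmware size is bounded both against U32_MAX and against the buffer sized by the earlier pass. A minimal sketch of the size-accumulation part, with hypothetical names (this is not the qseecom API):

    #include <stdint.h>
    #include <errno.h>

    /*
     * Add one segment's size to *total (a u32 as in the driver), refusing
     * both arithmetic wrap-around and growth past the preallocated buffer.
     */
    static int add_segment(uint32_t *total, uint32_t seg, uint32_t buf_cap)
    {
        if (seg > UINT32_MAX - *total)
            return -EINVAL;            /* would overflow the u32 counter */
        if (*total + seg > buf_cap)
            return -EINVAL;            /* larger than the buffer we sized */
        *total += seg;
        return 0;
    }
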
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 12c6190c6e33..4a07ba1195b5 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -309,6 +309,36 @@ static const u16 brcmnand_regs_v60[] = {
[BRCMNAND_FC_BASE] = 0x400,
};
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
enum brcmnand_cs_reg {
BRCMNAND_CS_CFG_EXT = 0,
BRCMNAND_CS_CFG,
@@ -404,7 +434,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Register offsets */
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_offsets = brcmnand_regs_v71;
+ else if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
else if (ctrl->nand_version >= 0x0500)
ctrl->reg_offsets = brcmnand_regs_v50;
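
The brcmnand hunks add a v7.1 register-offset table and select it by controller revision, falling back to the older tables for lower versions. A tiny sketch of that version-keyed table selection; the register names and offsets below are made up for illustration and are not the real map listed above:

    #include <stdint.h>
    #include <stddef.h>

    enum reg { REG_CMD_START, REG_CS_SELECT, REG_MAX };

    static const uint16_t regs_v60[REG_MAX] = {
        [REG_CMD_START] = 0x04, [REG_CS_SELECT] = 0x14,
    };
    static const uint16_t regs_v71[REG_MAX] = {
        [REG_CMD_START] = 0x04, [REG_CS_SELECT] = 0x18,
    };

    /* Newest table whose minimum version is <= the controller version wins. */
    static const uint16_t *select_regs(unsigned int nand_version)
    {
        if (nand_version >= 0x0701)
            return regs_v71;
        if (nand_version >= 0x0600)
            return regs_v60;
        return NULL; /* older controllers get their own tables in the driver */
    }
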
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3ff583f165cd..ce7b2cab5762 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3979,7 +3979,6 @@ static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
- * The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
@@ -4403,19 +4402,12 @@ EXPORT_SYMBOL(nand_scan_tail);
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
- /* Many callers got this wrong, so check for it for a while... */
- if (!mtd->owner && caller_is_module()) {
- pr_crit("%s called with NULL mtd->owner!\n", __func__);
- BUG();
- }
-
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 32477c4eb421..37e4135ab213 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1067,45 +1067,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
return 0;
}
-static int micron_quad_enable(struct spi_nor *nor)
-{
- int ret;
- u8 val;
-
- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading EVCR\n", ret);
- return ret;
- }
-
- write_enable(nor);
-
- /* set EVCR, enable quad I/O */
- nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
- if (ret < 0) {
- dev_err(nor->dev, "error while writing EVCR register\n");
- return ret;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* read EVCR and check it */
- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading EVCR\n", ret);
- return ret;
- }
- if (val & EVCR_QUAD_EN_MICRON) {
- dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
@@ -1119,12 +1080,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
}
return status;
case SNOR_MFR_MICRON:
- status = micron_quad_enable(nor);
- if (status) {
- dev_err(nor->dev, "Micron quad-read not enabled\n");
- return -EINVAL;
- }
- return status;
+ return 0;
default:
status = spansion_quad_enable(nor);
if (status) {
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 206b6a71a545..d1c217eaf417 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -550,6 +550,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_config_vlan_stripping(nic, nic->netdev->features);
/* Enable Receive queue */
+ memset(&rq_cfg, 0, sizeof(struct rq_cfg));
rq_cfg.ena = 1;
rq_cfg.tcp_ena = 0;
nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
@@ -582,6 +583,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
qidx, (u64)(cq->dmem.phys_base));
/* Enable Completion queue */
+ memset(&cq_cfg, 0, sizeof(struct cq_cfg));
cq_cfg.ena = 1;
cq_cfg.reset = 0;
cq_cfg.caching = 0;
@@ -630,6 +632,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
qidx, (u64)(sq->dmem.phys_base));
/* Enable send queue & set queue size */
+ memset(&sq_cfg, 0, sizeof(struct sq_cfg));
sq_cfg.ena = 1;
sq_cfg.reset = 0;
sq_cfg.ldwb = 0;
@@ -666,6 +669,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
/* Enable RBDR & set queue size */
/* Buffer size should be in multiples of 128 bytes */
+ memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
rbdr_cfg.ena = 1;
rbdr_cfg.reset = 0;
rbdr_cfg.ldwb = 0;
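
Each nicvf hunk zeroes an on-stack *_cfg struct before filling in its fields, because the whole struct is then written to hardware as a raw u64 ("*(u64 *)&rq_cfg"); any bitfield left unset would otherwise carry stack garbage into the register. A standalone sketch of the idea, using a hypothetical layout and a union to keep the reinterpretation well defined:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative layout only; the real rq_cfg has more fields. */
    union rq_cfg {
        struct {
            uint64_t ena     : 1;
            uint64_t tcp_ena : 1;
            uint64_t rsvd    : 62;
        };
        uint64_t raw;
    };

    static uint64_t build_rq_cfg(void)
    {
        union rq_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));  /* reserved bits must not be stack garbage */
        cfg.ena = 1;
        cfg.tcp_ena = 0;
        return cfg.raw;                /* value the driver writes to the queue register */
    }
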
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b2a32209ffbf..f6147ffc7fbc 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1557,9 +1557,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
struct fec_enet_private *fep = netdev_priv(ndev);
for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
- clear_bit(queue_id, &fep->work_rx);
- pkt_received += fec_enet_rx_queue(ndev,
+ int ret;
+
+ ret = fec_enet_rx_queue(ndev,
budget - pkt_received, queue_id);
+
+ if (ret < budget - pkt_received)
+ clear_bit(queue_id, &fep->work_rx);
+
+ pkt_received += ret;
}
return pkt_received;
}
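
The fec_enet_rx() change only clears a queue's work bit when that queue was drained below the remaining budget; if polling stopped because the budget ran out, the bit stays set and the queue is revisited on the next NAPI pass instead of being silently forgotten. A minimal sketch of that bookkeeping, with a hypothetical per-queue poll helper:

    /* Hypothetical helper: processes up to 'budget' packets, returns the count. */
    extern int poll_queue(int queue_id, int budget);

    static int rx_poll(unsigned long *work_mask, int nqueues, int budget)
    {
        int pkt_received = 0;
        int q;

        for (q = 0; q < nqueues; q++) {
            int done;

            if (!(*work_mask & (1UL << q)))
                continue;

            done = poll_queue(q, budget - pkt_received);

            /* Only claim the queue is idle if it stopped short of the budget. */
            if (done < budget - pkt_received)
                *work_mask &= ~(1UL << q);

            pkt_received += done;
        }
        return pkt_received;
    }
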
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 973dade2d07f..1257b18e6b90 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
}
static inline void
-jme_clear_pm(struct jme_adapter *jme)
+jme_clear_pm_enable_wol(struct jme_adapter *jme)
{
jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}
+static inline void
+jme_clear_pm_disable_wol(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_PMCS, PMCS_STMASK);
+}
+
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
- jme_clear_pm(jme);
+ jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
static void
jme_powersave_phy(struct jme_adapter *jme)
{
- if (jme->reg_pmcs) {
+ if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
- jme_clear_pm(jme);
+ jme_clear_pm_enable_wol(jme);
} else {
jme_phy_off(jme);
}
@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
if (wol->wolopts & WAKE_MAGIC)
jme->reg_pmcs |= PMCS_MFEN;
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
- device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
-
return 0;
}
@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_read = jme_mdio_read;
jme->mii_if.mdio_write = jme_mdio_write;
- jme_clear_pm(jme);
- device_set_wakeup_enable(&pdev->dev, true);
+ jme_clear_pm_disable_wol(jme);
+ device_init_wakeup(&pdev->dev, true);
jme_set_phyfifo_5level(jme);
jme->pcirev = pdev->revision;
@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
if (!netif_running(netdev))
return 0;
- jme_clear_pm(jme);
+ jme_clear_pm_disable_wol(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e7a5000aa12c..bbff8ec6713e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
return -1;
- hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
+ hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
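
In the mlx4 checksum fix, the IPv6 nexthdr byte has to enter the pseudo-header sum as the 16-bit field it occupies on the wire. "nexthdr << 8" happens to produce the right in-memory value only on little-endian hosts; htons(nexthdr) yields the correct network-order representation on both endiannesses. A small userspace illustration:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t nexthdr = 6;  /* IPPROTO_TCP */
        uint16_t shifted = (uint16_t)(nexthdr << 8);
        uint16_t net = htons(nexthdr);

        /* On a little-endian host the two coincide, which is why the old
         * code appeared to work there; on big-endian they differ and only
         * htons() matches the wire layout the checksum expects. */
        printf("6 << 8 = 0x%04x, htons(6) = 0x%04x (%s host)\n",
               shifted, net, shifted == net ? "little-endian" : "big-endian");
        return 0;
    }
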
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf5463f6..e4019a803a9c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -400,7 +400,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 packets = 0;
u32 bytes = 0;
int factor = priv->cqe_factor;
- u64 timestamp = 0;
int done = 0;
int budget = priv->tx_work_limit;
u32 last_nr_txbb;
@@ -440,9 +439,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
do {
+ u64 timestamp = 0;
+
txbbs_skipped += last_nr_txbb;
ring_index = (ring_index + last_nr_txbb) & size_mask;
- if (ring->tx_info[ring_index].ts_requested)
+
+ if (unlikely(ring->tx_info[ring_index].ts_requested))
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1203d892e842..cbd17e25beeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1372,7 +1372,7 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- int hw_mtu;
+ u16 hw_mtu;
int err;
err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
@@ -1891,22 +1891,27 @@ static int mlx5e_set_features(struct net_device *netdev,
return err;
}
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
+
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
- int max_mtu;
+ u16 max_mtu;
+ u16 min_mtu;
int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
- if (new_mtu > max_mtu) {
+ if (new_mtu > max_mtu || new_mtu < min_mtu) {
netdev_err(netdev,
- "%s: Bad MTU (%d) > (%d) Max\n",
- __func__, new_mtu, max_mtu);
+ "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+ __func__, new_mtu, min_mtu, max_mtu);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index a87e773e93f3..53a793bc2e3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
- int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+ u16 *max_mtu, u16 *oper_mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
}
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Huawei E3372 fails unless NDP comes after the IP packets */
- { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+ /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+ * (12d1:157d), are known to fail unless the NDP is placed
+ * after the IP packets. Applying the quirk to all Huawei
+ * devices is broader than necessary, but harmless.
+ */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
},
/* default entry */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 8f8793004b9f..1b271b99c49e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int tmp, new;
int i;
- int8_t mask_m[123];
- int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index db6624527d99..53d7445a5d12 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
int i;
struct chan_centers centers;
- int8_t mask_m[123];
- int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 64046e0bd0a2..c3853a63b083 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -141,7 +141,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP);
}
-static void wil6210_unmask_halp(struct wil6210_priv *wil)
+void wil6210_unmask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
@@ -149,7 +149,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil)
BIT_DMA_EP_MISC_ICR_HALP);
}
-static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 5285ebc8b9af..94e5b67abd59 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -875,19 +875,29 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
+ wil6210_unmask_irq_pseudo(wil);
+ wil6210_unmask_halp(wil);
+ wil_halp_vote(wil);
+
wil_bl_crash_info(wil, false);
rc = wil_target_reset(wil);
+ /* wil_target_reset clears the HALP IRQ, need to set it again.
+ * Call wil_halp_unvote to clear the HALP reference counter
+ * and unmask the HALP interrupt before setting it again
+ */
+ wil_halp_unvote(wil);
+ wil_halp_vote(wil);
wil_rx_fini(wil);
if (rc) {
wil_bl_crash_info(wil, true);
- return rc;
+ goto out;
}
rc = wil_get_bl_info(wil);
if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
rc = 0;
if (rc)
- return rc;
+ goto out;
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
@@ -899,14 +909,19 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Loading f/w from the file */
rc = wil_request_firmware(wil, WIL_FW_NAME, true);
if (rc)
- return rc;
+ goto out;
rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
if (rc)
- return rc;
+ goto out;
/* Mark FW as loaded from host */
wil_s(wil, RGF_USER_USAGE_6, 1);
+ /* Clear the HALP while in BL, before clearing all the IRQs
+ * and running the FW.
+ */
+ wil_halp_unvote(wil);
+
/* clear any interrupts which on-card-firmware
* may have set
*/
@@ -917,6 +932,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
wil_release_cpu(wil);
+ } else {
+ /* Allow XTAL off when going down */
+ wil_halp_unvote(wil);
}
/* init after reset */
@@ -955,6 +973,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
return rc;
+
+out:
+ wil_halp_unvote(wil);
+ return rc;
}
void wil_fw_error_recovery(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index a19dba5b9e5f..8961b4ce4898 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -837,6 +837,7 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
void wil6210_mask_halp(struct wil6210_priv *wil);
+void wil6210_unmask_halp(struct wil6210_priv *wil);
/* P2P */
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
@@ -902,6 +903,8 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
void wil_rx_handle(struct wil6210_priv *wil, int *quota);
void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil);
+
int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e88afac51c5d..f96ab2f4b90e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1557,6 +1557,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ iwl_free_fw_paging(mvm);
+
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index c3adf2bcdc85..13c97f665ba8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -645,8 +645,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
- iwl_free_fw_paging(mvm);
-
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 8c7204738aa3..00e0332e2544 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
*/
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
if (val & (BIT(1) | BIT(17))) {
- IWL_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
+ IWL_DEBUG_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index a6c8a4f7bfe9..d6c4f0f60839 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -313,6 +313,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
mwifiex_dbg(adapter, ERROR,
"Attempt to reconnect on csa closed chan(%d)\n",
bss_desc->channel);
+ ret = -1;
goto done;
}
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 8ba19bba3156..2bb3c5799ac4 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
if (ret)
goto close_banks;
- while (val_size) {
+ while (val_size >= reg_size) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
*buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
}
buf++;
- val_size--;
+ val_size -= reg_size;
offset += reg_size;
}
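
The mxs-ocotp fix steps the copy loop by reg_size instead of by one, so val_size (a byte count) is no longer treated as a word count, and it stops once fewer than reg_size bytes remain. A sketch of the corrected loop shape with a stubbed register read:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical 32-bit register read; the real driver reads OCOTP words. */
    extern uint32_t read_reg(size_t offset);

    static void read_words(uint32_t *buf, size_t val_size /* bytes */,
                           size_t offset, size_t reg_size /* 4 here */)
    {
        while (val_size >= reg_size) {   /* was: while (val_size) */
            *buf++ = read_reg(offset);
            val_size -= reg_size;        /* was: val_size-- */
            offset += reg_size;
        }
    }
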
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0acebc87ec20..0438512f4d69 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -760,6 +760,16 @@ const void * __init of_flat_dt_match_machine(const void *default_match,
}
#ifdef CONFIG_BLK_DEV_INITRD
+#ifndef __early_init_dt_declare_initrd
+static void __early_init_dt_declare_initrd(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
@@ -782,9 +792,7 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
return;
end = of_read_number(prop, len/4);
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
+ __early_init_dt_declare_initrd(start, end);
pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
(unsigned long long)start, (unsigned long long)end);
@@ -1006,13 +1014,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
}
#ifdef CONFIG_HAVE_MEMBLOCK
+#ifndef MIN_MEMBLOCK_ADDR
+#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
+#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
#endif
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
- const u64 phys_offset = __pa(PAGE_OFFSET);
+ const u64 phys_offset = MIN_MEMBLOCK_ADDR;
if (!PAGE_ALIGNED(base)) {
if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
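
The fdt hunks turn the initrd bookkeeping and the minimum memblock address into overridable hooks: a generic default is compiled only when the architecture has not already provided its own definition, which is what the #ifndef guards do. A small sketch of that override pattern outside the kernel, with hypothetical names:

    #include <stdio.h>

    /* An arch header included earlier may define its own my_declare_initrd. */
    #ifndef my_declare_initrd
    static void my_declare_initrd(unsigned long start, unsigned long end)
    {
        /* generic default, analogous to __early_init_dt_declare_initrd() */
        printf("initrd: [%#lx, %#lx)\n", start, end);
    }
    #endif

    #ifndef MIN_ADDR
    #define MIN_ADDR 0x80000000UL  /* generic default, like MIN_MEMBLOCK_ADDR */
    #endif

    int main(void)
    {
        my_declare_initrd(MIN_ADDR, MIN_ADDR + 0x100000UL);
        return 0;
    }
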
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 5c717275a7fa..3d8019eb3d84 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -939,7 +939,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
int eint_num, virq, eint_offset;
unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
- static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+ 128000, 256000};
const struct mtk_desc_pin *pin;
struct irq_data *d;
@@ -957,9 +958,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
if (!mtk_eint_can_en_debounce(pctl, eint_num))
return -ENOSYS;
- dbnc = ARRAY_SIZE(dbnc_arr);
- for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
- if (debounce <= dbnc_arr[i]) {
+ dbnc = ARRAY_SIZE(debounce_time);
+ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+ if (debounce <= debounce_time[i]) {
dbnc = i;
break;
}
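
The mediatek change renames the debounce table and expresses it in microseconds (500 up to 256000) so its unit matches the debounce argument, then picks the smallest supported setting that still covers the request. A standalone sketch of that selection:

    #include <stddef.h>

    static const unsigned int debounce_time_us[] = {
        500, 1000, 16000, 32000, 64000, 128000, 256000,
    };

    /*
     * Return the index of the smallest supported value >= debounce_us,
     * or the table size when the request exceeds every entry (the caller
     * can then reject it, as the driver does).
     */
    static size_t pick_debounce(unsigned int debounce_us)
    {
        size_t n = sizeof(debounce_time_us) / sizeof(debounce_time_us[0]);
        size_t i;

        for (i = 0; i < n; i++)
            if (debounce_us <= debounce_time_us[i])
                break;
        return i;
    }
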
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 33edd07d9149..b3235fd2950c 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -717,9 +717,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_BIAS_PULL_UP:
conf |= ATMEL_PIO_PUEN_MASK;
+ conf &= (~ATMEL_PIO_PDEN_MASK);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
conf |= ATMEL_PIO_PDEN_MASK;
+ conf &= (~ATMEL_PIO_PUEN_MASK);
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
if (arg == 0)
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index ef04b962c3d5..23b6b8c29a99 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
/* Parse pins in each row from LSB */
while (mask) {
- bit_pos = ffs(mask);
+ bit_pos = __ffs(mask);
pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
- mask_pos = ((pcs->fmask) << (bit_pos - 1));
+ mask_pos = ((pcs->fmask) << bit_pos);
val_pos = val & mask_pos;
submask = mask & mask_pos;
@@ -1847,7 +1847,7 @@ static int pcs_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "pinctrl-single,function-mask",
&pcs->fmask);
if (!ret) {
- pcs->fshift = ffs(pcs->fmask) - 1;
+ pcs->fshift = __ffs(pcs->fmask);
pcs->fmax = pcs->fmask >> pcs->fshift;
} else {
/* If mask property doesn't exist, function mux is invalid. */
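
The pinctrl-single fix switches from ffs() to __ffs(): in the kernel ffs() returns a 1-based bit index (0 when no bit is set) while __ffs() returns the 0-based index of the lowest set bit. The old code fed the 1-based value straight into the bits_per_pin division while compensating with "- 1" only for the shift, so the two uses disagreed; with __ffs() everything is 0-based. A userspace illustration, using <strings.h> ffs() and the ctz builtin as a stand-in for __ffs():

    #include <stdio.h>
    #include <strings.h>   /* ffs(): 1-based, returns 0 for an empty mask */

    int main(void)
    {
        unsigned int mask = 0x7u << 4;   /* a 3-bit field at bit 4 */

        /* ffs() -> 5 (1-based); __builtin_ctz() -> 4 (0-based, like __ffs()) */
        printf("ffs(mask) = %d, ctz(mask) = %d\n", ffs(mask), __builtin_ctz(mask));

        /* shifting the function mask into place wants the 0-based position */
        printf("fmask << ctz = %#x\n", 0x7u << __builtin_ctz(mask));
        return 0;
    }
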
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index bd2132d77360..df3901093006 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -108,6 +108,11 @@ static void gsi_handle_ch_ctrl(int ee)
GSIDBG("ch %x\n", ch);
for (i = 0; i < 32; i++) {
if ((1 << i) & ch) {
+ if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
+ GSIERR("invalid channel %d\n", i);
+ break;
+ }
+
ctx = &gsi_ctx->chan[i];
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
@@ -116,6 +121,7 @@ static void gsi_handle_ch_ctrl(int ee)
GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
GSIDBG("ch %u state updated to %u\n", i, ctx->state);
complete(&ctx->compl);
+ gsi_ctx->ch_dbg[i].cmd_completed++;
}
}
@@ -135,6 +141,11 @@ static void gsi_handle_ev_ctrl(int ee)
GSIDBG("ev %x\n", ch);
for (i = 0; i < 32; i++) {
if ((1 << i) & ch) {
+ if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
+ GSIERR("invalid event %d\n", i);
+ break;
+ }
+
ctx = &gsi_ctx->evtr[i];
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
@@ -1605,6 +1616,7 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
ctx->props = *props;
mutex_lock(&gsi_ctx->mlock);
+ gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1775,6 +1787,7 @@ int gsi_start_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_start++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1832,6 +1845,7 @@ int gsi_stop_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1900,6 +1914,7 @@ int gsi_stop_db_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1964,6 +1979,7 @@ int gsi_reset_channel(unsigned long chan_hdl)
reset:
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -2030,6 +2046,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
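
The gsi interrupt handlers walk a 32-bit status word and index the per-channel and per-event arrays by bit position, so the added checks bound that index against both the advertised maximum and the static array size before anything is dereferenced (the hunks also add the ch_dbg counters declared below). A sketch of the defensive iteration with illustrative names, not the GSI API:

    #include <stdint.h>
    #include <stdio.h>

    #define CHAN_MAX 16

    struct chan_ctx { unsigned int state; };
    static struct chan_ctx chans[CHAN_MAX];

    static void handle_ch_irq(uint32_t status, unsigned int hw_max_ch)
    {
        unsigned int i;

        for (i = 0; i < 32; i++) {
            if (!(status & (1u << i)))
                continue;
            /* never index past what the HW claims or what we allocated */
            if (i >= hw_max_ch || i >= CHAN_MAX) {
                fprintf(stderr, "invalid channel %u\n", i);
                break;
            }
            chans[i].state = 1;  /* stand-in for reading the channel context */
        }
    }
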
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 0b94ed2d3a92..750ae2b329d3 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -125,12 +125,23 @@ struct gsi_ee_scratch {
uint32_t word1;
};
+struct ch_debug_stats {
+ unsigned long ch_allocate;
+ unsigned long ch_start;
+ unsigned long ch_stop;
+ unsigned long ch_reset;
+ unsigned long ch_de_alloc;
+ unsigned long ch_db_stop;
+ unsigned long cmd_completed;
+};
+
struct gsi_ctx {
void __iomem *base;
struct device *dev;
struct gsi_per_props per;
bool per_registered;
struct gsi_chan_ctx chan[GSI_CHAN_MAX];
+ struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
struct mutex mlock;
spinlock_t slock;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index d18308344431..293371b88ab9 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -127,6 +127,7 @@ enum ipa3_usb_state {
IPA_USB_SUSPEND_REQUESTED,
IPA_USB_SUSPEND_IN_PROGRESS,
IPA_USB_SUSPENDED,
+ IPA_USB_SUSPENDED_NO_RWAKEUP,
IPA_USB_RESUME_IN_PROGRESS
};
@@ -152,6 +153,12 @@ struct finish_suspend_work_context {
u32 ul_clnt_hdl;
};
+struct ipa3_usb_teth_prot_conn_params {
+ u32 usb_to_ipa_clnt_hdl;
+ u32 ipa_to_usb_clnt_hdl;
+ struct ipa_usb_teth_prot_params params;
+};
+
/**
* Transport type - could be either data tethering or DPL
* Each transport has its own RM resources and statuses
@@ -163,6 +170,7 @@ struct ipa3_usb_transport_type_ctx {
enum ipa3_usb_state state;
struct finish_suspend_work_context finish_suspend_work;
struct ipa_usb_xdci_chan_params ch_params;
+ struct ipa3_usb_teth_prot_conn_params teth_conn_params;
};
struct ipa3_usb_smmu_reg_map {
@@ -189,14 +197,15 @@ struct ipa3_usb_context {
};
enum ipa3_usb_op {
- IPA_USB_INIT_TETH_PROT,
- IPA_USB_REQUEST_CHANNEL,
- IPA_USB_CONNECT,
- IPA_USB_DISCONNECT,
- IPA_USB_RELEASE_CHANNEL,
- IPA_USB_DEINIT_TETH_PROT,
- IPA_USB_SUSPEND,
- IPA_USB_RESUME
+ IPA_USB_OP_INIT_TETH_PROT,
+ IPA_USB_OP_REQUEST_CHANNEL,
+ IPA_USB_OP_CONNECT,
+ IPA_USB_OP_DISCONNECT,
+ IPA_USB_OP_RELEASE_CHANNEL,
+ IPA_USB_OP_DEINIT_TETH_PROT,
+ IPA_USB_OP_SUSPEND,
+ IPA_USB_OP_SUSPEND_NO_RWAKEUP,
+ IPA_USB_OP_RESUME
};
struct ipa3_usb_status_dbg_info {
@@ -228,22 +237,24 @@ struct ipa3_usb_context *ipa3_usb_ctx;
static char *ipa3_usb_op_to_string(enum ipa3_usb_op op)
{
switch (op) {
- case IPA_USB_INIT_TETH_PROT:
- return "IPA_USB_INIT_TETH_PROT";
- case IPA_USB_REQUEST_CHANNEL:
- return "IPA_USB_REQUEST_CHANNEL";
- case IPA_USB_CONNECT:
- return "IPA_USB_CONNECT";
- case IPA_USB_DISCONNECT:
- return "IPA_USB_DISCONNECT";
- case IPA_USB_RELEASE_CHANNEL:
- return "IPA_USB_RELEASE_CHANNEL";
- case IPA_USB_DEINIT_TETH_PROT:
- return "IPA_USB_DEINIT_TETH_PROT";
- case IPA_USB_SUSPEND:
- return "IPA_USB_SUSPEND";
- case IPA_USB_RESUME:
- return "IPA_USB_RESUME";
+ case IPA_USB_OP_INIT_TETH_PROT:
+ return "IPA_USB_OP_INIT_TETH_PROT";
+ case IPA_USB_OP_REQUEST_CHANNEL:
+ return "IPA_USB_OP_REQUEST_CHANNEL";
+ case IPA_USB_OP_CONNECT:
+ return "IPA_USB_OP_CONNECT";
+ case IPA_USB_OP_DISCONNECT:
+ return "IPA_USB_OP_DISCONNECT";
+ case IPA_USB_OP_RELEASE_CHANNEL:
+ return "IPA_USB_OP_RELEASE_CHANNEL";
+ case IPA_USB_OP_DEINIT_TETH_PROT:
+ return "IPA_USB_OP_DEINIT_TETH_PROT";
+ case IPA_USB_OP_SUSPEND:
+ return "IPA_USB_OP_SUSPEND";
+ case IPA_USB_OP_SUSPEND_NO_RWAKEUP:
+ return "IPA_USB_OP_SUSPEND_NO_RWAKEUP";
+ case IPA_USB_OP_RESUME:
+ return "IPA_USB_OP_RESUME";
}
return "UNSUPPORTED";
@@ -266,6 +277,8 @@ static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
return "IPA_USB_SUSPEND_IN_PROGRESS";
case IPA_USB_SUSPENDED:
return "IPA_USB_SUSPENDED";
+ case IPA_USB_SUSPENDED_NO_RWAKEUP:
+ return "IPA_USB_SUSPENDED_NO_RWAKEUP";
case IPA_USB_RESUME_IN_PROGRESS:
return "IPA_USB_RESUME_IN_PROGRESS";
}
@@ -312,6 +325,7 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
if (state == IPA_USB_INITIALIZED ||
state == IPA_USB_STOPPED ||
state == IPA_USB_RESUME_IN_PROGRESS ||
+ state == IPA_USB_SUSPENDED_NO_RWAKEUP ||
/*
* In case of failure during suspend request
* handling, state is reverted to connected.
@@ -327,7 +341,8 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
case IPA_USB_STOPPED:
if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
state == IPA_USB_CONNECTED ||
- state == IPA_USB_SUSPENDED)
+ state == IPA_USB_SUSPENDED ||
+ state == IPA_USB_SUSPENDED_NO_RWAKEUP)
state_legal = true;
break;
case IPA_USB_SUSPEND_REQUESTED:
@@ -354,6 +369,10 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
(err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
state_legal = true;
break;
+ case IPA_USB_SUSPENDED_NO_RWAKEUP:
+ if (state == IPA_USB_CONNECTED)
+ state_legal = true;
+ break;
case IPA_USB_RESUME_IN_PROGRESS:
if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
state == IPA_USB_SUSPENDED)
@@ -418,32 +437,33 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
state = ipa3_usb_ctx->ttype_ctx[ttype].state;
switch (op) {
- case IPA_USB_INIT_TETH_PROT:
+ case IPA_USB_OP_INIT_TETH_PROT:
if (state == IPA_USB_INVALID ||
(!is_dpl && state == IPA_USB_INITIALIZED))
is_legal = true;
break;
- case IPA_USB_REQUEST_CHANNEL:
+ case IPA_USB_OP_REQUEST_CHANNEL:
if (state == IPA_USB_INITIALIZED)
is_legal = true;
break;
- case IPA_USB_CONNECT:
+ case IPA_USB_OP_CONNECT:
if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED)
is_legal = true;
break;
- case IPA_USB_DISCONNECT:
+ case IPA_USB_OP_DISCONNECT:
if (state == IPA_USB_CONNECTED ||
state == IPA_USB_SUSPEND_IN_PROGRESS ||
- state == IPA_USB_SUSPENDED)
+ state == IPA_USB_SUSPENDED ||
+ state == IPA_USB_SUSPENDED_NO_RWAKEUP)
is_legal = true;
break;
- case IPA_USB_RELEASE_CHANNEL:
+ case IPA_USB_OP_RELEASE_CHANNEL:
/* when releasing 1st channel state will be changed already */
if (state == IPA_USB_STOPPED ||
(!is_dpl && state == IPA_USB_INITIALIZED))
is_legal = true;
break;
- case IPA_USB_DEINIT_TETH_PROT:
+ case IPA_USB_OP_DEINIT_TETH_PROT:
/*
* For data tethering we should allow deinit an inited protocol
* always. E.g. rmnet is inited and rndis is connected.
@@ -453,13 +473,18 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
if (!is_dpl || state == IPA_USB_INITIALIZED)
is_legal = true;
break;
- case IPA_USB_SUSPEND:
+ case IPA_USB_OP_SUSPEND:
if (state == IPA_USB_CONNECTED)
is_legal = true;
break;
- case IPA_USB_RESUME:
+ case IPA_USB_OP_SUSPEND_NO_RWAKEUP:
+ if (state == IPA_USB_CONNECTED)
+ is_legal = true;
+ break;
+ case IPA_USB_OP_RESUME:
if (state == IPA_USB_SUSPENDED ||
- state == IPA_USB_SUSPEND_IN_PROGRESS)
+ state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ state == IPA_USB_SUSPENDED_NO_RWAKEUP)
is_legal = true;
break;
default:
@@ -638,6 +663,7 @@ static int ipa3_usb_cons_request_resource_cb_do(
ipa3_usb_ctx->ttype_ctx[ttype].state));
switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
case IPA_USB_CONNECTED:
+ case IPA_USB_SUSPENDED_NO_RWAKEUP:
rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
result = 0;
break;
@@ -717,6 +743,7 @@ static int ipa3_usb_cons_release_resource_cb_do(
break;
case IPA_USB_STOPPED:
case IPA_USB_RESUME_IN_PROGRESS:
+ case IPA_USB_SUSPENDED_NO_RWAKEUP:
if (rm_ctx->cons_requested)
rm_ctx->cons_requested = false;
break;
@@ -886,7 +913,7 @@ int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
ttype = IPA3_USB_GET_TTYPE(teth_prot);
- if (!ipa3_usb_check_legal_op(IPA_USB_INIT_TETH_PROT, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_INIT_TETH_PROT, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
@@ -1204,7 +1231,7 @@ static int ipa3_usb_request_xdci_channel(
ttype = IPA3_USB_GET_TTYPE(params->teth_prot);
- if (!ipa3_usb_check_legal_op(IPA_USB_REQUEST_CHANNEL, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_REQUEST_CHANNEL, ttype)) {
IPA_USB_ERR("Illegal operation\n");
return -EPERM;
}
@@ -1347,7 +1374,7 @@ static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
return -EINVAL;
}
- if (!ipa3_usb_check_legal_op(IPA_USB_RELEASE_CHANNEL, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_RELEASE_CHANNEL, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
return -EPERM;
}
@@ -1511,81 +1538,79 @@ static int ipa3_usb_connect_dpl(void)
return 0;
}
-static int ipa3_usb_connect_teth_prot(
- struct ipa_usb_xdci_connect_params_internal *params,
- enum ipa3_usb_transport_type ttype)
+static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
int result;
struct teth_bridge_connect_params teth_bridge_params;
+ struct ipa3_usb_teth_prot_conn_params *teth_conn_params;
+ enum ipa3_usb_transport_type ttype;
- IPA_USB_DBG("connecting protocol = %d\n",
- params->teth_prot);
- switch (params->teth_prot) {
+ IPA_USB_DBG("connecting protocol = %s\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ teth_conn_params = &(ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params);
+
+ switch (teth_prot) {
case IPA_USB_RNDIS:
if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state ==
IPA_USB_TETH_PROT_CONNECTED) {
IPA_USB_DBG("%s is already connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
break;
}
ipa3_usb_ctx->ttype_ctx[ttype].user_data =
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].user_data;
result = rndis_ipa_pipe_connect_notify(
- params->usb_to_ipa_clnt_hdl,
- params->ipa_to_usb_clnt_hdl,
- params->teth_prot_params.max_xfer_size_bytes_to_dev,
- params->teth_prot_params.max_packet_number_to_dev,
- params->teth_prot_params.max_xfer_size_bytes_to_host,
+ teth_conn_params->usb_to_ipa_clnt_hdl,
+ teth_conn_params->ipa_to_usb_clnt_hdl,
+ teth_conn_params->params.max_xfer_size_bytes_to_dev,
+ teth_conn_params->params.max_packet_number_to_dev,
+ teth_conn_params->params.max_xfer_size_bytes_to_host,
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
teth_prot_params.rndis.private);
if (result) {
IPA_USB_ERR("failed to connect %s.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
}
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state =
IPA_USB_TETH_PROT_CONNECTED;
IPA_USB_DBG("%s is connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
break;
case IPA_USB_ECM:
if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state ==
IPA_USB_TETH_PROT_CONNECTED) {
IPA_USB_DBG("%s is already connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
break;
}
ipa3_usb_ctx->ttype_ctx[ttype].user_data =
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].user_data;
- result = ecm_ipa_connect(params->usb_to_ipa_clnt_hdl,
- params->ipa_to_usb_clnt_hdl,
+ result = ecm_ipa_connect(teth_conn_params->usb_to_ipa_clnt_hdl,
+ teth_conn_params->ipa_to_usb_clnt_hdl,
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
teth_prot_params.ecm.private);
if (result) {
IPA_USB_ERR("failed to connect %s.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
}
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state =
IPA_USB_TETH_PROT_CONNECTED;
IPA_USB_DBG("%s is connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
break;
case IPA_USB_RMNET:
case IPA_USB_MBIM:
- if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state ==
IPA_USB_TETH_PROT_CONNECTED) {
IPA_USB_DBG("%s is already connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
break;
}
result = ipa3_usb_init_teth_bridge();
@@ -1593,14 +1618,14 @@ static int ipa3_usb_connect_teth_prot(
return result;
ipa3_usb_ctx->ttype_ctx[ttype].user_data =
- ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].
user_data;
teth_bridge_params.ipa_usb_pipe_hdl =
- params->ipa_to_usb_clnt_hdl;
+ teth_conn_params->ipa_to_usb_clnt_hdl;
teth_bridge_params.usb_ipa_pipe_hdl =
- params->usb_to_ipa_clnt_hdl;
+ teth_conn_params->usb_to_ipa_clnt_hdl;
teth_bridge_params.tethering_mode =
- (params->teth_prot == IPA_USB_RMNET) ?
+ (teth_prot == IPA_USB_RMNET) ?
(TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM);
teth_bridge_params.client_type = IPA_CLIENT_USB_PROD;
result = ipa3_usb_connect_teth_bridge(&teth_bridge_params);
@@ -1608,27 +1633,23 @@ static int ipa3_usb_connect_teth_prot(
ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
}
- ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state =
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
IPA_USB_TETH_PROT_CONNECTED;
ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
IPA_USB_DBG("%s (%s) is connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot),
- ipa3_usb_teth_bridge_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
break;
case IPA_USB_DIAG:
if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state ==
IPA_USB_TETH_PROT_CONNECTED) {
IPA_USB_DBG("%s is already connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
break;
}
ipa3_usb_ctx->ttype_ctx[ttype].user_data =
- ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
- user_data;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data;
result = ipa3_usb_connect_dpl();
if (result) {
IPA_USB_ERR("Failed connecting DPL result=%d\n",
@@ -1640,8 +1661,7 @@ static int ipa3_usb_connect_teth_prot(
IPA_USB_TETH_PROT_CONNECTED;
ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
IPA_USB_DBG("%s is connected.\n",
- ipa3_usb_teth_prot_to_string(
- params->teth_prot));
+ ipa3_usb_teth_prot_to_string(teth_prot));
break;
default:
IPA_USB_ERR("Invalid tethering protocol\n");
@@ -1775,11 +1795,19 @@ static int ipa3_usb_xdci_connect_internal(
ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL :
IPA_USB_TRANSPORT_TETH;
- if (!ipa3_usb_check_legal_op(IPA_USB_CONNECT, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_CONNECT, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
return -EPERM;
}
+ ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params.ipa_to_usb_clnt_hdl
+ = params->ipa_to_usb_clnt_hdl;
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype))
+ ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params.
+ usb_to_ipa_clnt_hdl = params->usb_to_ipa_clnt_hdl;
+ ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params.params
+ = params->teth_prot_params;
+
/* Set EE xDCI specific scratch */
result = ipa3_set_usb_max_packet_size(params->max_pkt_size);
if (result) {
@@ -1816,7 +1844,7 @@ static int ipa3_usb_xdci_connect_internal(
if (params->teth_prot != IPA_USB_DIAG) {
/* Start UL channel */
- result = ipa3_xdci_connect(params->usb_to_ipa_clnt_hdl,
+ result = ipa3_xdci_start(params->usb_to_ipa_clnt_hdl,
params->usb_to_ipa_xferrscidx,
params->usb_to_ipa_xferrscidx_valid);
if (result) {
@@ -1826,7 +1854,7 @@ static int ipa3_usb_xdci_connect_internal(
}
/* Start DL/DPL channel */
- result = ipa3_xdci_connect(params->ipa_to_usb_clnt_hdl,
+ result = ipa3_xdci_start(params->ipa_to_usb_clnt_hdl,
params->ipa_to_usb_xferrscidx,
params->ipa_to_usb_xferrscidx_valid);
if (result) {
@@ -1835,7 +1863,7 @@ static int ipa3_usb_xdci_connect_internal(
}
/* Connect tethering protocol */
- result = ipa3_usb_connect_teth_prot(params, ttype);
+ result = ipa3_usb_connect_teth_prot(params->teth_prot);
if (result) {
IPA_USB_ERR("failed to connect teth protocol\n");
goto connect_teth_prot_fail;
@@ -2164,6 +2192,70 @@ static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot)
return 0;
}
+/* Assumes lock already acquired */
+static int ipa_usb_xdci_dismiss_channels(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ int result = 0;
+ enum ipa3_usb_transport_type ttype;
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ IPA_USB_DBG_LOW("entry\n");
+
+ /* Reset DL channel */
+ result = ipa3_reset_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset DL channel.\n");
+ return result;
+ }
+
+ /* Reset DL event ring */
+ result = ipa3_reset_gsi_event_ring(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset DL event ring.\n");
+ return result;
+ }
+
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ /* Reset UL channel */
+ result = ipa3_reset_gsi_channel(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset UL channel.\n");
+ return result;
+ }
+
+ /* Reset UL event ring */
+ result = ipa3_reset_gsi_event_ring(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset UL event ring.\n");
+ return result;
+ }
+ }
+
+ /* Change state to STOPPED */
+ if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
+ IPA_USB_ERR("failed to change state to stopped\n");
+
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release UL channel.\n");
+ return result;
+ }
+ }
+
+ result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release DL channel.\n");
+ return result;
+ }
+
+ IPA_USB_DBG_LOW("exit\n");
+
+ return 0;
+}
+
int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
@@ -2175,20 +2267,31 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
mutex_lock(&ipa3_usb_ctx->general_mutex);
IPA_USB_DBG_LOW("entry\n");
- if (ipa3_usb_check_disconnect_prot(teth_prot)) {
- result = -EINVAL;
- goto bad_params;
- }
ttype = IPA3_USB_GET_TTYPE(teth_prot);
- if (!ipa3_usb_check_legal_op(IPA_USB_DISCONNECT, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_DISCONNECT, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
}
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ if (ipa3_usb_ctx->ttype_ctx[ttype].state ==
+ IPA_USB_SUSPENDED_NO_RWAKEUP) {
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl,
+ teth_prot);
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+ }
+
+ if (ipa3_usb_check_disconnect_prot(teth_prot)) {
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ result = -EINVAL;
+ goto bad_params;
+ }
+
if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) {
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
/* Stop DL/DPL channel */
@@ -2227,53 +2330,10 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
} else
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- /* Reset DL channel */
- result = ipa3_reset_gsi_channel(dl_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset DL channel.\n");
- goto bad_params;
- }
-
- /* Reset DL event ring */
- result = ipa3_reset_gsi_event_ring(dl_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset DL event ring.\n");
- goto bad_params;
- }
-
- if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
- /* Reset UL channel */
- result = ipa3_reset_gsi_channel(ul_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset UL channel.\n");
- goto bad_params;
- }
-
- /* Reset UL event ring */
- result = ipa3_reset_gsi_event_ring(ul_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset UL event ring.\n");
- goto bad_params;
- }
- }
-
- /* Change state to STOPPED */
- if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
- IPA_USB_ERR("failed to change state to stopped\n");
-
- if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
- result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype);
- if (result) {
- IPA_USB_ERR("failed to release UL channel.\n");
- goto bad_params;
- }
- }
-
- result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype);
- if (result) {
- IPA_USB_ERR("failed to release DL channel.\n");
+ result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl,
+ teth_prot);
+ if (result)
goto bad_params;
- }
/* Disconnect tethering protocol */
result = ipa3_usb_disconnect_teth_prot(teth_prot);
@@ -2315,7 +2375,7 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
ttype = IPA3_USB_GET_TTYPE(teth_prot);
- if (!ipa3_usb_check_legal_op(IPA_USB_DEINIT_TETH_PROT, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_DEINIT_TETH_PROT, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
@@ -2411,25 +2471,104 @@ bad_params:
}
EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
-int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+/* Assumes lock already acquired */
+static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
int result = 0;
+ enum ipa3_usb_transport_type ttype;
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND_NO_RWAKEUP, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ result = -EPERM;
+ goto fail_exit;
+ }
+
+ IPA_USB_DBG("Start suspend with no remote wakeup sequence: %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ "DPL channel":"Data Tethering channels");
+
+ if (ipa3_usb_check_disconnect_prot(teth_prot)) {
+ result = -EINVAL;
+ goto fail_exit;
+ }
+
+ /* Stop DL/DPL channel */
+ result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
+ if (result) {
+ IPA_USB_ERR("failed to disconnect DL/DPL channel.\n");
+ goto fail_exit;
+ }
+
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ /* Stop UL channel */
+ result = ipa3_xdci_disconnect(ul_clnt_hdl, true,
+ ipa3_usb_ctx->qmi_req_id);
+ if (result) {
+ IPA_USB_ERR("failed disconnect UL channel\n");
+ goto start_dl;
+ }
+ ipa3_usb_ctx->qmi_req_id++;
+ }
+
+ /* Disconnect tethering protocol */
+ result = ipa3_usb_disconnect_teth_prot(teth_prot);
+ if (result)
+ goto start_ul;
+
+ result = ipa3_usb_release_prod(ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release PROD.\n");
+ goto connect_teth;
+ }
+
+ /* Change ipa_usb state to SUSPENDED_NO_RWAKEUP */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPENDED_NO_RWAKEUP, false, ttype))
+ IPA_USB_ERR("failed to change state to suspend no rwakeup\n");
+
+ IPA_USB_DBG_LOW("exit\n");
+ return 0;
+
+connect_teth:
+ (void)ipa3_usb_connect_teth_prot(teth_prot);
+start_ul:
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype))
+ (void)ipa3_xdci_connect(ul_clnt_hdl);
+start_dl:
+ (void)ipa3_xdci_connect(dl_clnt_hdl);
+fail_exit:
+ return result;
+}
+
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup)
+{
+ int result = 0;
unsigned long flags;
enum ipa3_usb_cons_state curr_cons_state;
enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
IPA_USB_DBG_LOW("entry\n");
+
if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) {
IPA_USB_ERR("bad parameters.\n");
result = -EINVAL;
goto bad_params;
}
+ if (!with_remote_wakeup) {
+ result = ipa3_usb_suspend_no_remote_wakeup(ul_clnt_hdl,
+ dl_clnt_hdl, teth_prot);
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+ }
+
ttype = IPA3_USB_GET_TTYPE(teth_prot);
- if (!ipa3_usb_check_legal_op(IPA_USB_SUSPEND, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
@@ -2538,6 +2677,72 @@ bad_params:
}
EXPORT_SYMBOL(ipa_usb_xdci_suspend);
+/* Assumes lock already acquired */
+static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ int result = -EFAULT;
+ enum ipa3_usb_transport_type ttype;
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ IPA_USB_DBG("Start resume with no remote wakeup sequence: %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ "DPL channel":"Data Tethering channels");
+
+ /* Request USB_PROD */
+ result = ipa3_usb_request_prod(ttype);
+ if (result)
+ goto fail_exit;
+
+ /* Connect tethering protocol */
+ result = ipa3_usb_connect_teth_prot(teth_prot);
+ if (result) {
+ IPA_USB_ERR("failed to connect teth protocol\n");
+ goto release_prod;
+ }
+
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ /* Start UL channel */
+ result = ipa3_xdci_connect(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to start UL channel.\n");
+ goto disconn_teth;
+ }
+ }
+
+ /* Start DL/DPL channel */
+ result = ipa3_xdci_connect(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to start DL/DPL channel.\n");
+ goto stop_ul;
+ }
+
+ /* Change state to CONNECTED */
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+ IPA_USB_ERR("failed to change state to connected\n");
+ result = -EFAULT;
+ goto stop_dl;
+ }
+
+ return 0;
+
+stop_dl:
+ (void)ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
+stop_ul:
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ (void)ipa3_xdci_disconnect(ul_clnt_hdl, true,
+ ipa3_usb_ctx->qmi_req_id);
+ ipa3_usb_ctx->qmi_req_id++;
+ }
+disconn_teth:
+ (void)ipa3_usb_disconnect_teth_prot(teth_prot);
+release_prod:
+ (void)ipa3_usb_release_prod(ttype);
+fail_exit:
+ return result;
+}
+
int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
@@ -2557,19 +2762,25 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
ttype = IPA3_USB_GET_TTYPE(teth_prot);
- if (!ipa3_usb_check_legal_op(IPA_USB_RESUME, ttype)) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_OP_RESUME, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
}
- IPA_USB_DBG_LOW("Start resume sequence: %s\n",
- IPA3_USB_IS_TTYPE_DPL(ttype) ?
- "DPL channel" : "Data Tethering channels");
-
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
prev_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ if (prev_state == IPA_USB_SUSPENDED_NO_RWAKEUP) {
+ result = ipa3_usb_resume_no_remote_wakeup(ul_clnt_hdl,
+ dl_clnt_hdl, teth_prot);
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+ }
+
+ IPA_USB_DBG("Start resume sequence: %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ "DPL channel" : "Data Tethering channels");
/* Change state to RESUME_IN_PROGRESS */
if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) {
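
Taken together, the ipa_usb.c hunks above add a suspend path without remote wakeup: ipa_usb_xdci_suspend() gains a with_remote_wakeup flag, a suspend with the flag cleared tears the connection down to the new IPA_USB_SUSPENDED_NO_RWAKEUP state, and both ipa_usb_xdci_resume() and ipa_usb_xdci_disconnect() detect that state and rebuild or dismiss the channels accordingly. A minimal caller-side sketch of the new contract follows; the handles and the choice of IPA_USB_DIAG are illustrative assumptions, not taken from the patch:

	/* Sketch only: ul_hdl/dl_hdl are hypothetical client handles that were
	 * returned by the earlier xDCI connect call.
	 */
	int rc;

	/* Suspend with remote wakeup disabled: channels are stopped, the
	 * tethering protocol is disconnected and the state machine lands in
	 * IPA_USB_SUSPENDED_NO_RWAKEUP.
	 */
	rc = ipa_usb_xdci_suspend(ul_hdl, dl_hdl, IPA_USB_DIAG, false);
	if (rc)
		return rc;	/* driver-internal rollback restored CONNECTED */

	/* Later: resume rebuilds the connection from that state ... */
	rc = ipa_usb_xdci_resume(ul_hdl, dl_hdl, IPA_USB_DIAG);

	/* ... while a cable removal that happens while still suspended goes
	 * through disconnect, which only dismisses the already-stopped
	 * channels.
	 */
	if (rc)
		rc = ipa_usb_xdci_disconnect(ul_hdl, dl_hdl, IPA_USB_DIAG);
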
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index c36ecfef66f1..d6e563b935b6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -235,7 +235,7 @@ static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
* @ip: the ip address family type
* @hdr_sz: header size
*
- * Returns: 0 on success, negative on failure
+ * Returns: size on success, negative on failure
*
* caller needs to hold any needed locks to ensure integrity
*
@@ -373,7 +373,12 @@ static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
((long)body &
IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
} else {
- WARN_ON(tbl->sz == 0);
+ if (tbl->sz == 0) {
+ IPAERR("tbl size is 0\n");
+ WARN_ON(1);
+ goto proc_err;
+ }
+
/* allocate memory for the flt tbl */
flt_tbl_mem.size = tbl->sz;
flt_tbl_mem.base =
@@ -460,7 +465,12 @@ static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
((long)body &
IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
} else {
- WARN_ON(tbl->sz == 0);
+ if (tbl->sz == 0) {
+ IPAERR("tbl size is 0\n");
+ WARN_ON(1);
+ goto proc_err;
+ }
+
/* allocate memory for the flt tbl */
flt_tbl_mem.size = tbl->sz;
flt_tbl_mem.base =
@@ -534,8 +544,15 @@ static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip,
u8 *hdr;
u8 *body;
u8 *base;
+ int res;
+
+ res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ if (res < 0) {
+ IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res);
+ return res;
+ }
- mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ mem->size = res;
mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
if (mem->size == 0) {
@@ -720,6 +737,7 @@ static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,
u32 *entr;
u32 body_start_offset;
u32 hdr_top;
+ int res;
if (ip == IPA_IP_v4)
body_start_offset = IPA_MEM_PART(apps_v4_flt_ofst) -
@@ -756,7 +774,13 @@ static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,
entr++;
}
- mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ if (res < 0) {
+ IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res);
+ goto body_err;
+ }
+
+ mem->size = res;
mem->size -= hdr_sz;
mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index fec4d5484d28..73206abf9cfd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -139,7 +139,7 @@
#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
(((start_ofst) + 127) & ~127)
-#define IPA_RT_FLT_HW_RULE_BUF_SIZE (128)
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE (256)
#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index 6202992c2c4e..314b09593026 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,16 @@
#define IPA_NAT_SHARED_MEMORY 1
#define IPA_NAT_TEMP_MEM_SIZE 128
+enum nat_table_type {
+ IPA_NAT_BASE_TBL = 0,
+ IPA_NAT_EXPN_TBL = 1,
+ IPA_NAT_INDX_TBL = 2,
+ IPA_NAT_INDEX_EXPN_TBL = 3,
+};
+
+#define NAT_TABLE_ENTRY_SIZE_BYTE 32
+#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
+
static int ipa_nat_vma_fault_remap(
struct vm_area_struct *vma, struct vm_fault *vmf)
{
@@ -568,6 +578,71 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
goto bail;
}
+ for (cnt = 0; cnt < dma->entries; cnt++) {
+ if (dma->dma[cnt].table_index >= 1) {
+ IPAERR("Invalid table index %d\n",
+ dma->dma[cnt].table_index);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ switch (dma->dma[cnt].base_addr) {
+ case IPA_NAT_BASE_TBL:
+ if (dma->dma[cnt].offset >=
+ (ipa_ctx->nat_mem.size_base_tables + 1) *
+ NAT_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ case IPA_NAT_EXPN_TBL:
+ if (dma->dma[cnt].offset >=
+ ipa_ctx->nat_mem.size_expansion_tables *
+ NAT_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ case IPA_NAT_INDX_TBL:
+ if (dma->dma[cnt].offset >=
+ (ipa_ctx->nat_mem.size_base_tables + 1) *
+ NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ case IPA_NAT_INDEX_EXPN_TBL:
+ if (dma->dma[cnt].offset >=
+ ipa_ctx->nat_mem.size_expansion_tables *
+ NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ default:
+ IPAERR("Invalid base_addr %d\n",
+ dma->dma[cnt].base_addr);
+ ret = -EPERM;
+ goto bail;
+ }
+ }
+
size = sizeof(struct ipa_desc) * NUM_OF_DESC;
desc = kzalloc(size, GFP_KERNEL);
if (desc == NULL) {
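
The new validation loop above bounds every DMA command against the table it targets: base and expansion NAT tables are checked against a 32-byte entry size, index tables against a 4-byte entry size, and the base tables get one extra entry of headroom. For example, with size_base_tables = 100, an IPA_NAT_BASE_TBL command must keep its offset strictly below (100 + 1) * 32 = 3232 bytes, while an IPA_NAT_INDX_TBL command against the same table must stay below (100 + 1) * 4 = 404 bytes; anything past that is rejected with -EPERM before any descriptor is built.
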
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 15476f38cf44..5e7a5383334c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -227,7 +227,7 @@ int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
* @hdr_sz: header size
* @max_rt_idx: maximal index
*
- * Returns: 0 on success, negative on failure
+ * Returns: size on success, negative on failure
*
* caller needs to hold any needed locks to ensure integrity
*
@@ -356,7 +356,11 @@ static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr,
((long)body &
IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
} else {
- WARN_ON(tbl->sz == 0);
+ if (tbl->sz == 0) {
+ IPAERR("cannot generate 0 size table\n");
+ goto proc_err;
+ }
+
/* allocate memory for the RT tbl */
rt_tbl_mem.size = tbl->sz;
rt_tbl_mem.base =
@@ -429,8 +433,15 @@ static int ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip,
u8 *base;
int max_rt_idx;
int i;
+ int res;
- mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ if (res < 0) {
+ IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
+ goto error;
+ }
+
+ mem->size = res;
mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
@@ -603,6 +614,7 @@ static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip,
int num_index;
u32 body_start_offset;
u32 apps_start_idx;
+ int res;
if (ip == IPA_IP_v4) {
num_index = IPA_MEM_PART(v4_apps_rt_index_hi) -
@@ -632,7 +644,13 @@ static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip,
entr++;
}
- mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ if (res < 0) {
+ IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
+ goto base_err;
+ }
+
+ mem->size = res;
mem->size -= hdr_sz;
mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 96003d7a16a0..520f139ee38a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1053,6 +1053,8 @@ static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
IPAWANDBG
("SW filtering out none QMAP packet received from %s",
current->comm);
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1094,6 +1096,8 @@ send:
if (ret) {
pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
dev->name, ret);
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return -EFAULT;
}
/* IPA_RM checking end */
@@ -1109,7 +1113,6 @@ send:
if (ret) {
ret = NETDEV_TX_BUSY;
- dev->stats.tx_dropped++;
goto out;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index ab62dbcddd22..8676b35914e2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3790,6 +3790,12 @@ static int ipa3_gsi_pre_fw_load_init(void)
return 0;
}
+static void ipa3_uc_is_loaded(void)
+{
+ IPADBG("\n");
+ complete_all(&ipa3_ctx->uc_loaded_completion_obj);
+}
+
static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
{
enum gsi_ver gsi_ver;
@@ -3842,6 +3848,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
int result;
struct sps_bam_props bam_props = { 0 };
struct gsi_per_props gsi_props;
+ struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
@@ -3918,6 +3925,9 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
else
IPADBG(":ipa Uc interface init ok\n");
+ uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
+ ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
+
result = ipa3_wdi_init();
if (result)
IPAERR(":wdi init failed (%d)\n", -result);
@@ -4609,6 +4619,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
init_completion(&ipa3_ctx->init_completion_obj);
+ init_completion(&ipa3_ctx->uc_loaded_completion_obj);
/*
* For GSI, we can't register the GSI driver yet, as it expects
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 8326c3fdd9d1..26bd180624f1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -61,7 +61,7 @@ int ipa3_enable_data_path(u32 clnt_hdl)
!ipa3_should_pipe_be_suspended(ep->client))) {
memset(&ep_cfg_ctrl, 0 , sizeof(ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
/* Assign the resource group for pipe */
@@ -99,9 +99,21 @@ int ipa3_disable_data_path(u32 clnt_hdl)
/* Suspend the pipe */
if (IPA_CLIENT_IS_CONS(ep->client)) {
+ /*
+ * For the RG10 workaround, the uC needs to be loaded before the
+ * pipe can be suspended in this case.
+ */
+ if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
+ IPADBG("uC is not loaded yet, waiting...\n");
+ res = wait_for_completion_timeout(
+ &ipa3_ctx->uc_loaded_completion_obj, 60 * HZ);
+ if (res == 0)
+ IPADBG("timeout waiting for uC to load\n");
+ }
+
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_suspend = true;
- ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
udelay(IPA_PKT_FLUSH_TO_US);
@@ -1311,7 +1323,46 @@ int ipa3_set_usb_max_packet_size(
return 0;
}
-int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
+int ipa3_xdci_connect(u32 clnt_hdl)
+{
+ int result;
+ struct ipa3_ep_context *ep;
+
+ IPADBG("entry\n");
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ result = ipa3_start_gsi_channel(clnt_hdl);
+ if (result) {
+ IPAERR("failed to start gsi channel clnt_hdl=%u\n", clnt_hdl);
+ goto exit;
+ }
+
+ result = ipa3_enable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt_hdl=%d.\n", result,
+ clnt_hdl);
+ goto stop_ch;
+ }
+
+ IPADBG("exit\n");
+ goto exit;
+
+stop_ch:
+ (void)ipa3_stop_gsi_channel(clnt_hdl);
+exit:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return result;
+}
+
+int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
{
struct ipa3_ep_context *ep;
int result = -EFAULT;
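
The rename above splits channel bring-up into two helpers: ipa3_xdci_start() keeps the old signature (it still programs the transfer resource index) and is what the connect path now calls, while the new, simpler ipa3_xdci_connect() just starts the GSI channel and re-enables the data path, which is all the no-remote-wakeup resume needs. A condensed sketch of the two call sites, with error handling trimmed for brevity:

	/* initial connect (ipa3_usb_xdci_connect_internal): channels were just
	 * allocated, so the transfer resource index is still needed
	 */
	result = ipa3_xdci_start(params->usb_to_ipa_clnt_hdl,
				 params->usb_to_ipa_xferrscidx,
				 params->usb_to_ipa_xferrscidx_valid);

	/* resume from IPA_USB_SUSPENDED_NO_RWAKEUP: the channels already
	 * exist, so only start them and re-enable the data path
	 */
	result = ipa3_xdci_connect(ul_clnt_hdl);
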
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 09c7c1b0fd05..cc1cb456ab8a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -767,6 +767,30 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
}
/**
+ * ipa3_transport_irq_cmd_ack_free - callback function which will be
+ * called by SPS/GSI driver after an immediate command is complete.
+ * This function will also free the completion object once it is done.
+ * @tag_comp: pointer to the completion object
+ * @ignored: parameter not used
+ *
+ * Complete the immediate command's completion object; this releases the
+ * thread waiting on it (ipa3_send_cmd_timeout())
+ */
+static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
+{
+ struct ipa3_tag_completion *comp = tag_comp;
+
+ if (!comp) {
+ IPAERR("comp is NULL\n");
+ return;
+ }
+
+ complete(&comp->comp);
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+}
+
+/**
* ipa3_send_cmd - send immediate commands
* @num_desc: number of descriptors within the desc struct
* @descr: descriptor structure
@@ -778,7 +802,58 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
*/
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
{
- return ipa3_send_cmd_timeout(num_desc, descr, 0);
+ struct ipa3_desc *desc;
+ int i, result = 0;
+ struct ipa3_sys_context *sys;
+ int ep_idx;
+
+ for (i = 0; i < num_desc; i++)
+ IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+ if (-1 == ep_idx) {
+ IPAERR("Client %u is not mapped\n",
+ IPA_CLIENT_APPS_CMD_PROD);
+ return -EFAULT;
+ }
+
+ sys = ipa3_ctx->ep[ep_idx].sys;
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ if (num_desc == 1) {
+ init_completion(&descr->xfer_done);
+
+ if (descr->callback || descr->user1)
+ WARN_ON(1);
+
+ descr->callback = ipa3_transport_irq_cmd_ack;
+ descr->user1 = descr;
+ if (ipa3_send_one(sys, descr, true)) {
+ IPAERR("fail to send immediate command\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ wait_for_completion(&descr->xfer_done);
+ } else {
+ desc = &descr[num_desc - 1];
+ init_completion(&desc->xfer_done);
+
+ if (desc->callback || desc->user1)
+ WARN_ON(1);
+
+ desc->callback = ipa3_transport_irq_cmd_ack;
+ desc->user1 = desc;
+ if (ipa3_send(sys, num_desc, descr, true)) {
+ IPAERR("fail to send multiple immediate command set\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ wait_for_completion(&desc->xfer_done);
+ }
+
+bail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
}
/**
@@ -800,6 +875,7 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
struct ipa3_sys_context *sys;
int ep_idx;
int completed;
+ struct ipa3_tag_completion *comp;
for (i = 0; i < num_desc; i++)
IPADBG("sending imm cmd %d\n", descr[i].opcode);
@@ -810,55 +886,56 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
IPA_CLIENT_APPS_CMD_PROD);
return -EFAULT;
}
+
+ comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
+ if (!comp) {
+ IPAERR("no mem\n");
+ return -ENOMEM;
+ }
+ init_completion(&comp->comp);
+
+ /* completion needs to be released from both here and in ack callback */
+ atomic_set(&comp->cnt, 2);
+
sys = ipa3_ctx->ep[ep_idx].sys;
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
if (num_desc == 1) {
- init_completion(&descr->xfer_done);
-
if (descr->callback || descr->user1)
WARN_ON(1);
- descr->callback = ipa3_transport_irq_cmd_ack;
- descr->user1 = descr;
+ descr->callback = ipa3_transport_irq_cmd_ack_free;
+ descr->user1 = comp;
if (ipa3_send_one(sys, descr, true)) {
IPAERR("fail to send immediate command\n");
+ kfree(comp);
result = -EFAULT;
goto bail;
}
- if (timeout) {
- completed = wait_for_completion_timeout(
- &descr->xfer_done, msecs_to_jiffies(timeout));
- if (!completed)
- IPADBG("timeout waiting for imm-cmd ACK\n");
- } else {
- wait_for_completion(&descr->xfer_done);
- }
} else {
desc = &descr[num_desc - 1];
- init_completion(&desc->xfer_done);
if (desc->callback || desc->user1)
WARN_ON(1);
- desc->callback = ipa3_transport_irq_cmd_ack;
- desc->user1 = desc;
+ desc->callback = ipa3_transport_irq_cmd_ack_free;
+ desc->user1 = comp;
if (ipa3_send(sys, num_desc, descr, true)) {
IPAERR("fail to send multiple immediate command set\n");
+ kfree(comp);
result = -EFAULT;
goto bail;
}
- if (timeout) {
- completed = wait_for_completion_timeout(
- &desc->xfer_done, msecs_to_jiffies(timeout));
- if (!completed)
- IPADBG("timeout waiting for imm-cmd ACK\n");
- } else {
- wait_for_completion(&desc->xfer_done);
- }
-
}
+ completed = wait_for_completion_timeout(
+ &comp->comp, msecs_to_jiffies(timeout));
+ if (!completed)
+ IPADBG("timeout waiting for imm-cmd ACK\n");
+
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+
bail:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
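
The rework of ipa3_send_cmd_timeout() above exists so that a command ACK arriving after the timeout cannot touch freed memory: the completion object is heap-allocated and reference counted, starting at two, and whichever of the waiting thread or ipa3_transport_irq_cmd_ack_free() drops the count to zero frees it. A stripped-down sketch of that pattern (illustrative skeleton; names follow the driver):

	struct ipa3_tag_completion *comp;

	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
	if (!comp)
		return -ENOMEM;
	init_completion(&comp->comp);
	/* one reference for the waiter, one for the ACK callback */
	atomic_set(&comp->cnt, 2);

	/* ... hand comp to the last descriptor as user1 and submit it ... */

	if (!wait_for_completion_timeout(&comp->comp, msecs_to_jiffies(timeout)))
		IPADBG("timeout waiting for imm-cmd ACK\n");
	/* drop the waiter's reference; the callback drops the other one */
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);
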
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 33be22f98b9d..40f1e93653f9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1232,6 +1232,7 @@ struct ipa3_context {
bool ipa_initialization_complete;
struct list_head ipa_ready_cb_list;
struct completion init_completion_obj;
+ struct completion uc_loaded_completion_obj;
struct ipa3_smp2p_info smp2p_info;
u32 ipa_tz_unlock_reg_num;
struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
@@ -1482,7 +1483,9 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl);
int ipa3_set_usb_max_packet_size(
enum ipa_usb_max_usb_packet_size usb_max_packet_size);
-int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
+int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
+
+int ipa3_xdci_connect(u32 clnt_hdl);
int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 67e9b397a8b4..e7e5cf114242 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -24,6 +24,17 @@
#define IPA_NAT_TEMP_MEM_SIZE 128
+enum nat_table_type {
+ IPA_NAT_BASE_TBL = 0,
+ IPA_NAT_EXPN_TBL = 1,
+ IPA_NAT_INDX_TBL = 2,
+ IPA_NAT_INDEX_EXPN_TBL = 3,
+};
+
+#define NAT_TABLE_ENTRY_SIZE_BYTE 32
+#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
+
static int ipa3_nat_vma_fault_remap(
struct vm_area_struct *vma, struct vm_fault *vmf)
{
@@ -571,6 +582,71 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
goto bail;
}
+ for (cnt = 0; cnt < dma->entries; cnt++) {
+ if (dma->dma[cnt].table_index >= 1) {
+ IPAERR("Invalid table index %d\n",
+ dma->dma[cnt].table_index);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ switch (dma->dma[cnt].base_addr) {
+ case IPA_NAT_BASE_TBL:
+ if (dma->dma[cnt].offset >=
+ (ipa3_ctx->nat_mem.size_base_tables + 1) *
+ NAT_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ case IPA_NAT_EXPN_TBL:
+ if (dma->dma[cnt].offset >=
+ ipa3_ctx->nat_mem.size_expansion_tables *
+ NAT_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ case IPA_NAT_INDX_TBL:
+ if (dma->dma[cnt].offset >=
+ (ipa3_ctx->nat_mem.size_base_tables + 1) *
+ NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ case IPA_NAT_INDEX_EXPN_TBL:
+ if (dma->dma[cnt].offset >=
+ ipa3_ctx->nat_mem.size_expansion_tables *
+ NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
+ IPAERR("Invalid offset %d\n",
+ dma->dma[cnt].offset);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ break;
+
+ default:
+ IPAERR("Invalid base_addr %d\n",
+ dma->dma[cnt].base_addr);
+ ret = -EPERM;
+ goto bail;
+ }
+ }
+
size = sizeof(struct ipa3_desc) * NUM_OF_DESC;
desc = kzalloc(size, GFP_KERNEL);
if (desc == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c0a6e8b00d71..4ea68ae1e95c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -3522,7 +3522,7 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
goto end_sequence;
IPADBG("Inject a DMA_TASK with 1B packet to IPA and retry\n");
- /* Send a 1B packet DMA_RASK to IPA and try again*/
+ /* Send a 1B packet DMA_TASK to IPA and try again */
res = ipa3_inject_dma_task_for_gsi();
if (res) {
IPAERR("Failed to inject DMA TASk for GSI\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index f134852e046e..8f6c303d2867 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1066,6 +1066,8 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
IPAWANDBG_LOW
("SW filtering out none QMAP packet received from %s",
current->comm);
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1077,7 +1079,8 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
goto send;
} else {
- pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+ pr_err("[%s]fatal: ipa3_wwan_xmit stopped\n",
+ dev->name);
return NETDEV_TX_BUSY;
}
}
@@ -1107,6 +1110,8 @@ send:
if (ret) {
pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
dev->name, ret);
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return -EFAULT;
}
/* IPA_RM checking end */
@@ -1122,7 +1127,6 @@ send:
if (ret) {
ret = NETDEV_TX_BUSY;
- dev->stats.tx_dropped++;
goto out;
}
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 78e685f789cd..1ef8ebe3ed7d 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -56,6 +56,8 @@ static const char *const pmic_names[] = {
[PMICOBALT_SUBTYPE] = "PMICOBALT",
[PM8005_SUBTYPE] = "PM8005",
[PM8937_SUBTYPE] = "PM8937",
+ [PM2FALCON_SUBTYPE] = "PM2FALCON",
+ [PMFALCON_SUBTYPE] = "PMFALCON",
[PMI8937_SUBTYPE] = "PMI8937",
};
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index b0f62141ea4d..f774cb576ffa 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -131,7 +131,7 @@ MODULE_LICENSE("GPL");
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
-#define HCI_HOTKEY_ENABLE 0x01
+#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index adc640c7afe1..a703b208f6e4 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -118,7 +118,14 @@ enum {
DELTA_SOC_IRQ_WA = BIT(0),
};
-/* SRAM parameters */
+/*
+ * List of FG_SRAM parameters. Please add a parameter here only if it is
+ * either an entry used to configure an entity (e.g. termination current),
+ * which might need some encoding, or an entry that will be read from SRAM
+ * and decoded (e.g. CC_SOC_SW) for SW to use in various places. For generic
+ * reads/writes to SRAM registers, please use the fg_sram_read/write APIs
+ * directly without adding an entry here.
+ */
enum fg_sram_param_id {
FG_SRAM_BATT_SOC = 0,
FG_SRAM_VOLTAGE_PRED,
diff --git a/drivers/power/qcom-charger/fg-util.c b/drivers/power/qcom-charger/fg-util.c
index bbdbe48896d7..0e3c7dbb5731 100644
--- a/drivers/power/qcom-charger/fg-util.c
+++ b/drivers/power/qcom-charger/fg-util.c
@@ -621,6 +621,17 @@ static ssize_t fg_sram_dfs_reg_write(struct file *file, const char __user *buf,
/* Parse the data in the buffer. It should be a string of numbers */
while ((pos < count) &&
sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+ /*
+ * We shouldn't be receiving a string of characters that
+ * exceeds a size of 5 to keep this functionally correct.
+ * Also, we should make sure that pos never gets overflowed
+ * beyond the limit.
+ */
+ if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+ cnt = 0;
+ ret = -EINVAL;
+ break;
+ }
pos += bytes_read;
values[cnt++] = data & 0xff;
}
diff --git a/drivers/power/qcom-charger/pmic-voter.c b/drivers/power/qcom-charger/pmic-voter.c
index d0bad7dec094..8072b63f53fe 100644
--- a/drivers/power/qcom-charger/pmic-voter.c
+++ b/drivers/power/qcom-charger/pmic-voter.c
@@ -421,6 +421,7 @@ static int show_votable_clients(struct seq_file *m, void *data)
lock_votable(votable);
+ seq_printf(m, "%s:\n", votable->name);
seq_puts(m, "Clients:\n");
for (i = 0; i < votable->num_clients; i++) {
if (votable->client_strs[i]) {
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index c0a19ae115d0..4ee94b990382 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -64,6 +64,8 @@
#define PROFILE_LOAD_OFFSET 0
#define NOM_CAP_WORD 58
#define NOM_CAP_OFFSET 0
+#define ACT_BATT_CAP_BKUP_WORD 74
+#define ACT_BATT_CAP_BKUP_OFFSET 0
#define CYCLE_COUNT_WORD 75
#define CYCLE_COUNT_OFFSET 0
#define PROFILE_INTEGRITY_WORD 79
@@ -156,8 +158,8 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
fg_decode_cc_soc),
PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_cc_soc),
- PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
- NULL, fg_decode_default),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+ 1, 1, 0, NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -208,8 +210,8 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
fg_decode_cc_soc),
PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_cc_soc),
- PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
- NULL, fg_decode_default),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+ 1, 1, 0, NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -504,8 +506,7 @@ static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
#define BATT_TEMP_DENR 1
static int fg_get_battery_temp(struct fg_chip *chip, int *val)
{
- int rc = 0;
- u16 temp = 0;
+ int rc = 0, temp;
u8 buf[2];
rc = fg_read(chip, BATT_INFO_BATT_TEMP_LSB(chip), buf, 2);
@@ -887,10 +888,20 @@ static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
return -EPERM;
cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+ /* Write to a backup register to use across reboot */
rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
if (rc < 0) {
+ pr_err("Error in writing act_batt_cap_bkup, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Write to actual capacity register for coulomb counter operation */
+ rc = fg_sram_write(chip, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET,
+ (u8 *)&cc_mah, chip->sp[FG_SRAM_ACT_BATT_CAP].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
return rc;
}
@@ -913,10 +924,11 @@ static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
}
chip->cl.learned_cc_uah = act_cap_mah * 1000;
- if (chip->cl.learned_cc_uah == 0)
- chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+ if (chip->cl.learned_cc_uah == 0)
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
delta_cc_uah = abs(chip->cl.learned_cc_uah -
chip->cl.nom_cap_uah);
pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
@@ -927,16 +939,14 @@ static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
* the nominal capacity.
*/
if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
- fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected\n",
- chip->cl.learned_cc_uah);
- fg_dbg(chip, FG_CAP_LEARN, "Capping it to nominal:%lld\n",
- chip->cl.nom_cap_uah);
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected, capping it to nominal: %lld\n",
+ chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
- rc = fg_save_learned_cap_to_sram(chip);
- if (rc < 0)
- pr_err("Error in saving learned_cc_uah, rc=%d\n",
- rc);
}
+
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
}
fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
@@ -1663,7 +1673,7 @@ static void fg_notify_charger(struct fg_chip *chip)
return;
}
- prop.intval = chip->bp.fastchg_curr_ma;
+ prop.intval = chip->bp.fastchg_curr_ma * 1000;
rc = power_supply_set_property(chip->batt_psy,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop);
if (rc < 0) {
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 93965dbe99ae..f9d76c56aa2e 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -203,6 +203,13 @@ static struct smb_params v1_params = {
.get_proc = smblib_mapping_cc_delta_to_field_value,
.set_proc = smblib_mapping_cc_delta_from_field_value,
},
+ .freq_buck = {
+ .name = "buck switching frequency",
+ .reg = CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+ .min_u = 600,
+ .max_u = 2000,
+ .step_u = 200,
+ },
};
#define STEP_CHARGING_MAX_STEPS 5
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 5785e42e0140..198e77469bbe 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -220,6 +220,34 @@ int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend)
return rc;
}
+#define FSW_600HZ_FOR_5V 600
+#define FSW_800HZ_FOR_6V_8V 800
+#define FSW_1MHZ_FOR_REMOVAL 1000
+#define FSW_1MHZ_FOR_9V 1000
+#define FSW_1P2MHZ_FOR_12V 1200
+static int smblib_set_opt_freq_buck(struct smb_charger *chg, int fsw_khz)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck, fsw_khz);
+ if (rc < 0)
+ dev_err(chg->dev, "Error in setting freq_buck rc=%d\n", rc);
+
+ if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
+ pval.intval = fsw_khz;
+ rc = power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_BUCK_FREQ, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Could not set parallel buck_freq rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
struct apsd_result {
const char * const name;
const u8 bit;
@@ -416,10 +444,13 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
if (min_allowed_uv == MICRO_5V && max_allowed_uv == MICRO_5V) {
allowed_voltage = USBIN_ADAPTER_ALLOW_5V;
+ smblib_set_opt_freq_buck(chg, FSW_600HZ_FOR_5V);
} else if (min_allowed_uv == MICRO_9V && max_allowed_uv == MICRO_9V) {
allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+ smblib_set_opt_freq_buck(chg, FSW_1MHZ_FOR_9V);
} else if (min_allowed_uv == MICRO_12V && max_allowed_uv == MICRO_12V) {
allowed_voltage = USBIN_ADAPTER_ALLOW_12V;
+ smblib_set_opt_freq_buck(chg, FSW_1P2MHZ_FOR_12V);
} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_9V) {
allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_12V) {
@@ -1199,6 +1230,7 @@ int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val)
{
+ union power_supply_propval pval;
int rc;
u8 stat;
@@ -1212,9 +1244,19 @@ int smblib_get_prop_batt_health(struct smb_charger *chg,
stat);
if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
- smblib_err(chg, "battery over-voltage\n");
- val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- goto done;
+ rc = smblib_get_prop_batt_voltage_now(chg, &pval);
+ if (!rc) {
+ /*
+ * If Vbatt is within 40mV above Vfloat, then don't
+ * treat it as overvoltage.
+ */
+ if (pval.intval >=
+ get_effective_result(chg->fv_votable) + 40000) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ smblib_err(chg, "battery over-voltage\n");
+ goto done;
+ }
+ }
}
if (stat & BAT_TEMP_STATUS_TOO_COLD_BIT)
@@ -2167,6 +2209,16 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
int rc;
u8 stat;
+ rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ chg->vbus_present = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+ smblib_set_opt_freq_buck(chg,
+ chg->vbus_present ? FSW_600HZ_FOR_5V : FSW_1MHZ_FOR_REMOVAL);
+
/* fetch the DPDM regulator */
if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
"dpdm-supply", NULL)) {
@@ -2181,14 +2233,6 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
if (!chg->dpdm_reg)
goto skip_dpdm_float;
- rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
- return IRQ_HANDLED;
- }
-
- chg->vbus_present = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
-
if (chg->vbus_present) {
if (!regulator_is_enabled(chg->dpdm_reg)) {
smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
@@ -2260,6 +2304,60 @@ static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg,
rising ? "rising" : "falling");
}
+#define QC3_PULSES_FOR_6V 5
+#define QC3_PULSES_FOR_9V 20
+#define QC3_PULSES_FOR_12V 35
+static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+ int pulses;
+
+ if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't read QC_CHANGE_STATUS rc=%d\n", rc);
+ return;
+ }
+
+ switch (stat & QC_2P0_STATUS_MASK) {
+ case QC_5V_BIT:
+ smblib_set_opt_freq_buck(chg, FSW_600HZ_FOR_5V);
+ break;
+ case QC_9V_BIT:
+ smblib_set_opt_freq_buck(chg, FSW_1MHZ_FOR_9V);
+ break;
+ case QC_12V_BIT:
+ smblib_set_opt_freq_buck(chg, FSW_1P2MHZ_FOR_12V);
+ break;
+ default:
+ smblib_set_opt_freq_buck(chg, FSW_1MHZ_FOR_REMOVAL);
+ break;
+ }
+ }
+
+ if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+ rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+ return;
+ }
+ pulses = (stat & QC_PULSE_COUNT_MASK);
+
+ if (pulses < QC3_PULSES_FOR_6V)
+ smblib_set_opt_freq_buck(chg, FSW_600HZ_FOR_5V);
+ else if (pulses < QC3_PULSES_FOR_9V)
+ smblib_set_opt_freq_buck(chg, FSW_800HZ_FOR_6V_8V);
+ else if (pulses < QC3_PULSES_FOR_12V)
+ smblib_set_opt_freq_buck(chg, FSW_1MHZ_FOR_9V);
+ else
+ smblib_set_opt_freq_buck(chg, FSW_1P2MHZ_FOR_12V);
+
+ }
+}
+
static void smblib_handle_adaptive_voltage_done(struct smb_charger *chg,
bool rising)
{
@@ -2272,10 +2370,20 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
bool rising)
{
const struct apsd_result *apsd_result;
+ int rc;
if (!rising)
return;
+ /*
+ * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
+ * change interrupt.
+ */
+ rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+
if (chg->mode == PARALLEL_MASTER)
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
@@ -2381,6 +2489,8 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
smblib_handle_slow_plugin_timeout(chg,
(bool)(stat & SLOW_PLUGIN_TIMEOUT_BIT));
+ smblib_hvdcp_adaptive_voltage_change(chg);
+
power_supply_changed(chg->usb_psy);
return IRQ_HANDLED;
@@ -2399,6 +2509,12 @@ static void typec_source_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+ /* reset AUTH_IRQ_EN_CFG_BIT */
+ rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+
/* reconfigure allowed voltage for HVDCP */
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
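
The new smblib_hvdcp_adaptive_voltage_change() handler retunes the buck switching frequency whenever the adapter voltage changes. For QC 2.0 the raw status bits map directly (5 V -> 600 kHz, 9 V -> 1 MHz, 12 V -> 1.2 MHz), and for QC 3.0 the voltage is inferred from the accumulated pulse count, each pulse nudging the adapter by roughly 200 mV, which is consistent with the thresholds chosen here: below 5 pulses stays in the 5 V band (600 kHz), 5-19 pulses is treated as 6-8 V (800 kHz), 20-34 as 9 V (1 MHz), and 35 or more as 12 V (1.2 MHz). For example, a QC 3.0 adapter reporting 12 pulses sits at roughly 5 V + 12 * 0.2 V = 7.4 V and therefore gets FSW_800HZ_FOR_6V_8V. Note that the units are kHz despite the "_HZ" in the macro names, going by the freq_buck parameter range of 600-2000 added in qpnp-smb2.c.
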
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index ba501761c209..a74fcf730a8c 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -428,6 +428,7 @@ enum {
#define USBIN_5V_TO_12V_BIT BIT(2)
#define USBIN_5V_TO_9V_BIT BIT(1)
#define USBIN_5V_BIT BIT(0)
+#define QC_2P0_STATUS_MASK GENMASK(2, 0)
#define APSD_STATUS_REG (USBIN_BASE + 0x07)
#define APSD_STATUS_7_BIT BIT(7)
diff --git a/drivers/power/qcom-charger/smb1351-charger.c b/drivers/power/qcom-charger/smb1351-charger.c
index 0f18844b9afa..79fbe33acf5d 100644
--- a/drivers/power/qcom-charger/smb1351-charger.c
+++ b/drivers/power/qcom-charger/smb1351-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1872,6 +1872,12 @@ static void smb1351_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
}
}
+ if (!cur) {
+ pr_debug("Couldn't choose batt state, adc state=%d and temp=%d\n",
+ state, temp);
+ return;
+ }
+
if (cur->batt_present)
chip->battery_missing = false;
else
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 3db295b3e6e8..9a87ff5fb081 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -48,14 +48,14 @@ static struct smb_params v1_params = {
.name = "fast charge current",
.reg = FAST_CHARGE_CURRENT_CFG_REG,
.min_u = 0,
- .max_u = 4500000,
+ .max_u = 6000000,
.step_u = 25000,
},
.fv = {
.name = "float voltage",
.reg = FLOAT_VOLTAGE_CFG_REG,
- .min_u = 2500000,
- .max_u = 5000000,
+ .min_u = 2450000,
+ .max_u = 4950000,
.step_u = 10000,
},
.usb_icl = {
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 17c8a5b00843..7569e35b59e0 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -183,5 +183,19 @@ config POWER_RESET_ZX
help
Reboot support for ZTE SoCs.
+config REBOOT_MODE
+ tristate
+
+config SYSCON_REBOOT_MODE
+ tristate "Generic SYSCON regmap reboot mode driver"
+ depends on OF
+ select REBOOT_MODE
+ select MFD_SYSCON
+ help
+ Say Y here to enable the reboot mode driver. It stores the
+ reboot mode argument in a SYSCON-mapped register, which the
+ bootloader can read to take a different action according to
+ the mode.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 3904e7977d07..66568c4497a4 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -20,3 +20,5 @@ obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o
obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o
obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o
obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o
+obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
+obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 2f109013f723..d32f293695bb 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -380,7 +380,6 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
msm_trigger_wdog_bite();
#endif
- scm_disable_sdi();
halt_spmi_pmic_arbiter();
deassert_ps_hold();
diff --git a/drivers/power/reset/reboot-mode.c b/drivers/power/reset/reboot-mode.c
new file mode 100644
index 000000000000..2dfbbce0f817
--- /dev/null
+++ b/drivers/power/reset/reboot-mode.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include "reboot-mode.h"
+
+#define PREFIX "mode-"
+
+struct mode_info {
+ const char *mode;
+ u32 magic;
+ struct list_head list;
+};
+
+static unsigned int get_reboot_mode_magic(struct reboot_mode_driver *reboot,
+ const char *cmd)
+{
+ const char *normal = "normal";
+ int magic = 0;
+ struct mode_info *info;
+
+ if (!cmd)
+ cmd = normal;
+
+ list_for_each_entry(info, &reboot->head, list) {
+ if (!strcmp(info->mode, cmd)) {
+ magic = info->magic;
+ break;
+ }
+ }
+
+ return magic;
+}
+
+static int reboot_mode_notify(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct reboot_mode_driver *reboot;
+ unsigned int magic;
+
+ reboot = container_of(this, struct reboot_mode_driver, reboot_notifier);
+ magic = get_reboot_mode_magic(reboot, cmd);
+ if (magic)
+ reboot->write(reboot, magic);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * reboot_mode_register - register a reboot mode driver
+ * @reboot: reboot mode driver
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int reboot_mode_register(struct reboot_mode_driver *reboot)
+{
+ struct mode_info *info;
+ struct property *prop;
+ struct device_node *np = reboot->dev->of_node;
+ size_t len = strlen(PREFIX);
+ int ret;
+
+ INIT_LIST_HEAD(&reboot->head);
+
+ for_each_property_of_node(np, prop) {
+ if (strncmp(prop->name, PREFIX, len))
+ continue;
+
+ info = devm_kzalloc(reboot->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (of_property_read_u32(np, prop->name, &info->magic)) {
+ dev_err(reboot->dev, "reboot mode %s without magic number\n",
+ prop->name);
+ devm_kfree(reboot->dev, info);
+ continue;
+ }
+
+ info->mode = kstrdup_const(prop->name + len, GFP_KERNEL);
+ if (!info->mode) {
+ ret = -ENOMEM;
+ goto error;
+ } else if (info->mode[0] == '\0') {
+ kfree_const(info->mode);
+ ret = -EINVAL;
+ dev_err(reboot->dev, "invalid mode name(%s): too short!\n",
+ prop->name);
+ goto error;
+ }
+
+ list_add_tail(&info->list, &reboot->head);
+ }
+
+ reboot->reboot_notifier.notifier_call = reboot_mode_notify;
+ register_reboot_notifier(&reboot->reboot_notifier);
+
+ return 0;
+
+error:
+ list_for_each_entry(info, &reboot->head, list)
+ kfree_const(info->mode);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reboot_mode_register);
+
+/**
+ * reboot_mode_unregister - unregister a reboot mode driver
+ * @reboot: reboot mode driver
+ */
+int reboot_mode_unregister(struct reboot_mode_driver *reboot)
+{
+ struct mode_info *info;
+
+ unregister_reboot_notifier(&reboot->reboot_notifier);
+
+ list_for_each_entry(info, &reboot->head, list)
+ kfree_const(info->mode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reboot_mode_unregister);
+
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com");
+MODULE_DESCRIPTION("System reboot mode core library");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/reboot-mode.h b/drivers/power/reset/reboot-mode.h
new file mode 100644
index 000000000000..2491bb71f591
--- /dev/null
+++ b/drivers/power/reset/reboot-mode.h
@@ -0,0 +1,14 @@
+#ifndef __REBOOT_MODE_H__
+#define __REBOOT_MODE_H__
+
+struct reboot_mode_driver {
+ struct device *dev;
+ struct list_head head;
+ int (*write)(struct reboot_mode_driver *reboot, unsigned int magic);
+ struct notifier_block reboot_notifier;
+};
+
+int reboot_mode_register(struct reboot_mode_driver *reboot);
+int reboot_mode_unregister(struct reboot_mode_driver *reboot);
+
+#endif
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
new file mode 100644
index 000000000000..9e1cba5dd58e
--- /dev/null
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include "reboot-mode.h"
+
+struct syscon_reboot_mode {
+ struct regmap *map;
+ struct reboot_mode_driver reboot;
+ u32 offset;
+ u32 mask;
+};
+
+static int syscon_reboot_mode_write(struct reboot_mode_driver *reboot,
+ unsigned int magic)
+{
+ struct syscon_reboot_mode *syscon_rbm;
+ int ret;
+
+ syscon_rbm = container_of(reboot, struct syscon_reboot_mode, reboot);
+
+ ret = regmap_update_bits(syscon_rbm->map, syscon_rbm->offset,
+ syscon_rbm->mask, magic);
+ if (ret < 0)
+ dev_err(reboot->dev, "update reboot mode bits failed\n");
+
+ return ret;
+}
+
+static int syscon_reboot_mode_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct syscon_reboot_mode *syscon_rbm;
+
+ syscon_rbm = devm_kzalloc(&pdev->dev, sizeof(*syscon_rbm), GFP_KERNEL);
+ if (!syscon_rbm)
+ return -ENOMEM;
+
+ syscon_rbm->reboot.dev = &pdev->dev;
+ syscon_rbm->reboot.write = syscon_reboot_mode_write;
+ syscon_rbm->mask = 0xffffffff;
+
+ dev_set_drvdata(&pdev->dev, syscon_rbm);
+
+ syscon_rbm->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(syscon_rbm->map))
+ return PTR_ERR(syscon_rbm->map);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset",
+ &syscon_rbm->offset))
+ return -EINVAL;
+
+ of_property_read_u32(pdev->dev.of_node, "mask", &syscon_rbm->mask);
+
+ ret = reboot_mode_register(&syscon_rbm->reboot);
+ if (ret)
+ dev_err(&pdev->dev, "can't register reboot mode\n");
+
+ return ret;
+}
+
+static int syscon_reboot_mode_remove(struct platform_device *pdev)
+{
+ struct syscon_reboot_mode *syscon_rbm = dev_get_drvdata(&pdev->dev);
+
+ return reboot_mode_unregister(&syscon_rbm->reboot);
+}
+
+static const struct of_device_id syscon_reboot_mode_of_match[] = {
+ { .compatible = "syscon-reboot-mode" },
+ {}
+};
+
+static struct platform_driver syscon_reboot_mode_driver = {
+ .probe = syscon_reboot_mode_probe,
+ .remove = syscon_reboot_mode_remove,
+ .driver = {
+ .name = "syscon-reboot-mode",
+ .of_match_table = syscon_reboot_mode_of_match,
+ },
+};
+module_platform_driver(syscon_reboot_mode_driver);
+
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com");
+MODULE_DESCRIPTION("SYSCON reboot mode driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 423ce087cd9c..5d5adee16886 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
p->base = devm_ioremap_resource(&pdev->dev, res);
- if (!p->base) {
- ret = -ENOMEM;
+ if (IS_ERR(p->base)) {
+ ret = PTR_ERR(p->base);
goto out_clk;
}
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f2e1a39ce0f3..5cf4a97e0304 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
- AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
+ AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
- AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
+ AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 72fc3c32db49..b6d831b84e1d 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -305,7 +305,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_s2mps11_buck6_10(num, min, step) { \
+#define regulator_desc_s2mps11_buck67810(num, min, step) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
@@ -321,6 +321,22 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
+#define regulator_desc_s2mps11_buck9 { \
+ .name = "BUCK9", \
+ .id = S2MPS11_BUCK9, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MIN_3000_MV, \
+ .uV_step = STEP_25_MV, \
+ .n_voltages = S2MPS11_BUCK9_N_VOLTAGES, \
+ .ramp_delay = S2MPS11_RAMP_DELAY, \
+ .vsel_reg = S2MPS11_REG_B9CTRL2, \
+ .vsel_mask = S2MPS11_BUCK9_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B9CTRL1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_ldo(1, STEP_25_MV),
regulator_desc_s2mps11_ldo(2, STEP_50_MV),
@@ -365,11 +381,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(3),
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
- regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
- regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck9,
+ regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
};
static struct regulator_ops s2mps14_reg_ops;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 58f5d3b8e981..27343e1c43ef 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
}
}
- if (i < s5m8767->num_regulators)
- *enable_ctrl =
- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+ if (i >= s5m8767->num_regulators)
+ return -EINVAL;
+
+ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
return 0;
}
@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
else
regulators[id].vsel_mask = 0xff;
- s5m8767_get_register(s5m8767, id, &enable_reg,
+ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
&enable_val);
+ if (ret) {
+ dev_err(s5m8767->dev, "error reading registers\n");
+ return ret;
+ }
regulators[id].enable_reg = enable_reg;
regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
regulators[id].enable_val = enable_val;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 05a51ef52703..d5c1b057a739 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
* Only use this where you are certain another lock will not be held.
*/
static inline void
-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
+ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
{
- spin_lock_irqsave(&rtc->lock, flags);
+ spin_lock_irqsave(&rtc->lock, *flags);
ds1685_rtc_switch_to_bank1(rtc);
}
@@ -1304,7 +1304,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 reg = 0, bit = 0, tmp;
- unsigned long flags = 0;
+ unsigned long flags;
long int val = 0;
const struct ds1685_rtc_ctrl_regs *reg_info =
ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
@@ -1325,7 +1325,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
bit = reg_info->bit;
/* Safe to spinlock during a write. */
- ds1685_rtc_begin_ctrl_access(rtc, flags);
+ ds1685_rtc_begin_ctrl_access(rtc, &flags);
tmp = rtc->read(rtc, reg);
rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
ds1685_rtc_end_ctrl_access(rtc, flags);
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 097325d96db5..b1b4746a0eab 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
* it does not seem to carry it over a subsequent write/read.
* So we'll limit ourself to 100 years, starting at 2000 for now.
*/
- buf[6] = tm->tm_year - 100;
+ buf[6] = bin2bcd(tm->tm_year - 100);
/*
* CTL1 only contains TEST-mode bits apart from stop,
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 7184a0eda793..725dccae24e7 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
MAX77686_RTCIRQ_RTCA1);
- if (!info->virq) {
+ if (info->virq <= 0) {
ret = -ENXIO;
goto err_rtc;
}
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index bd911bafb809..17341feadad1 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -65,7 +65,6 @@
static const struct i2c_device_id rx8025_id[] = {
{ "rx8025", 0 },
- { "rv8803", 1 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8025_id);
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index f64c282275b3..e1b86bb01062 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
}
static const struct rtc_class_ops vr41xx_rtc_ops = {
- .release = vr41xx_rtc_release,
- .ioctl = vr41xx_rtc_ioctl,
- .read_time = vr41xx_rtc_read_time,
- .set_time = vr41xx_rtc_set_time,
- .read_alarm = vr41xx_rtc_read_alarm,
- .set_alarm = vr41xx_rtc_set_alarm,
+ .release = vr41xx_rtc_release,
+ .ioctl = vr41xx_rtc_ioctl,
+ .read_time = vr41xx_rtc_read_time,
+ .set_time = vr41xx_rtc_set_time,
+ .read_alarm = vr41xx_rtc_read_alarm,
+ .set_alarm = vr41xx_rtc_set_alarm,
+ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
};
static int rtc_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index e5647d59224f..0b331c9c0a8f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -13,13 +13,13 @@ menuconfig SCSI_DH
config SCSI_DH_RDAC
tristate "LSI RDAC Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have a LSI RDAC select y. Otherwise, say N.
config SCSI_DH_HP_SW
tristate "HP/COMPAQ MSA Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have a HP/COMPAQ MSA device that requires START_STOP to
be sent to start it and cannot upgrade the firmware then select y.
@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
config SCSI_DH_EMC
tristate "EMC CLARiiON Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have a EMC CLARiiON select y. Otherwise, say N.
config SCSI_DH_ALUA
tristate "SPC-3 ALUA Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
Access (ALUA).
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index db9446c612da..b0d92b84bcdc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2855,7 +2855,7 @@ lpfc_online(struct lpfc_hba *phba)
}
vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
+ if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
@@ -2872,7 +2872,8 @@ lpfc_online(struct lpfc_hba *phba)
}
spin_unlock_irq(shost->host_lock);
}
- lpfc_destroy_vport_work_array(phba, vports);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
lpfc_unblock_mgmt_io(phba);
return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 97a1c1c33b05..00ce3e269a43 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6282,12 +6282,13 @@ out:
}
for (i = 0; i < ioc->sge_count; i++) {
- if (kbuff_arr[i])
+ if (kbuff_arr[i]) {
dma_free_coherent(&instance->pdev->dev,
le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i],
le32_to_cpu(kern_sge32[i].phys_addr));
kbuff_arr[i] = NULL;
+ }
}
megasas_return_cmd(instance, cmd);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5d0ec42a9317..634254a52301 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
.eh_bus_reset_handler = qla1280_eh_bus_reset,
.eh_host_reset_handler = qla1280_eh_adapter_reset,
.bios_param = qla1280_biosparam,
- .can_queue = 0xfffff,
+ .can_queue = MAX_OUTSTANDING_COMMANDS,
.this_id = -1,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c
index 4707f74404f2..0910ef34e777 100644
--- a/drivers/sensors/sensors_ssc.c
+++ b/drivers/sensors/sensors_ssc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -69,6 +69,8 @@ static void slpi_loader_do(struct platform_device *pdev)
{
struct slpi_loader_private *priv = NULL;
+ int ret;
+ const char *firmware_name = NULL;
if (!pdev) {
dev_err(&pdev->dev, "%s: Platform device null\n", __func__);
@@ -81,6 +83,13 @@ static void slpi_loader_do(struct platform_device *pdev)
goto fail;
}
+ ret = of_property_read_string(pdev->dev.of_node,
+ "qcom,firmware-name", &firmware_name);
+ if (ret < 0) {
+ pr_err("can't get fw name.\n");
+ goto fail;
+ }
+
priv = platform_get_drvdata(pdev);
if (!priv) {
dev_err(&pdev->dev,
@@ -88,7 +97,7 @@ static void slpi_loader_do(struct platform_device *pdev)
goto fail;
}
- priv->pil_h = subsystem_get("slpi");
+ priv->pil_h = subsystem_get_with_fwname("slpi", firmware_name);
if (IS_ERR(priv->pil_h)) {
dev_err(&pdev->dev, "%s: pil get failed,\n",
__func__);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 8c43effadc70..d3f967319d21 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -768,6 +768,15 @@ config MSM_SERVICE_NOTIFIER
like audio, the identifier for which is provided by the service
locator.
+config MSM_QBT1000
+ bool "QBT1000 Ultrasonic Fingerprint Sensor"
+ help
+ This driver provides services for configuring the fingerprint
+ sensor hardware and for communicating with the trusted app which
+ uses it. It enables clocks and provides commands for loading
+ trusted apps, unloading them and marshalling buffers to the
+ trusted fingerprint app.
+
config MSM_RPM_RBCPR_STATS_V2_LOG
tristate "MSM Resource Power Manager RPBCPR Stat Driver"
depends on DEBUG_FS
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 0105e03b082d..f56c6bf1539a 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_MSM_KERNEL_PROTECT) += kernel_protect.o
obj-$(CONFIG_MSM_RTB) += msm_rtb-hotplug.o
obj-$(CONFIG_QCOM_REMOTEQDSS) += remoteqdss.o
obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
+obj-$(CONFIG_MSM_QBT1000) += qbt1000.o
obj-$(CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG) += rpm_rbcpr_stats_v2.o
obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o rpm_master_stat.o system_stats.o
obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
index 66caa6ecaad2..47c66c892736 100644
--- a/drivers/soc/qcom/glink_spi_xprt.c
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -875,21 +875,20 @@ static void __rx_worker(struct edge_info *einfo)
int rcu_id;
rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
if (unlikely(!einfo->rx_fifo_start)) {
rx_avail = glink_spi_xprt_read_avail(einfo);
if (!rx_avail) {
srcu_read_unlock(&einfo->use_ref, rcu_id);
return;
}
- einfo->in_ssr = false;
einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
}
- if (einfo->in_ssr) {
- srcu_read_unlock(&einfo->use_ref, rcu_id);
- return;
- }
-
glink_spi_xprt_set_poll_mode(einfo);
while (inactive_cycles < MAX_INACTIVE_CYCLES) {
if (einfo->tx_resume_needed &&
@@ -1818,9 +1817,16 @@ static int glink_wdsp_cmpnt_event_handler(struct device *dev,
spi_dev = to_spi_device(sdev);
einfo->spi_dev = spi_dev;
break;
+ case WDSP_EVENT_POST_BOOTUP:
+ einfo->in_ssr = false;
+ synchronize_srcu(&einfo->use_ref);
+		/* no break: fall through to queue a dummy rx_worker run */
case WDSP_EVENT_IPC1_INTR:
queue_kthread_work(&einfo->kworker, &einfo->kwork);
break;
+ case WDSP_EVENT_PRE_SHUTDOWN:
+ ssr(&einfo->xprt_if);
+ break;
default:
pr_debug("%s: unhandled event %d", __func__, event);
break;
@@ -2040,7 +2046,6 @@ static int glink_spi_probe(struct platform_device *pdev)
init_xprt_cfg(einfo, subsys_name);
init_xprt_if(einfo);
- einfo->in_ssr = true;
einfo->fifo_size = DEFAULT_FIFO_SIZE;
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 2d3d96fe80e1..feeed645fc47 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -131,6 +131,9 @@ module_param(qmi_timeout, ulong, 0600);
* Registers: WCSS_HM_A_PMM_PMM
* Base Address: 0x18880000
*/
+#define WCSS_HM_A_PMM_ROOT_CLK_ENABLE 0x80010
+#define PMM_TCXO_CLK_ENABLE BIT(13)
+
#define PMM_COMMON_IDLEREQ_CSR_OFFSET 0x80120
#define PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET BIT(16)
#define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK BIT(26)
@@ -1318,7 +1321,7 @@ static int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
if (ret) {
icnss_pr_err("RESET: RF reset command failed, state: 0x%lx\n",
priv->state);
- icnss_hw_wsi_cmd_error_recovery(priv);
+ return ret;
}
icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
@@ -1332,8 +1335,28 @@ static int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
static int icnss_hw_reset_switch_to_cxo(struct icnss_priv *priv)
{
+ u32 rdata;
+
icnss_pr_dbg("RESET: Switch to CXO, state: 0x%lx\n", priv->state);
+ rdata = icnss_hw_read_reg(priv->mem_base_va,
+ WCSS_HM_A_PMM_ROOT_CLK_ENABLE);
+
+ icnss_pr_dbg("RESET: PMM_TCXO_CLK_ENABLE : 0x%05lx\n",
+ rdata & PMM_TCXO_CLK_ENABLE);
+
+ if ((rdata & PMM_TCXO_CLK_ENABLE) == 0) {
+ icnss_pr_dbg("RESET: Set PMM_TCXO_CLK_ENABLE to 1\n");
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCSS_HM_A_PMM_ROOT_CLK_ENABLE,
+ PMM_TCXO_CLK_ENABLE, 1);
+ icnss_hw_poll_reg_field(priv->mem_base_va,
+ WCSS_HM_A_PMM_ROOT_CLK_ENABLE,
+ PMM_TCXO_CLK_ENABLE, 1, 10,
+ ICNSS_HW_REG_RETRY);
+ }
+
icnss_hw_write_reg_field(priv->mem_base_va,
WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET,
WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL, 0);
@@ -1389,7 +1412,7 @@ static int icnss_hw_reset_xo_disable_cmd(struct icnss_priv *priv)
if (ret) {
icnss_pr_err("RESET: XO disable command failed, state: 0x%lx\n",
priv->state);
- icnss_hw_wsi_cmd_error_recovery(priv);
+ return ret;
}
icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
@@ -1406,6 +1429,7 @@ static int icnss_hw_reset(struct icnss_priv *priv)
u32 rdata;
u32 rdata1;
int i;
+ int ret = 0;
if (test_bit(HW_ONLY_TOP_LEVEL_RESET, &quirks))
goto top_level_reset;
@@ -1457,11 +1481,35 @@ static int icnss_hw_reset(struct icnss_priv *priv)
icnss_hw_reset_wlan_rfactrl_power_down(priv);
- icnss_hw_reset_rf_reset_cmd(priv);
+ ret = icnss_hw_reset_rf_reset_cmd(priv);
+ if (ret)
+ goto top_level_reset;
icnss_hw_reset_switch_to_cxo(priv);
- icnss_hw_reset_xo_disable_cmd(priv);
+ for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
+ rdata = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
+ usleep_range(5, 10);
+ rdata1 = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
+
+ icnss_pr_dbg("RESET: SR_PMM_SR_MSB: 0x%08x/0x%08x, XO: 0x%05lx/0x%05lx, AHB: 0x%05lx/0x%05lx\n",
+ rdata, rdata1,
+ rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK,
+ rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK,
+ rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK,
+ rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK);
+
+ if ((rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK) !=
+ (rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK) &&
+ (rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK) !=
+ (rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK))
+ break;
+ usleep_range(5, 10);
+ }
+
+ ret = icnss_hw_reset_xo_disable_cmd(priv);
+ if (ret)
+ goto top_level_reset;
icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
@@ -2414,6 +2462,7 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
out:
icnss_hw_power_off(priv);
+ penv->ops = NULL;
return ret;
}
@@ -2442,12 +2491,16 @@ static int icnss_call_driver_reinit(struct icnss_priv *priv)
out:
clear_bit(ICNSS_PD_RESTART, &priv->state);
+ icnss_pm_relax(priv);
+
return 0;
out_power_off:
icnss_hw_power_off(priv);
clear_bit(ICNSS_PD_RESTART, &priv->state);
+
+ icnss_pm_relax(priv);
return ret;
}
@@ -2516,6 +2569,7 @@ static int icnss_driver_event_register_driver(void *data)
power_off:
icnss_hw_power_off(penv);
+ penv->ops = NULL;
out:
return ret;
}
@@ -2566,6 +2620,8 @@ static int icnss_call_driver_shutdown(struct icnss_priv *priv)
set_bit(ICNSS_PD_RESTART, &priv->state);
clear_bit(ICNSS_FW_READY, &priv->state);
+ icnss_pm_stay_awake(priv);
+
if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
return 0;
@@ -2599,10 +2655,10 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
icnss_call_driver_remove(priv);
out:
- icnss_remove_msa_permissions(priv);
-
ret = icnss_hw_power_off(priv);
+ icnss_remove_msa_permissions(priv);
+
kfree(data);
return ret;
@@ -4223,6 +4279,11 @@ static int icnss_get_vbatt_info(struct icnss_priv *priv)
struct qpnp_vadc_chip *vadc_dev = NULL;
int ret = 0;
+ if (test_bit(VBATT_DISABLE, &quirks)) {
+ icnss_pr_dbg("VBATT feature is disabled\n");
+ return ret;
+ }
+
adc_tm_dev = qpnp_get_adc_tm(&priv->pdev->dev, "icnss");
if (PTR_ERR(adc_tm_dev) == -EPROBE_DEFER) {
icnss_pr_err("adc_tm_dev probe defer\n");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index f8895e8a7b3d..5752aecb82bd 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -388,7 +388,7 @@ static int __pil_q6v55_reset(struct pil_desc *pil)
mb();
udelay(1);
- if (drv->qdsp6v62_1_2) {
+ if (drv->qdsp6v62_1_2 || drv->qdsp6v62_1_5) {
for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
& QDSP6v55_BHS_EN_REST_ACK)
@@ -488,7 +488,8 @@ static int __pil_q6v55_reset(struct pil_desc *pil)
*/
udelay(1);
}
- } else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2) {
+ } else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2 ||
+ drv->qdsp6v62_1_5) {
/* Deassert QDSP6 compiler memory clamp */
val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
val &= ~QDSP6v55_CLAMP_QMC_MEM;
@@ -501,7 +502,13 @@ static int __pil_q6v55_reset(struct pil_desc *pil)
/* Turn on L1, L2, ETB and JU memories 1 at a time */
val = readl_relaxed(drv->reg_base +
QDSP6V6SS_MEM_PWR_CTL);
- for (i = 28; i >= 0; i--) {
+
+ if (drv->qdsp6v62_1_5)
+ i = 29;
+ else
+ i = 28;
+
+ for ( ; i >= 0; i--) {
val |= BIT(i);
writel_relaxed(val, drv->reg_base +
QDSP6V6SS_MEM_PWR_CTL);
@@ -663,6 +670,9 @@ struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
"qcom,qdsp6v62-1-2");
+ drv->qdsp6v62_1_5 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v62-1-5");
+
drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
"qcom,mba-image-is-not-elf");
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
index 6a59b06f7b6c..9e8b8511e69b 100644
--- a/drivers/soc/qcom/pil-q6v5.h
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -62,6 +62,7 @@ struct q6v5_data {
bool qdsp6v56_1_10;
bool qdsp6v61_1_1;
bool qdsp6v62_1_2;
+ bool qdsp6v62_1_5;
bool non_elf_image;
bool restart_reg_sec;
bool override_acc;
diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c
new file mode 100644
index 000000000000..dd543daca1b4
--- /dev/null
+++ b/drivers/soc/qcom/qbt1000.c
@@ -0,0 +1,1263 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "qbt1000:%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/input.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <uapi/linux/qbt1000.h>
+#include <soc/qcom/scm.h>
+#include "qseecom_kernel.h"
+
+#define QBT1000_DEV "qbt1000"
+#define QBT1000_IN_DEV_NAME "qbt1000_key_input"
+#define QBT1000_IN_DEV_VERSION 0x0100
+#define MAX_FW_EVENTS 128
+#define FP_APP_CMD_RX_IPC 132
+#define FW_MAX_IPC_MSG_DATA_SIZE 0x500
+#define IPC_MSG_ID_CBGE_REQUIRED 29
+
+/*
+ * shared buffer size - init with max value,
+ * user space will provide new value upon tz app load
+ */
+static uint32_t g_app_buf_size = SZ_256K;
+static char const *const FP_APP_NAME = "fingerpr";
+
+struct finger_detect_gpio {
+ int gpio;
+ int active_low;
+ int irq;
+ struct work_struct work;
+ unsigned int key_code;
+ int power_key_enabled;
+ int last_gpio_state;
+ int event_reported;
+};
+
+struct fw_event_desc {
+ enum qbt1000_fw_event ev;
+};
+
+struct fw_ipc_info {
+ int gpio;
+ int irq;
+};
+
+struct qbt1000_drvdata {
+ struct class *qbt1000_class;
+ struct cdev qbt1000_cdev;
+ struct device *dev;
+ char *qbt1000_node;
+ struct clk **clocks;
+ unsigned clock_count;
+ uint8_t clock_state;
+ unsigned root_clk_idx;
+ unsigned frequency;
+ atomic_t available;
+ struct mutex mutex;
+ struct mutex fw_events_mutex;
+ struct input_dev *in_dev;
+ struct fw_ipc_info fw_ipc;
+ struct finger_detect_gpio fd_gpio;
+ DECLARE_KFIFO(fw_events, struct fw_event_desc, MAX_FW_EVENTS);
+ wait_queue_head_t read_wait_queue;
+ struct qseecom_handle *fp_app_handle;
+};
+
+/*
+ * struct fw_ipc_cmd -
+ * used to store IPC commands to/from firmware
+ * @status - indicates whether sending/getting the IPC message was successful
+ * @msg_type - the type of IPC message
+ * @msg_len - the length of the message data
+ * @resp_needed - whether a response is needed for this message
+ * @msg_data - any extra data associated with the message
+ */
+struct fw_ipc_cmd {
+ uint32_t status;
+ uint32_t msg_type;
+ uint32_t msg_len;
+ uint32_t resp_needed;
+ uint8_t msg_data[FW_MAX_IPC_MSG_DATA_SIZE];
+};
+
+/*
+ * struct ipc_msg_type_to_fw_event -
+ * an entry in the mapping from an IPC message type to a firmware event
+ * @msg_type - IPC message type, as reported by firmware
+ * @fw_event - corresponding firmware event code to report to driver client
+ */
+struct ipc_msg_type_to_fw_event {
+ uint32_t msg_type;
+ enum qbt1000_fw_event fw_event;
+};
+
+/* mapping between firmware IPC message types to HLOS firmware events */
+struct ipc_msg_type_to_fw_event g_msg_to_event[] = {
+ {IPC_MSG_ID_CBGE_REQUIRED, FW_EVENT_CBGE_REQUIRED}
+};
+
+/**
+ * get_cmd_rsp_buffers() - Function sets cmd & rsp buffer pointers and
+ * aligns buffer lengths
+ * @hdl: ptr to the qseecom handle
+ * @cmd: req buffer - set to qseecom_handle.sbuf
+ * @cmd_len: ptr to req buffer len
+ * @rsp: rsp buffer - set to qseecom_handle.sbuf + offset
+ * @rsp_len: ptr to rsp buffer len
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int get_cmd_rsp_buffers(struct qseecom_handle *hdl,
+ void **cmd,
+ uint32_t *cmd_len,
+ void **rsp,
+ uint32_t *rsp_len)
+{
+ /* 64 bytes alignment for QSEECOM */
+ *cmd_len = ALIGN(*cmd_len, 64);
+ *rsp_len = ALIGN(*rsp_len, 64);
+
+ if ((*rsp_len + *cmd_len) > g_app_buf_size) {
+ pr_err("buffer too small to hold cmd=%d and rsp=%d\n",
+ *cmd_len, *rsp_len);
+ return -ENOMEM;
+ }
+
+ *cmd = hdl->sbuf;
+ *rsp = hdl->sbuf + *cmd_len;
+ return 0;
+}
+
+/**
+ * clocks_on() - Function votes for SPI and AHB clocks to be on and sets
+ * the clk rate to a predetermined value for SPI.
+ * @drvdata: ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int clocks_on(struct qbt1000_drvdata *drvdata)
+{
+ int rc = 0;
+ int index;
+
+ if (!drvdata->clock_state) {
+ for (index = 0; index < drvdata->clock_count; index++) {
+ pr_debug("set clock rate at idx:%d, freq: %u\n",
+ index, drvdata->frequency);
+ if (index == drvdata->root_clk_idx) {
+ rc = clk_set_rate(drvdata->clocks[index],
+ drvdata->frequency);
+ if (rc) {
+				pr_err("failed to set clock rate at idx:%d\n",
+ index);
+ goto unprepare;
+ }
+ }
+
+ rc = clk_prepare_enable(drvdata->clocks[index]);
+ if (rc) {
+ pr_err("failure to prepare clk at idx:%d\n",
+ index);
+ goto unprepare;
+ }
+
+
+ }
+ drvdata->clock_state = 1;
+ }
+ goto end;
+
+unprepare:
+ for (--index; index >= 0; index--)
+ clk_disable_unprepare(drvdata->clocks[index]);
+
+end:
+ return rc;
+}
+
+/**
+ * clocks_off() - Function votes for SPI and AHB clocks to be off
+ * @drvdata: ptr to driver data
+ *
+ * Return: None
+ */
+static void clocks_off(struct qbt1000_drvdata *drvdata)
+{
+ int index;
+
+ if (drvdata->clock_state) {
+ for (index = 0; index < drvdata->clock_count; index++)
+ clk_disable_unprepare(drvdata->clocks[index]);
+ drvdata->clock_state = 0;
+ }
+}
+
+/**
+ * send_tz_cmd() - Function sends a command to TZ
+ *
+ * @drvdata: pointer to driver data
+ * @app_handle: handle to tz app
+ * @is_user_space: 1 if the cmd buffer is in user space, 0
+ * otherwise
+ * @cmd: command buffer to send
+ * @cmd_len: length of the command buffer
+ * @rsp: output, will be set to location of response buffer
+ * @rsp_len: max size of response
+ *
+ * Return: 0 on success.
+ */
+static int send_tz_cmd(struct qbt1000_drvdata *drvdata,
+ struct qseecom_handle *app_handle,
+ int is_user_space,
+ void *cmd, uint32_t cmd_len,
+ void **rsp, uint32_t rsp_len)
+{
+ int rc = 0;
+ void *aligned_cmd;
+ void *aligned_rsp;
+ uint32_t aligned_cmd_len;
+ uint32_t aligned_rsp_len;
+
+ /* init command and response buffers and align lengths */
+ aligned_cmd_len = cmd_len;
+ aligned_rsp_len = rsp_len;
+
+ rc = get_cmd_rsp_buffers(app_handle,
+ (void **)&aligned_cmd,
+ &aligned_cmd_len,
+ (void **)&aligned_rsp,
+ &aligned_rsp_len);
+
+ if (rc != 0)
+ goto end;
+
+ if (aligned_cmd - cmd + cmd_len > g_app_buf_size) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ if (is_user_space) {
+ rc = copy_from_user(aligned_cmd, (void __user *)cmd,
+ cmd_len);
+ if (rc != 0) {
+ pr_err("failure to copy user space buf %d\n", rc);
+ rc = -EFAULT;
+ goto end;
+ }
+	} else {
+		memcpy(aligned_cmd, cmd, cmd_len);
+	}
+
+ /* vote for clocks before sending TZ command */
+ rc = clocks_on(drvdata);
+ if (rc != 0) {
+ pr_err("failure to enable clocks %d\n", rc);
+ goto end;
+ }
+
+ /* send cmd to TZ */
+ rc = qseecom_send_command(app_handle,
+ aligned_cmd,
+ aligned_cmd_len,
+ aligned_rsp,
+ aligned_rsp_len);
+
+ /* un-vote for clocks */
+ clocks_off(drvdata);
+
+ if (rc != 0) {
+ pr_err("failure to send tz cmd %d\n", rc);
+ goto end;
+ }
+
+ *rsp = aligned_rsp;
+
+end:
+ return rc;
+}
+
+/**
+ * qbt1000_open() - Function called when user space opens device.
+ * Successful if driver not currently open.
+ * @inode: ptr to inode object
+ * @file: ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_open(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+
+ struct qbt1000_drvdata *drvdata = container_of(inode->i_cdev,
+ struct qbt1000_drvdata,
+ qbt1000_cdev);
+ file->private_data = drvdata;
+
+ pr_debug("qbt1000_open begin\n");
+ /* disallowing concurrent opens */
+ if (!atomic_dec_and_test(&drvdata->available)) {
+ atomic_inc(&drvdata->available);
+ rc = -EBUSY;
+ }
+
+ pr_debug("qbt1000_open end : %d\n", rc);
+ return rc;
+}
+
+/**
+ * qbt1000_release() - Function called when user space closes device.
+ *
+ * @inode: ptr to inode object
+ * @file: ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_release(struct inode *inode, struct file *file)
+{
+ struct qbt1000_drvdata *drvdata = file->private_data;
+
+ atomic_inc(&drvdata->available);
+ return 0;
+}
+
+/**
+ * qbt1000_ioctl() - Function called when user space calls ioctl.
+ * @file: struct file - not used
+ * @cmd: cmd identifier: QBT1000_LOAD_APP, QBT1000_UNLOAD_APP,
+ *       QBT1000_SEND_TZCMD, QBT1000_SET_FINGER_DETECT_KEY or
+ *       QBT1000_CONFIGURE_POWER_KEY
+ * @arg: ptr to the relevant structure: qbt1000_app, qbt1000_send_tz_cmd,
+ *       qbt1000_set_finger_detect_key or qbt1000_configure_power_key,
+ *       depending on which cmd is passed
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ int rc = 0;
+ void __user *priv_arg = (void __user *)arg;
+ struct qbt1000_drvdata *drvdata;
+
+ drvdata = file->private_data;
+
+ mutex_lock(&drvdata->mutex);
+
+ pr_debug("qbt1000_ioctl %d\n", cmd);
+
+ switch (cmd) {
+ case QBT1000_LOAD_APP:
+ {
+ struct qbt1000_app app;
+ struct qseecom_handle *app_handle;
+
+ if (copy_from_user(&app, priv_arg,
+ sizeof(app)) != 0) {
+ rc = -EFAULT;
+ pr_err("failed copy from user space-LOAD\n");
+ goto end;
+ }
+
+ if (!app.app_handle) {
+ dev_err(drvdata->dev, "%s: LOAD app_handle is null\n",
+ __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ pr_debug("app %s load before\n", app.name);
+
+ /* start the TZ app */
+ rc = qseecom_start_app(&app_handle, app.name, app.size);
+ if (rc == 0) {
+ g_app_buf_size = app.size;
+ rc = qseecom_set_bandwidth(app_handle,
+ app.high_band_width == 1 ? true : false);
+ if (rc != 0) {
+ /* log error, allow to continue */
+ pr_err("App %s failed to set bw\n", app.name);
+ }
+ } else {
+ pr_err("app %s failed to load\n", app.name);
+ goto end;
+ }
+
+ /* copy the app handle to user */
+ rc = copy_to_user((void __user *)app.app_handle, &app_handle,
+ sizeof(*app.app_handle));
+
+ if (rc != 0) {
+ dev_err(drvdata->dev,
+ "%s: Failed copy 2us LOAD rc:%d\n",
+ __func__, rc);
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ pr_debug("app %s load after\n", app.name);
+
+ if (!strcmp(app.name, FP_APP_NAME))
+ drvdata->fp_app_handle = app_handle;
+
+ break;
+ }
+ case QBT1000_UNLOAD_APP:
+ {
+ struct qbt1000_app app;
+ struct qseecom_handle *app_handle;
+
+ if (copy_from_user(&app, priv_arg,
+ sizeof(app)) != 0) {
+ rc = -ENOMEM;
+ pr_err("failed copy from user space-UNLOAD\n");
+ goto end;
+ }
+
+ if (!app.app_handle) {
+ dev_err(drvdata->dev, "%s: UNLOAD app_handle is null\n",
+ __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = copy_from_user(&app_handle, app.app_handle,
+ sizeof(app_handle));
+
+ if (rc != 0) {
+ dev_err(drvdata->dev,
+ "%s: Failed copy from user space-UNLOAD handle rc:%d\n",
+ __func__, rc);
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ /* if the app hasn't been loaded already, return err */
+ if (!app_handle) {
+ pr_err("app not loaded\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (drvdata->fp_app_handle == app_handle)
+ drvdata->fp_app_handle = 0;
+
+ /* set bw & shutdown the TZ app */
+ qseecom_set_bandwidth(app_handle,
+ app.high_band_width == 1 ? true : false);
+ rc = qseecom_shutdown_app(&app_handle);
+ if (rc != 0) {
+ pr_err("app failed to shutdown\n");
+ goto end;
+ }
+
+ /* copy the app handle (should be null) to user */
+ rc = copy_to_user((void __user *)app.app_handle, &app_handle,
+ sizeof(*app.app_handle));
+
+ if (rc != 0) {
+ dev_err(drvdata->dev,
+ "%s: Failed copy 2us UNLOAD rc:%d\n",
+ __func__, rc);
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ break;
+ }
+ case QBT1000_SEND_TZCMD:
+ {
+ struct qbt1000_send_tz_cmd tzcmd;
+ void *rsp_buf;
+
+ if (copy_from_user(&tzcmd, priv_arg,
+ sizeof(tzcmd))
+ != 0) {
+ rc = -EFAULT;
+ pr_err("failed copy from user space %d\n", rc);
+ goto end;
+ }
+
+ if (tzcmd.req_buf_len > g_app_buf_size ||
+ tzcmd.rsp_buf_len > g_app_buf_size) {
+ rc = -ENOMEM;
+ pr_err("invalid cmd buf len, req=%d, rsp=%d\n",
+ tzcmd.req_buf_len, tzcmd.rsp_buf_len);
+ goto end;
+ }
+
+ /* if the app hasn't been loaded already, return err */
+ if (!tzcmd.app_handle) {
+ pr_err("app not loaded\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = send_tz_cmd(drvdata,
+ tzcmd.app_handle, 1,
+ tzcmd.req_buf, tzcmd.req_buf_len,
+ &rsp_buf, tzcmd.rsp_buf_len);
+
+ if (rc < 0) {
+ pr_err("failure sending command to tz\n");
+ goto end;
+ }
+
+ /* copy rsp buf back to user space buffer */
+ rc = copy_to_user((void __user *)tzcmd.rsp_buf,
+ rsp_buf, tzcmd.rsp_buf_len);
+ if (rc != 0) {
+ pr_err("failed copy 2us rc:%d bytes %d:\n",
+ rc, tzcmd.rsp_buf_len);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ break;
+ }
+ case QBT1000_SET_FINGER_DETECT_KEY:
+ {
+ struct qbt1000_set_finger_detect_key set_fd_key;
+
+ if (copy_from_user(&set_fd_key, priv_arg,
+ sizeof(set_fd_key))
+ != 0) {
+ rc = -EFAULT;
+ pr_err("failed copy from user space %d\n", rc);
+ goto end;
+ }
+
+ drvdata->fd_gpio.key_code = set_fd_key.key_code;
+
+ break;
+ }
+ case QBT1000_CONFIGURE_POWER_KEY:
+ {
+ struct qbt1000_configure_power_key power_key;
+
+ if (copy_from_user(&power_key, priv_arg,
+ sizeof(power_key))
+ != 0) {
+ rc = -EFAULT;
+ pr_err("failed copy from user space %d\n", rc);
+ goto end;
+ }
+
+ drvdata->fd_gpio.power_key_enabled = power_key.enable;
+
+ break;
+ }
+ default:
+ pr_err("invalid cmd %d\n", cmd);
+ rc = -ENOIOCTLCMD;
+ goto end;
+ }
+
+end:
+ mutex_unlock(&drvdata->mutex);
+ return rc;
+}
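
For clarity, a hedged user-space sketch of the QBT1000_SEND_TZCMD path handled above. It assumes the uapi header is installed as <linux/qbt1000.h>, that the structure field names match those dereferenced in the handler (app_handle, req_buf, req_buf_len, rsp_buf, rsp_buf_len), and that the character device shows up as /dev/qbt1000 (from QBT1000_DEV); exact field types and the node path are assumptions, not confirmed by this patch.

/* Illustrative only; the authoritative layout lives in uapi/linux/qbt1000.h. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qbt1000.h>

static int example_send_tz_cmd(void *app_handle,
			       void *req, uint32_t req_len,
			       void *rsp, uint32_t rsp_len)
{
	struct qbt1000_send_tz_cmd cmd;
	int fd, rc;

	fd = open("/dev/qbt1000", O_RDWR);	/* node name assumed from QBT1000_DEV */
	if (fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.app_handle = app_handle;	/* handle obtained earlier via QBT1000_LOAD_APP */
	cmd.req_buf = req;
	cmd.req_buf_len = req_len;
	cmd.rsp_buf = rsp;
	cmd.rsp_buf_len = rsp_len;

	/* driver copies req in, forwards it to the TZ app, then copies rsp back */
	rc = ioctl(fd, QBT1000_SEND_TZCMD, &cmd);
	close(fd);
	return rc;
}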
+
+static int get_events_fifo_len_locked(struct qbt1000_drvdata *drvdata)
+{
+ int len;
+
+ mutex_lock(&drvdata->fw_events_mutex);
+ len = kfifo_len(&drvdata->fw_events);
+ mutex_unlock(&drvdata->fw_events_mutex);
+
+ return len;
+}
+
+static ssize_t qbt1000_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct fw_event_desc fw_event;
+ struct qbt1000_drvdata *drvdata = filp->private_data;
+
+ if (cnt < sizeof(fw_event.ev))
+ return -EINVAL;
+
+ mutex_lock(&drvdata->fw_events_mutex);
+
+ while (kfifo_len(&drvdata->fw_events) == 0) {
+ mutex_unlock(&drvdata->fw_events_mutex);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ pr_debug("fw_events fifo: empty, waiting\n");
+
+ if (wait_event_interruptible(drvdata->read_wait_queue,
+ (get_events_fifo_len_locked(drvdata) > 0)))
+ return -ERESTARTSYS;
+
+ mutex_lock(&drvdata->fw_events_mutex);
+ }
+
+ if (!kfifo_get(&drvdata->fw_events, &fw_event)) {
+ pr_debug("fw_events fifo: unexpectedly empty\n");
+
+ mutex_unlock(&drvdata->fw_events_mutex);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&drvdata->fw_events_mutex);
+
+	pr_debug("fw_event: %d\n", (int)fw_event.ev);
+
+	if (copy_to_user(ubuf, &fw_event.ev, sizeof(fw_event.ev)) != 0)
+		return -EFAULT;
+
+	return sizeof(fw_event.ev);
+}
+
+static unsigned int qbt1000_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct qbt1000_drvdata *drvdata = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &drvdata->read_wait_queue, wait);
+
+ if (kfifo_len(&drvdata->fw_events) > 0)
+ mask |= (POLLIN | POLLRDNORM);
+
+ return mask;
+}
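
A corresponding hedged sketch of how a client might consume the firmware-event stream exposed by qbt1000_read()/qbt1000_poll() above. It assumes the same /dev/qbt1000 node as before and that the event is delivered as one int-sized enum qbt1000_fw_event value per read(), matching sizeof(fw_event.ev) in the driver.

/* Illustrative consumer of the firmware event stream; assumptions as noted above. */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static int example_wait_for_fw_event(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ev;

	/* poll() returns once the driver has queued at least one event */
	if (poll(&pfd, 1, -1) < 0)
		return -1;

	/* each read() hands back one enum qbt1000_fw_event value */
	if (read(fd, &ev, sizeof(ev)) < 0)
		return -1;

	printf("fw event %d\n", ev);
	return ev;
}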
+
+static const struct file_operations qbt1000_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = qbt1000_ioctl,
+ .open = qbt1000_open,
+ .release = qbt1000_release,
+ .read = qbt1000_read,
+ .poll = qbt1000_poll
+};
+
+static int qbt1000_dev_register(struct qbt1000_drvdata *drvdata)
+{
+ dev_t dev_no;
+ int ret = 0;
+ size_t node_size;
+ char *node_name = QBT1000_DEV;
+ struct device *dev = drvdata->dev;
+ struct device *device;
+
+ node_size = strlen(node_name) + 1;
+
+ drvdata->qbt1000_node = devm_kzalloc(dev, node_size, GFP_KERNEL);
+ if (!drvdata->qbt1000_node) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ strlcpy(drvdata->qbt1000_node, node_name, node_size);
+
+ ret = alloc_chrdev_region(&dev_no, 0, 1, drvdata->qbt1000_node);
+ if (ret) {
+ pr_err("alloc_chrdev_region failed %d\n", ret);
+ goto err_alloc;
+ }
+
+ cdev_init(&drvdata->qbt1000_cdev, &qbt1000_fops);
+
+ drvdata->qbt1000_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&drvdata->qbt1000_cdev, dev_no, 1);
+ if (ret) {
+ pr_err("cdev_add failed %d\n", ret);
+ goto err_cdev_add;
+ }
+
+ drvdata->qbt1000_class = class_create(THIS_MODULE,
+ drvdata->qbt1000_node);
+ if (IS_ERR(drvdata->qbt1000_class)) {
+ ret = PTR_ERR(drvdata->qbt1000_class);
+ pr_err("class_create failed %d\n", ret);
+ goto err_class_create;
+ }
+
+ device = device_create(drvdata->qbt1000_class, NULL,
+ drvdata->qbt1000_cdev.dev, drvdata,
+ drvdata->qbt1000_node);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ pr_err("device_create failed %d\n", ret);
+ goto err_dev_create;
+ }
+
+ return 0;
+err_dev_create:
+ class_destroy(drvdata->qbt1000_class);
+err_class_create:
+ cdev_del(&drvdata->qbt1000_cdev);
+err_cdev_add:
+ unregister_chrdev_region(drvdata->qbt1000_cdev.dev, 1);
+err_alloc:
+ return ret;
+}
+
+/**
+ * qbt1000_create_input_device() - Function allocates an input
+ * device, configures it for key events and registers it
+ *
+ * @drvdata: ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_create_input_device(struct qbt1000_drvdata *drvdata)
+{
+ int rc = 0;
+
+ drvdata->in_dev = input_allocate_device();
+ if (drvdata->in_dev == NULL) {
+ dev_err(drvdata->dev, "%s: input_allocate_device() failed\n",
+ __func__);
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ drvdata->in_dev->name = QBT1000_IN_DEV_NAME;
+ drvdata->in_dev->phys = NULL;
+ drvdata->in_dev->id.bustype = BUS_HOST;
+ drvdata->in_dev->id.vendor = 0x0001;
+ drvdata->in_dev->id.product = 0x0001;
+ drvdata->in_dev->id.version = QBT1000_IN_DEV_VERSION;
+
+ drvdata->in_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ drvdata->in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ drvdata->in_dev->keybit[BIT_WORD(KEY_HOMEPAGE)] |=
+ BIT_MASK(KEY_HOMEPAGE);
+ drvdata->in_dev->keybit[BIT_WORD(KEY_CAMERA)] |=
+ BIT_MASK(KEY_CAMERA);
+ drvdata->in_dev->keybit[BIT_WORD(KEY_POWER)] |=
+ BIT_MASK(KEY_POWER);
+
+ input_set_abs_params(drvdata->in_dev, ABS_X,
+ 0,
+ 1000,
+ 0, 0);
+ input_set_abs_params(drvdata->in_dev, ABS_Y,
+ 0,
+ 1000,
+ 0, 0);
+
+ rc = input_register_device(drvdata->in_dev);
+ if (rc) {
+ dev_err(drvdata->dev, "%s: input_reg_dev() failed %d\n",
+ __func__, rc);
+ goto end;
+ }
+
+end:
+ if (rc)
+ input_free_device(drvdata->in_dev);
+ return rc;
+}
+
+static void purge_finger_events(struct qbt1000_drvdata *drvdata)
+{
+ int i, fifo_len;
+ struct fw_event_desc fw_event;
+
+ fifo_len = kfifo_len(&drvdata->fw_events);
+
+ for (i = 0; i < fifo_len; i++) {
+ if (!kfifo_get(&drvdata->fw_events, &fw_event))
+ pr_err("fw events fifo: could not remove oldest item\n");
+ else if (fw_event.ev != FW_EVENT_FINGER_DOWN
+ && fw_event.ev != FW_EVENT_FINGER_UP)
+ kfifo_put(&drvdata->fw_events, fw_event);
+ }
+}
+
+static void qbt1000_gpio_report_event(struct qbt1000_drvdata *drvdata)
+{
+ int state;
+ struct fw_event_desc fw_event;
+
+ state = (__gpio_get_value(drvdata->fd_gpio.gpio) ? 1 : 0)
+ ^ drvdata->fd_gpio.active_low;
+
+ if (drvdata->fd_gpio.event_reported
+ && state == drvdata->fd_gpio.last_gpio_state)
+ return;
+
+ pr_debug("gpio %d: report state %d\n", drvdata->fd_gpio.gpio, state);
+
+ drvdata->fd_gpio.event_reported = 1;
+ drvdata->fd_gpio.last_gpio_state = state;
+
+ if (drvdata->fd_gpio.key_code) {
+ input_event(drvdata->in_dev, EV_KEY,
+ drvdata->fd_gpio.key_code, !!state);
+ input_sync(drvdata->in_dev);
+ }
+
+ if (state && drvdata->fd_gpio.power_key_enabled) {
+ input_event(drvdata->in_dev, EV_KEY, KEY_POWER, 1);
+ input_sync(drvdata->in_dev);
+ input_event(drvdata->in_dev, EV_KEY, KEY_POWER, 0);
+ input_sync(drvdata->in_dev);
+ }
+
+ fw_event.ev = (state ? FW_EVENT_FINGER_DOWN : FW_EVENT_FINGER_UP);
+
+ mutex_lock(&drvdata->fw_events_mutex);
+
+ if (kfifo_is_full(&drvdata->fw_events)) {
+ struct fw_event_desc dummy_fw_event;
+
+ pr_warn("fw events fifo: full, dropping oldest item\n");
+ if (!kfifo_get(&drvdata->fw_events, &dummy_fw_event))
+ pr_err("fw events fifo: could not remove oldest item\n");
+ }
+
+ purge_finger_events(drvdata);
+
+ if (!kfifo_put(&drvdata->fw_events, fw_event))
+ pr_err("fw events fifo: error adding item\n");
+
+ mutex_unlock(&drvdata->fw_events_mutex);
+ wake_up_interruptible(&drvdata->read_wait_queue);
+}
+
+static void qbt1000_gpio_work_func(struct work_struct *work)
+{
+ struct qbt1000_drvdata *drvdata =
+ container_of(work, struct qbt1000_drvdata, fd_gpio.work);
+
+ qbt1000_gpio_report_event(drvdata);
+
+ pm_relax(drvdata->dev);
+}
+
+static irqreturn_t qbt1000_gpio_isr(int irq, void *dev_id)
+{
+ struct qbt1000_drvdata *drvdata = dev_id;
+
+ if (irq != drvdata->fd_gpio.irq) {
+ pr_warn("invalid irq %d (expected %d)\n",
+ irq, drvdata->fd_gpio.irq);
+ return IRQ_HANDLED;
+ }
+
+ pm_stay_awake(drvdata->dev);
+ schedule_work(&drvdata->fd_gpio.work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * qbt1000_ipc_irq_handler() - function processes IPC
+ * interrupts on its own thread
+ * @irq: the interrupt that occurred
+ * @dev_id: pointer to the qbt1000_drvdata
+ *
+ * Return: IRQ_HANDLED when complete
+ */
+static irqreturn_t qbt1000_ipc_irq_handler(int irq, void *dev_id)
+{
+ struct fw_ipc_cmd *rx_cmd;
+ int i;
+ uint32_t rxipc = FP_APP_CMD_RX_IPC;
+ struct qbt1000_drvdata *drvdata = (struct qbt1000_drvdata *)dev_id;
+ int rc = 0;
+
+ pm_stay_awake(drvdata->dev);
+
+ mutex_lock(&drvdata->mutex);
+
+ if (irq != drvdata->fw_ipc.irq) {
+ pr_warn("invalid irq %d (expected %d)\n",
+ irq, drvdata->fw_ipc.irq);
+ goto end;
+ }
+
+ pr_debug("firmware interrupt received (irq %d)\n", irq);
+
+ if (!drvdata->fp_app_handle)
+ goto end;
+
+ /*
+ * send the TZ command to fetch the message from firmware
+ * TZ will process the message if it can
+ */
+ rc = send_tz_cmd(drvdata, drvdata->fp_app_handle, 0,
+ &rxipc, sizeof(rxipc),
+ (void *)&rx_cmd, sizeof(*rx_cmd));
+
+ if (rc < 0) {
+ pr_err("failure sending tz cmd %d\n", rxipc);
+ goto end;
+ }
+
+ if (rx_cmd->status != 0) {
+ pr_err("tz command failed to complete\n");
+ goto end;
+ }
+
+ /*
+ * given the IPC message type, search for a corresponding event for the
+ * driver client. If found, add to the events FIFO
+ */
+ for (i = 0; i < ARRAY_SIZE(g_msg_to_event); i++) {
+ if (g_msg_to_event[i].msg_type == rx_cmd->msg_type) {
+ enum qbt1000_fw_event ev = g_msg_to_event[i].fw_event;
+ struct fw_event_desc fw_ev_desc;
+
+ mutex_lock(&drvdata->fw_events_mutex);
+ pr_debug("fw events: add %d\n", (int) ev);
+ fw_ev_desc.ev = ev;
+
+ if (!kfifo_put(&drvdata->fw_events, fw_ev_desc))
+ pr_err("fw events: fifo full, drop event %d\n",
+ (int) ev);
+
+ mutex_unlock(&drvdata->fw_events_mutex);
+ wake_up_interruptible(&drvdata->read_wait_queue);
+ break;
+ }
+ }
+
+end:
+ mutex_unlock(&drvdata->mutex);
+ pm_relax(drvdata->dev);
+ return IRQ_HANDLED;
+}
+
+static int setup_fd_gpio_irq(struct platform_device *pdev,
+ struct qbt1000_drvdata *drvdata)
+{
+ int rc = 0;
+ int irq;
+ const char *desc = "qbt_finger_detect";
+
+ rc = devm_gpio_request_one(&pdev->dev, drvdata->fd_gpio.gpio,
+ GPIOF_IN, desc);
+
+ if (rc < 0) {
+ pr_err("failed to request gpio %d, error %d\n",
+ drvdata->fd_gpio.gpio, rc);
+ goto end;
+ }
+
+ irq = gpio_to_irq(drvdata->fd_gpio.gpio);
+ if (irq < 0) {
+ rc = irq;
+ pr_err("unable to get irq number for gpio %d, error %d\n",
+ drvdata->fd_gpio.gpio, rc);
+ goto end;
+ }
+
+ drvdata->fd_gpio.irq = irq;
+ INIT_WORK(&drvdata->fd_gpio.work, qbt1000_gpio_work_func);
+
+ rc = devm_request_any_context_irq(&pdev->dev, drvdata->fd_gpio.irq,
+ qbt1000_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ desc, drvdata);
+
+ if (rc < 0) {
+ pr_err("unable to claim irq %d; error %d\n",
+ drvdata->fd_gpio.irq, rc);
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
+static int setup_ipc_irq(struct platform_device *pdev,
+ struct qbt1000_drvdata *drvdata)
+{
+ int rc = 0;
+ const char *desc = "qbt_ipc";
+
+ drvdata->fw_ipc.irq = gpio_to_irq(drvdata->fw_ipc.gpio);
+ if (drvdata->fw_ipc.irq < 0) {
+ rc = drvdata->fw_ipc.irq;
+ pr_err("no irq for gpio %d, error=%d\n",
+ drvdata->fw_ipc.gpio, rc);
+ goto end;
+ }
+
+ rc = devm_gpio_request_one(&pdev->dev, drvdata->fw_ipc.gpio,
+ GPIOF_IN, desc);
+
+ if (rc < 0) {
+ pr_err("failed to request gpio %d, error %d\n",
+ drvdata->fw_ipc.gpio, rc);
+ goto end;
+ }
+
+ rc = devm_request_threaded_irq(&pdev->dev,
+ drvdata->fw_ipc.irq,
+ NULL,
+ qbt1000_ipc_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ desc,
+ drvdata);
+
+ if (rc < 0) {
+ pr_err("failed to register for ipc irq %d, rc = %d\n",
+ drvdata->fw_ipc.irq, rc);
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
+/**
+ * qbt1000_read_device_tree() - Function reads device tree
+ * properties into driver data
+ * @pdev: ptr to platform device object
+ * @drvdata: ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_read_device_tree(struct platform_device *pdev,
+ struct qbt1000_drvdata *drvdata)
+{
+ int rc = 0;
+	int clkcnt = 0;
+ int index = 0;
+ uint32_t rate;
+ const char *clock_name;
+ int gpio;
+ enum of_gpio_flags flags;
+
+ /* obtain number of clocks from hw config */
+ clkcnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	if (clkcnt < 0) {
+ pr_err("failed to get clock names\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* sanity check for max clock count */
+ if (clkcnt > 16) {
+ pr_err("invalid clock count %d\n", clkcnt);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* alloc mem for clock array - auto free if probe fails */
+ drvdata->clock_count = clkcnt;
+ pr_debug("clock count %d\n", clkcnt);
+ drvdata->clocks = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * drvdata->clock_count, GFP_KERNEL);
+ if (!drvdata->clocks) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ /* load clock names */
+ for (index = 0; index < drvdata->clock_count; index++) {
+ of_property_read_string_index(pdev->dev.of_node,
+ "clock-names",
+ index, &clock_name);
+ pr_debug("getting clock %s\n", clock_name);
+ drvdata->clocks[index] = devm_clk_get(&pdev->dev, clock_name);
+ if (IS_ERR(drvdata->clocks[index])) {
+ rc = PTR_ERR(drvdata->clocks[index]);
+ if (rc != -EPROBE_DEFER)
+ pr_err("failed to get %s\n", clock_name);
+ goto end;
+ }
+
+ if (!strcmp(clock_name, "iface_clk")) {
+ pr_debug("root index %d\n", index);
+ drvdata->root_clk_idx = index;
+ }
+ }
+
+ /* read clock frequency */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &rate) == 0) {
+ pr_debug("clk frequency %d\n", rate);
+ drvdata->frequency = rate;
+ }
+
+ /* read IPC gpio */
+ drvdata->fw_ipc.gpio = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,ipc-gpio", 0);
+ if (drvdata->fw_ipc.gpio < 0) {
+ rc = drvdata->fw_ipc.gpio;
+ pr_err("ipc gpio not found, error=%d\n", rc);
+ goto end;
+ }
+
+ /* read finger detect GPIO configuration */
+ gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+ "qcom,finger-detect-gpio", 0, &flags);
+ if (gpio < 0) {
+ pr_err("failed to get gpio flags\n");
+ rc = gpio;
+ goto end;
+ }
+
+ drvdata->fd_gpio.gpio = gpio;
+ drvdata->fd_gpio.active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+end:
+ return rc;
+}
+
+/**
+ * qbt1000_probe() - Function loads hardware config from device tree
+ * @pdev: ptr to platform device object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qbt1000_drvdata *drvdata;
+ int rc = 0;
+
+ pr_debug("qbt1000_probe begin\n");
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ rc = qbt1000_read_device_tree(pdev, drvdata);
+ if (rc < 0)
+ goto end;
+
+ atomic_set(&drvdata->available, 1);
+
+ mutex_init(&drvdata->mutex);
+ mutex_init(&drvdata->fw_events_mutex);
+
+ rc = qbt1000_dev_register(drvdata);
+ if (rc < 0)
+ goto end;
+
+ INIT_KFIFO(drvdata->fw_events);
+ init_waitqueue_head(&drvdata->read_wait_queue);
+
+ rc = qbt1000_create_input_device(drvdata);
+ if (rc < 0)
+ goto end;
+
+ rc = setup_fd_gpio_irq(pdev, drvdata);
+ if (rc < 0)
+ goto end;
+
+ rc = setup_ipc_irq(pdev, drvdata);
+ if (rc < 0)
+ goto end;
+
+ rc = device_init_wakeup(&pdev->dev, 1);
+ if (rc < 0)
+ goto end;
+
+end:
+ pr_debug("qbt1000_probe end : %d\n", rc);
+ return rc;
+}
+
+static int qbt1000_remove(struct platform_device *pdev)
+{
+ struct qbt1000_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ input_unregister_device(drvdata->in_dev);
+
+ clocks_off(drvdata);
+ mutex_destroy(&drvdata->mutex);
+ mutex_destroy(&drvdata->fw_events_mutex);
+
+ device_destroy(drvdata->qbt1000_class, drvdata->qbt1000_cdev.dev);
+ class_destroy(drvdata->qbt1000_class);
+ cdev_del(&drvdata->qbt1000_cdev);
+ unregister_chrdev_region(drvdata->qbt1000_cdev.dev, 1);
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ return 0;
+}
+
+static int qbt1000_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int rc = 0;
+ struct qbt1000_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ /*
+	 * Return an error code if the driver is currently making a TZ call.
+	 * Note: this driver must keep the clocks on while a TZ call is in
+	 * progress, so the clock state determines whether suspend is allowed
+	 * to proceed.
+ */
+ if (!mutex_trylock(&drvdata->mutex))
+ return -EBUSY;
+
+ if (drvdata->clock_state)
+ rc = -EBUSY;
+ else {
+ enable_irq_wake(drvdata->fd_gpio.irq);
+ enable_irq_wake(drvdata->fw_ipc.irq);
+ }
+
+ mutex_unlock(&drvdata->mutex);
+
+ return rc;
+}
+
+static int qbt1000_resume(struct platform_device *pdev)
+{
+ struct qbt1000_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ disable_irq_wake(drvdata->fd_gpio.irq);
+ disable_irq_wake(drvdata->fw_ipc.irq);
+
+ return 0;
+}
+
+static const struct of_device_id qbt1000_match[] = {
+ { .compatible = "qcom,qbt1000" },
+ {}
+};
+
+static struct platform_driver qbt1000_plat_driver = {
+ .probe = qbt1000_probe,
+ .remove = qbt1000_remove,
+ .suspend = qbt1000_suspend,
+ .resume = qbt1000_resume,
+ .driver = {
+ .name = "qbt1000",
+ .owner = THIS_MODULE,
+ .of_match_table = qbt1000_match,
+ },
+};
+
+module_platform_driver(qbt1000_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. QBT1000 driver");
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index dfd6b448a65f..e8969a5e533b 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -324,6 +324,7 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, uint32_t dl,
pr_err("%s: glink_open failed %s\n", __func__,
svc_names[dest][clnt]);
apr_ch->handle = NULL;
+ rc = -EINVAL;
goto unlock;
}
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 8f58eaa537b1..470ecfdd9f5e 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -701,7 +701,7 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
wdog_dd->user_pet_complete = true;
wdog_dd->user_pet_enabled = false;
wake_up_process(wdog_dd->watchdog_task);
- init_timer(&wdog_dd->pet_timer);
+ init_timer_deferrable(&wdog_dd->pet_timer);
wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
wdog_dd->pet_timer.function = pet_task_wakeup;
wdog_dd->pet_timer.expires = jiffies + delay_time;
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 92cdadef715d..6bc815862541 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -161,8 +161,8 @@ static void wdsp_glink_notify_rx(void *handle, const void *priv,
wpriv->rsp_cnt = ++rsp_cnt;
mutex_unlock(&wpriv->rsp_mutex);
- complete(&wpriv->rsp_complete);
glink_rx_done(handle, ptr, true);
+ complete(&wpriv->rsp_complete);
}
/*
@@ -216,9 +216,9 @@ static bool wdsp_glink_notify_rx_intent_req(void *handle, const void *priv,
mutex_lock(&ch->mutex);
rc = glink_queue_rx_intent(ch->handle, ch, req_size);
- if (IS_ERR_VALUE(ret)) {
- dev_err(wpriv->dev, "%s: Failed to queue rx intent\n",
- __func__);
+ if (IS_ERR_VALUE(rc)) {
+ dev_err(wpriv->dev, "%s: Failed to queue rx intent, rc = %d\n",
+ __func__, rc);
mutex_unlock(&ch->mutex);
goto done;
}
@@ -659,10 +659,13 @@ static ssize_t wdsp_glink_read(struct file *file, char __user *buf,
count = WDSP_MAX_READ_SIZE;
}
/*
- * This is unblocked only from glink rx notification callback
- * or from flush API.
+	 * The completion is signalled from the glink rx notification
+	 * callback or from the flush API. Use the interruptible variant of
+	 * wait_for_completion so the system is still able to enter suspend.
*/
- wait_for_completion(&wpriv->rsp_complete);
+ ret = wait_for_completion_interruptible(&wpriv->rsp_complete);
+ if (ret)
+ goto done;
mutex_lock(&wpriv->rsp_mutex);
if (wpriv->rsp_cnt) {
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 534c58937a56..4a65c5bda146 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -419,6 +419,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
if (error) {
dev_err(dev, "failed to handle node %s: %d\n",
node->name, error);
+ of_node_put(node);
goto err_out;
}
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index b25dc71b0ea9..73c8ea0b1360 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
.reg_general = -1,
.reg_ssp = 0x20,
.reg_cs_ctrl = 0x24,
- .reg_capabilities = 0xfc,
+ .reg_capabilities = -1,
.rx_threshold = 1,
.tx_threshold_lo = 32,
.tx_threshold_hi = 56,
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 79a8bc4f6cec..035767c02072 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 ser;
- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
+ struct spi_master *master = spi->master;
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(rs->dev);
ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
ser &= ~(1 << spi->chip_select);
writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
+
+ pm_runtime_put_sync(rs->dev);
}
static int rockchip_spi_prepare_message(struct spi_master *master,
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 64318fcfacf2..5044c6198332 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -94,6 +94,7 @@ struct ti_qspi {
#define QSPI_FLEN(n) ((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS 128
#define QSPI_WLEN_MAX_BYTES 16
+#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
/* STATUS REGISTER */
#define BUSY 0x01
@@ -224,16 +225,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
return -ETIMEDOUT;
}
-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+ int count)
{
- int wlen, count, xfer_len;
+ int wlen, xfer_len;
unsigned int cmd;
const u8 *txbuf;
u32 data;
txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL;
- count = t->len;
wlen = t->bits_per_word >> 3; /* in bytes */
xfer_len = wlen;
@@ -293,9 +294,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return 0;
}
-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+ int count)
{
- int wlen, count;
+ int wlen;
unsigned int cmd;
u8 *rxbuf;
@@ -312,7 +314,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
cmd |= QSPI_RD_SNGL;
break;
}
- count = t->len;
wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
@@ -343,12 +344,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return 0;
}
-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+ int count)
{
int ret;
if (t->tx_buf) {
- ret = qspi_write_msg(qspi, t);
+ ret = qspi_write_msg(qspi, t, count);
if (ret) {
dev_dbg(qspi->dev, "Error while writing\n");
return ret;
@@ -356,7 +358,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
}
if (t->rx_buf) {
- ret = qspi_read_msg(qspi, t);
+ ret = qspi_read_msg(qspi, t, count);
if (ret) {
dev_dbg(qspi->dev, "Error while reading\n");
return ret;
@@ -373,7 +375,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
struct spi_device *spi = m->spi;
struct spi_transfer *t;
int status = 0, ret;
- int frame_length;
+ unsigned int frame_len_words, transfer_len_words;
+ int wlen;
/* setup device control reg */
qspi->dc = 0;
@@ -385,30 +388,38 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
if (spi->mode & SPI_CS_HIGH)
qspi->dc |= QSPI_CSPOL(spi->chip_select);
- frame_length = (m->frame_length << 3) / spi->bits_per_word;
-
- frame_length = clamp(frame_length, 0, QSPI_FRAME);
+ frame_len_words = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list)
+ frame_len_words += t->len / (t->bits_per_word >> 3);
+ frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
/* setup command reg */
qspi->cmd = 0;
qspi->cmd |= QSPI_EN_CS(spi->chip_select);
- qspi->cmd |= QSPI_FLEN(frame_length);
+ qspi->cmd |= QSPI_FLEN(frame_len_words);
ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
mutex_lock(&qspi->list_lock);
list_for_each_entry(t, &m->transfers, transfer_list) {
- qspi->cmd |= QSPI_WLEN(t->bits_per_word);
+ qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
+ QSPI_WLEN(t->bits_per_word));
+
+ wlen = t->bits_per_word >> 3;
+ transfer_len_words = min(t->len / wlen, frame_len_words);
- ret = qspi_transfer_msg(qspi, t);
+ ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
if (ret) {
dev_dbg(qspi->dev, "transfer message failed\n");
mutex_unlock(&qspi->list_lock);
return -EINVAL;
}
- m->actual_length += t->len;
+ m->actual_length += transfer_len_words * wlen;
+ frame_len_words -= transfer_len_words;
+ if (frame_len_words == 0)
+ break;
}
mutex_unlock(&qspi->list_lock);
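Because the transfers in one message may use different word lengths, the command register's WLEN field is now cleared before the per-transfer value is inserted. A small sketch of the read-modify-write; the field position is an assumption here, not taken from the driver:

#include <linux/types.h>

#define EX_QSPI_WLEN_SHIFT	19				/* assumed bit position */
#define EX_QSPI_WLEN(bits)	(((bits) - 1) << EX_QSPI_WLEN_SHIFT)
#define EX_QSPI_WLEN_MASK	EX_QSPI_WLEN(128)		/* max supported word length */

/* Sketch: replace only the word-length field, leaving the other command bits intact. */
static u32 example_update_wlen(u32 cmd, unsigned int bits_per_word)
{
	return (cmd & ~EX_QSPI_WLEN_MASK) | EX_QSPI_WLEN(bits_per_word);
}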
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index b02e48185355..c0b936d802ef 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1005,6 +1005,12 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
pa->spmic = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ if (!res) {
+ dev_err(&pdev->dev, "core resource not specified\n");
+ err = -EINVAL;
+ goto err_put_ctrl;
+ }
+
pa->core_size = resource_size(res);
if (pa->core_size <= 0x800) {
dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n");
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index b8bf80f02f4c..43d3f92cd418 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/ion_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -325,8 +325,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
switch (heap_data->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
- heap = ion_system_contig_heap_create(heap_data);
- break;
+ pr_err("%s: Heap type is disabled: %d\n", __func__,
+ heap_data->type);
+ return ERR_PTR(-EINVAL);
case ION_HEAP_TYPE_SYSTEM:
heap = ion_system_heap_create(heap_data);
break;
@@ -366,7 +367,8 @@ void ion_heap_destroy(struct ion_heap *heap)
switch (heap->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
- ion_system_contig_heap_destroy(heap);
+ pr_err("%s: Heap type is disabled: %d\n", __func__,
+ heap->type);
break;
case ION_HEAP_TYPE_SYSTEM:
ion_system_heap_destroy(heap);
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
- Remove unneeded file entries in sysfs
- Remove software processing of IB protocol and place in library for use
by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index aae9826ec62b..c851e51b1dc3 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -62,6 +62,8 @@
#include <linux/cred.h>
#include <linux/uio.h>
+#include <rdma/ib.h>
+
#include "hfi.h"
#include "pio.h"
#include "device.h"
@@ -214,6 +216,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
int uctxt_required = 1;
int must_be_root = 0;
+ /* FIXME: This interface cannot continue out of staging */
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6ceac4f2d4b2..560c5c72daeb 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -103,6 +103,7 @@ struct cpufreq_cooling_device {
int dyn_power_table_entries;
struct device *cpu_dev;
get_static_t plat_get_static_power;
+ struct cpu_cooling_ops *plat_ops;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -504,8 +505,13 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
- *state = cpufreq_device->cpufreq_state;
+ if (cpufreq_device->plat_ops
+ && cpufreq_device->plat_ops->get_cur_state)
+ cpufreq_device->plat_ops->get_cur_state(cpu, state);
+ else
+ *state = cpufreq_device->cpufreq_state;
return 0;
}
@@ -539,7 +545,17 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
cpufreq_device->cpufreq_state = state;
cpufreq_device->clipped_freq = clip_freq;
- cpufreq_update_policy(cpu);
+ /* Check whether the device has a platform mitigation function that
+ * can handle the CPU frequency mitigation; if not, notify the
+ * cpufreq framework.
+ */
+ if (cpufreq_device->plat_ops) {
+ if (cpufreq_device->plat_ops->ceil_limit)
+ cpufreq_device->plat_ops->ceil_limit(cpu,
+ clip_freq);
+ } else {
+ cpufreq_update_policy(cpu);
+ }
return 0;
}
@@ -773,6 +789,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
* cpus (optional)
+ * @plat_ops: platform mitigation ops that apply the limit by changing the
+ * frequencies directly (optional). By default, the cpufreq framework
+ * will be notified of the new limits.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -785,7 +804,8 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus, u32 capacitance,
- get_static_t plat_static_func)
+ get_static_t plat_static_func,
+ struct cpu_cooling_ops *plat_ops)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev;
@@ -851,6 +871,8 @@ __cpufreq_cooling_register(struct device_node *np,
}
}
+ cpufreq_dev->plat_ops = plat_ops;
+
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
cool_dev = ERR_PTR(ret);
@@ -924,7 +946,7 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
+ return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
@@ -948,7 +970,7 @@ of_cpufreq_cooling_register(struct device_node *np,
if (!np)
return ERR_PTR(-EINVAL);
- return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
+ return __cpufreq_cooling_register(np, clip_cpus, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
@@ -978,11 +1000,31 @@ cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
get_static_t plat_static_func)
{
return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
- plat_static_func);
+ plat_static_func, NULL);
}
EXPORT_SYMBOL(cpufreq_power_cooling_register);
/**
+ * cpufreq_platform_cooling_register() - create cpufreq cooling device with
+ * additional platform specific mitigation function.
+ *
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @plat_ops: the platform mitigation functions that will be called instead of
+ * cpufreq, if provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+ struct cpu_cooling_ops *plat_ops)
+{
+ return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL,
+ plat_ops);
+}
+EXPORT_SYMBOL(cpufreq_platform_cooling_register);
+
+/**
* of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
@@ -1015,7 +1057,7 @@ of_cpufreq_power_cooling_register(struct device_node *np,
return ERR_PTR(-EINVAL);
return __cpufreq_cooling_register(np, clip_cpus, capacitance,
- plat_static_func);
+ plat_static_func, NULL);
}
EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
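cpufreq_platform_cooling_register() is meant for platforms (such as the LMH DCVS driver patched below) that mitigate frequency in hardware rather than through cpufreq. A hedged sketch of a caller, assuming the struct cpu_cooling_ops layout introduced by this patch (ceil_limit/get_cur_state); all example_* names are hypothetical:

#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>
#include <linux/thermal.h>
#include <linux/types.h>

/* Hypothetical platform callbacks matching the ops used in this patch. */
static int example_ceil_limit(int cpu, u32 max_freq)
{
	/* program the hardware frequency ceiling for 'cpu' here */
	return 0;
}

static int example_get_cur_state(int cpu, unsigned long *freq)
{
	*freq = 0;	/* report the limit currently enforced by hardware */
	return 0;
}

static struct cpu_cooling_ops example_ops = {
	.ceil_limit	= example_ceil_limit,
	.get_cur_state	= example_get_cur_state,
};

/* Register a cooling device whose state changes go to the callbacks above. */
static struct thermal_cooling_device *
example_register(const struct cpumask *cpus)
{
	return cpufreq_platform_cooling_register(cpus, &example_ops);
}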
diff --git a/drivers/thermal/msm_lmh_dcvs.c b/drivers/thermal/msm_lmh_dcvs.c
index fbe76eaa3867..3758e39a1c02 100644
--- a/drivers/thermal/msm_lmh_dcvs.c
+++ b/drivers/thermal/msm_lmh_dcvs.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pm_opp.h>
+#include <linux/cpu_cooling.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
@@ -40,6 +41,7 @@
#define MSM_LIMITS_NODE_DCVS 0x44435653
#define MSM_LIMITS_SUB_FN_THERMAL 0x54484D4C
+#define MSM_LIMITS_SUB_FN_GENERAL 0x47454E00
#define MSM_LIMITS_ALGO_MODE_ENABLE 0x454E424C
@@ -49,6 +51,8 @@
#define MSM_LIMITS_CLUSTER_0 0x6370302D
#define MSM_LIMITS_CLUSTER_1 0x6370312D
+#define MSM_LIMITS_DOMAIN_MAX 0x444D4158
+
#define MSM_LIMITS_HIGH_THRESHOLD_VAL 95000
#define MSM_LIMITS_ARM_THRESHOLD_VAL 65000
#define MSM_LIMITS_POLLING_DELAY_MS 10
@@ -77,8 +81,12 @@ struct msm_lmh_dcvs_hw {
cpumask_t core_map;
struct timer_list poll_timer;
uint32_t max_freq;
+ uint32_t hw_freq_limit;
+ struct list_head list;
};
+LIST_HEAD(lmh_dcvs_hw_list);
+
static void msm_lmh_dcvs_get_max_freq(uint32_t cpu, uint32_t *max_freq)
{
unsigned long freq_ceil = UINT_MAX;
@@ -99,12 +107,29 @@ static void msm_lmh_dcvs_get_max_freq(uint32_t cpu, uint32_t *max_freq)
static uint32_t msm_lmh_mitigation_notify(struct msm_lmh_dcvs_hw *hw)
{
uint32_t max_limit = 0, val = 0;
+ struct device *cpu_dev = NULL;
+ unsigned long freq_val;
val = readl_relaxed(hw->osm_hw_reg);
dcvsh_get_frequency(val, max_limit);
+ cpu_dev = get_cpu_device(cpumask_first(&hw->core_map));
+ if (!cpu_dev) {
+ pr_err("Error in get CPU%d device\n",
+ cpumask_first(&hw->core_map));
+ goto notify_exit;
+ }
+
+ freq_val = max_limit;
+ rcu_read_lock();
+ dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
+ rcu_read_unlock();
+ max_limit = freq_val;
+
sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit);
+notify_exit:
+ hw->hw_freq_limit = max_limit;
return max_limit;
}
@@ -250,6 +275,45 @@ static int trip_notify(enum thermal_trip_type type, int temp, void *data)
return 0;
}
+static struct msm_lmh_dcvs_hw *get_dcvsh_hw_from_cpu(int cpu)
+{
+ struct msm_lmh_dcvs_hw *hw;
+
+ list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
+ if (cpumask_test_cpu(cpu, &hw->core_map))
+ return hw;
+ }
+
+ return NULL;
+}
+
+static int lmh_set_max_limit(int cpu, u32 freq)
+{
+ struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+ if (!hw)
+ return -EINVAL;
+
+ return msm_lmh_dcvs_write(hw->affinity, MSM_LIMITS_SUB_FN_GENERAL,
+ MSM_LIMITS_DOMAIN_MAX, freq);
+}
+
+static int lmh_get_cur_limit(int cpu, unsigned long *freq)
+{
+ struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+ if (!hw)
+ return -EINVAL;
+ *freq = hw->hw_freq_limit;
+
+ return 0;
+}
+
+static struct cpu_cooling_ops cd_ops = {
+ .get_cur_state = lmh_get_cur_limit,
+ .ceil_limit = lmh_set_max_limit,
+};
+
static int msm_lmh_dcvs_probe(struct platform_device *pdev)
{
int ret;
@@ -257,6 +321,7 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
struct msm_lmh_dcvs_hw *hw;
char sensor_name[] = "limits_sensor-00";
struct thermal_zone_device *tzdev;
+ struct thermal_cooling_device *cdev;
struct device_node *dn = pdev->dev.of_node;
struct device_node *cpu_node, *lmh_node;
uint32_t id, max_freq, request_reg, clear_reg;
@@ -331,6 +396,10 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
if (IS_ERR_OR_NULL(tzdev))
return PTR_ERR(tzdev);
+ /* Setup cooling devices to request mitigation states */
+ cdev = cpufreq_platform_cooling_register(&hw->core_map, &cd_ops);
+ if (IS_ERR_OR_NULL(cdev))
+ return PTR_ERR(cdev);
/*
* Driver defaults to for low and hi thresholds.
* Since we make a check for hi > lo value, set the hi threshold
@@ -356,7 +425,7 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
return ret;
}
- hw->max_freq = max_freq;
+ hw->hw_freq_limit = hw->max_freq = max_freq;
switch (affinity) {
case 0:
@@ -399,6 +468,9 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
return ret;
}
+ INIT_LIST_HEAD(&hw->list);
+ list_add(&hw->list, &lmh_dcvs_hw_list);
+
return ret;
}
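Rounding the hardware-reported ceiling down with dev_pm_opp_find_freq_floor() keeps the published limit aligned to a real OPP; the call adjusts the frequency argument in place and, in this kernel, must run under rcu_read_lock(). A small sketch, with no assumptions made about the frequency units:

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>
#include <linux/device.h>

/* Sketch: snap a raw limit to the nearest OPP at or below it. */
static unsigned long example_floor_to_opp(struct device *cpu_dev,
					  unsigned long raw_limit)
{
	unsigned long freq = raw_limit;

	rcu_read_lock();	/* OPP table access is RCU protected here */
	dev_pm_opp_find_freq_floor(cpu_dev, &freq);	/* updates 'freq' */
	rcu_read_unlock();

	return freq;
}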
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index e845841ab036..7106288efae3 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -545,15 +545,14 @@ static int rockchip_configure_from_dt(struct device *dev,
thermal->chip->tshut_temp);
thermal->tshut_temp = thermal->chip->tshut_temp;
} else {
+ if (shut_temp > INT_MAX) {
+ dev_err(dev, "Invalid tshut temperature specified: %d\n",
+ shut_temp);
+ return -ERANGE;
+ }
thermal->tshut_temp = shut_temp;
}
- if (thermal->tshut_temp > INT_MAX) {
- dev_err(dev, "Invalid tshut temperature specified: %d\n",
- thermal->tshut_temp);
- return -ERANGE;
- }
-
if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
dev_warn(dev,
"Missing tshut mode property, using default (%s)\n",
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index ec9288791667..f0d5c96ac2e0 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1596,6 +1596,16 @@ static void flip_insert_work(struct work_struct *work)
struct tty_struct *tty = msm_uport->uport.state->port.tty;
spin_lock_irqsave(&msm_uport->uport.lock, flags);
+ if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+ dev_err(msm_uport->uport.dev,
+ "%s:Invalid driver state flush %d\n",
+ __func__, msm_uport->rx.flush);
+ MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+ __func__, msm_uport->rx.flush);
+ spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+ return;
+ }
+
if (msm_uport->rx.buffer_pending == NONE_PENDING) {
MSM_HS_ERR("Error: No buffer pending in %s", __func__);
spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
@@ -1668,6 +1678,16 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
spin_lock_irqsave(&uport->lock, flags);
+ if (!tty || rx->flush == FLUSH_SHUTDOWN) {
+ dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
+ __func__, rx->flush);
+ MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+ __func__, rx->flush);
+ spin_unlock_irqrestore(&uport->lock, flags);
+ msm_hs_resource_unvote(msm_uport);
+ return;
+ }
+
/*
* Process all pending descs or if nothing is
* queued - called from termios
@@ -3664,12 +3684,12 @@ static void msm_hs_shutdown(struct uart_port *uport)
if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
/* disable and disconnect rx */
- msm_hs_disconnect_rx(uport);
ret = wait_event_timeout(msm_uport->rx.wait,
- msm_uport->rx.flush == FLUSH_SHUTDOWN, 500);
+ !msm_uport->rx.pending_flag, 500);
if (!ret)
MSM_HS_WARN("%s(): rx disconnect not complete",
__func__);
+ msm_hs_disconnect_rx(uport);
}
cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 51c7507b0444..63a06ab6ba03 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -38,7 +38,6 @@
#include <linux/major.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -116,8 +115,6 @@ struct sci_port {
struct timer_list rx_timer;
unsigned int rx_timeout;
#endif
-
- struct notifier_block freq_transition;
};
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -1606,29 +1603,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
return ret;
}
-/*
- * Here we define a transition notifier so that we can update all of our
- * ports' baud rate when the peripheral clock changes.
- */
-static int sci_notifier(struct notifier_block *self,
- unsigned long phase, void *p)
-{
- struct sci_port *sci_port;
- unsigned long flags;
-
- sci_port = container_of(self, struct sci_port, freq_transition);
-
- if (phase == CPUFREQ_POSTCHANGE) {
- struct uart_port *port = &sci_port->port;
-
- spin_lock_irqsave(&port->lock, flags);
- port->uartclk = clk_get_rate(sci_port->iclk);
- spin_unlock_irqrestore(&port->lock, flags);
- }
-
- return NOTIFY_OK;
-}
-
static const struct sci_irq_desc {
const char *desc;
irq_handler_t handler;
@@ -2559,9 +2533,6 @@ static int sci_remove(struct platform_device *dev)
{
struct sci_port *port = platform_get_drvdata(dev);
- cpufreq_unregister_notifier(&port->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-
uart_remove_one_port(&sci_uart_driver, &port->port);
sci_cleanup_single(port);
@@ -2714,16 +2685,6 @@ static int sci_probe(struct platform_device *dev)
if (ret)
return ret;
- sp->freq_transition.notifier_call = sci_notifier;
-
- ret = cpufreq_register_notifier(&sp->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (unlikely(ret < 0)) {
- uart_remove_one_port(&sci_uart_driver, &sp->port);
- sci_cleanup_single(sp);
- return ret;
- }
-
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
#endif
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9eb1cff28bd4..b8b580e5ae6e 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
+
+ /*
+ * Companion device should be a UHCI, OHCI or EHCI host
+ * controller, otherwise skip.
+ */
+ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+ companion->class != CL_EHCI)
+ continue;
+
companion_hcd = pci_get_drvdata(companion);
if (!companion_hcd || !companion_hcd->self.root_hub)
continue;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 08006d84fb38..add035269ae7 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1651,9 +1651,10 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned event)
/*
* Below sequence is used when controller is working without
- * having ssphy and only USB high speed is supported.
+ * having ssphy and only USB high/full speed is supported.
*/
- if (dwc->maximum_speed == USB_SPEED_HIGH) {
+ if (dwc->maximum_speed == USB_SPEED_HIGH ||
+ dwc->maximum_speed == USB_SPEED_FULL) {
dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
dwc3_msm_read_reg(mdwc->base,
QSCRATCH_GENERAL_CFG)
@@ -2072,6 +2073,11 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
clk_prepare_enable(mdwc->iface_clk);
clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
clk_prepare_enable(mdwc->core_clk);
+
+ /* set Memory core: ON, Memory periphery: ON */
+ clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
+ clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
+
clk_prepare_enable(mdwc->utmi_clk);
if (mdwc->bus_aggr_clk)
clk_prepare_enable(mdwc->bus_aggr_clk);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 805c5e1931e1..687d51e25d4b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -802,8 +802,8 @@ static int dwc3_gadget_ep_disable(struct usb_ep *ep)
dwc = dep->dwc;
if (!(dep->flags & DWC3_EP_ENABLED)) {
- dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
- dep->name);
+ dev_dbg(dwc->dev, "%s is already disabled\n", dep->name);
+ dbg_event(dep->number, "ALRDY DISABLED", dep->flags);
return 0;
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index fd2157c8e8c2..20fe358387be 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -710,6 +710,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
work);
int ret = io_data->req->status ? io_data->req->status :
io_data->req->actual;
+ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
ffs_log("enter: ret %d", ret);
@@ -723,13 +724,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
- if (io_data->ffs->ffs_eventfd &&
- !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
+ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
usb_ep_free_request(io_data->ep, io_data->req);
- io_data->kiocb->private = NULL;
if (io_data->read)
kfree(io_data->to_free);
kfree(io_data->buf);
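ki_complete() may free the kiocb (and with it io_data->kiocb), so the IOCB_EVENTFD flag is sampled into a local variable before completion and the stale ->private clear is dropped. A hedged sketch of the ordering, with hypothetical names:

#include <linux/fs.h>
#include <linux/eventfd.h>

/* Sketch: read everything needed from the kiocb before completing it. */
static void example_complete(struct kiocb *iocb, long res,
			     struct eventfd_ctx *efd)
{
	bool has_eventfd = iocb->ki_flags & IOCB_EVENTFD;

	iocb->ki_complete(iocb, res, res);	/* iocb may be freed here */

	if (efd && !has_eventfd)
		eventfd_signal(efd, 1);
}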
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index c298c95d4ba0..738f20d935d6 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -497,7 +497,8 @@ static int ipa_suspend_work_handler(struct gsi_data_port *d_port)
log_event_dbg("%s: Calling xdci_suspend", __func__);
ret = ipa_usb_xdci_suspend(gsi->d_port.out_channel_handle,
- gsi->d_port.in_channel_handle, gsi->prot_id);
+ gsi->d_port.in_channel_handle, gsi->prot_id,
+ true);
if (!ret) {
d_port->sm_state = STATE_SUSPENDED;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 2ac142e3cce9..ac298e632d73 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1923,6 +1923,12 @@ no_bw:
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
+ xhci->usb2_ports = NULL;
+ xhci->usb3_ports = NULL;
+ xhci->port_array = NULL;
+ xhci->rh_bw = NULL;
+ xhci->ext_caps = NULL;
+
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
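xhci_mem_cleanup() can run again on a later re-initialization, so the freed pointers are cleared to keep a second pass from touching stale memory (kfree(NULL) is a no-op). The idiom, sketched:

#include <linux/slab.h>

/* Sketch: free and clear so a repeated cleanup is harmless. */
static void example_free_caps(void **ext_caps, void **port_array)
{
	kfree(*ext_caps);
	kfree(*port_array);

	*ext_caps = NULL;	/* next cleanup sees NULL; kfree(NULL) does nothing */
	*port_array = NULL;
}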
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 6c47c26b5df7..ea4fb4b0cd44 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
static const char hcd_name[] = "xhci_hcd";
@@ -156,7 +157,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3e49861a09a2..5193bf5eb8c3 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -241,10 +241,14 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto disable_usb_phy;
+ device_wakeup_enable(&hcd->self.root_hub->dev);
+
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED | IRQF_ONESHOT);
if (ret)
goto dealloc_usb2_hcd;
+ device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev);
+
ret = device_create_file(&pdev->dev, &dev_attr_config_imod);
if (ret)
dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n",
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b30831ef4014..5fc20c7c51f8 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1116,8 +1116,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Resume root hubs only when have pending events. */
status = readl(&xhci->op_regs->status);
if (status & STS_EINT) {
- usb_hcd_resume_root_hub(hcd);
usb_hcd_resume_root_hub(xhci->shared_hcd);
+ usb_hcd_resume_root_hub(hcd);
}
}
@@ -1132,10 +1132,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return retval;
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index a72c874f19a5..e193182af225 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -274,7 +274,7 @@ struct usbpd {
struct extcon_dev *extcon;
enum usbpd_state current_state;
- bool hard_reset;
+ bool hard_reset_recvd;
u8 rx_msg_type;
u8 rx_msg_len;
u32 rx_payload[7];
@@ -487,16 +487,12 @@ static int pd_eval_src_caps(struct usbpd *pd, const u32 *src_caps)
static void pd_send_hard_reset(struct usbpd *pd)
{
- int ret;
-
usbpd_dbg(&pd->dev, "send hard reset");
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
set_power_role(pd, pd->current_pr);
pd->hard_reset_count++;
- ret = pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
- if (!ret)
- pd->hard_reset = true;
+ pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
pd->in_pr_swap = false;
}
@@ -522,7 +518,7 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
set_power_role(pd, pd->current_pr);
- pd->hard_reset = true;
+ pd->hard_reset_recvd = true;
kick_sm(pd, 0);
}
@@ -538,7 +534,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
}
if (len < 2) {
- usbpd_err(&pd->dev, "invalid message received, len=%ld\n", len);
+ usbpd_err(&pd->dev, "invalid message received, len=%zd\n", len);
return;
}
@@ -547,7 +543,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
len -= sizeof(u16);
if (len % 4 != 0) {
- usbpd_err(&pd->dev, "len=%ld not multiple of 4\n", len);
+ usbpd_err(&pd->dev, "len=%zd not multiple of 4\n", len);
return;
}
@@ -566,7 +562,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
/* check header's count field to see if it matches len */
if (PD_MSG_HDR_COUNT(header) != (len / 4)) {
- usbpd_err(&pd->dev, "header count (%d) mismatch, len=%ld\n",
+ usbpd_err(&pd->dev, "header count (%d) mismatch, len=%zd\n",
PD_MSG_HDR_COUNT(header), len);
return;
}
@@ -753,40 +749,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
break;
- case PE_SRC_TRANSITION_TO_DEFAULT:
- pd->hard_reset = false;
-
- if (pd->vconn_enabled)
- regulator_disable(pd->vconn);
- regulator_disable(pd->vbus);
-
- if (pd->current_dr != DR_DFP) {
- extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
- pd->current_dr = DR_DFP;
- pd_phy_update_roles(pd->current_dr, pd->current_pr);
- }
-
- msleep(SRC_RECOVER_TIME);
-
- ret = regulator_enable(pd->vbus);
- if (ret)
- usbpd_err(&pd->dev, "Unable to enable vbus\n");
-
- if (pd->vconn_enabled) {
- ret = regulator_enable(pd->vconn);
- if (ret) {
- usbpd_err(&pd->dev, "Unable to enable vconn\n");
- pd->vconn_enabled = false;
- }
- }
-
- val.intval = 0;
- power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
-
- usbpd_set_state(pd, PE_SRC_STARTUP);
- break;
-
case PE_SRC_HARD_RESET:
case PE_SNK_HARD_RESET:
/* hard reset may sleep; handle it in the workqueue */
@@ -1403,7 +1365,7 @@ static void usbpd_sm(struct work_struct *w)
pd->in_pr_swap = false;
pd->pd_connected = false;
pd->in_explicit_contract = false;
- pd->hard_reset = false;
+ pd->hard_reset_recvd = false;
pd->caps_count = 0;
pd->hard_reset_count = 0;
pd->src_cap_id = 0;
@@ -1456,7 +1418,9 @@ static void usbpd_sm(struct work_struct *w)
}
/* Hard reset? */
- if (pd->hard_reset) {
+ if (pd->hard_reset_recvd) {
+ pd->hard_reset_recvd = false;
+
val.intval = 1;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
@@ -1464,10 +1428,12 @@ static void usbpd_sm(struct work_struct *w)
pd->in_pr_swap = false;
reset_vdm_state(pd);
- if (pd->current_pr == PR_SINK)
+ if (pd->current_pr == PR_SINK) {
usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
- else
- usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT);
+ } else {
+ pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
+ kick_sm(pd, PS_HARD_RESET_TIME);
+ }
goto sm_done;
}
@@ -1632,6 +1598,38 @@ static void usbpd_sm(struct work_struct *w)
}
break;
+ case PE_SRC_TRANSITION_TO_DEFAULT:
+ if (pd->vconn_enabled)
+ regulator_disable(pd->vconn);
+ regulator_disable(pd->vbus);
+
+ if (pd->current_dr != DR_DFP) {
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+ pd->current_dr = DR_DFP;
+ pd_phy_update_roles(pd->current_dr, pd->current_pr);
+ }
+
+ msleep(SRC_RECOVER_TIME);
+
+ ret = regulator_enable(pd->vbus);
+ if (ret)
+ usbpd_err(&pd->dev, "Unable to enable vbus\n");
+
+ if (pd->vconn_enabled) {
+ ret = regulator_enable(pd->vconn);
+ if (ret) {
+ usbpd_err(&pd->dev, "Unable to enable vconn\n");
+ pd->vconn_enabled = false;
+ }
+ }
+
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+ usbpd_set_state(pd, PE_SRC_STARTUP);
+ break;
+
case PE_SRC_HARD_RESET:
val.intval = 1;
power_supply_set_property(pd->usb_psy,
@@ -1641,9 +1639,8 @@ static void usbpd_sm(struct work_struct *w)
pd->in_explicit_contract = false;
reset_vdm_state(pd);
- usleep_range(PS_HARD_RESET_TIME * USEC_PER_MSEC,
- (PS_HARD_RESET_TIME + 5) * USEC_PER_MSEC);
- usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT);
+ pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
+ kick_sm(pd, PS_HARD_RESET_TIME);
break;
case PE_SNK_STARTUP:
@@ -1829,8 +1826,6 @@ static void usbpd_sm(struct work_struct *w)
break;
case PE_SNK_TRANSITION_TO_DEFAULT:
- pd->hard_reset = false;
-
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
@@ -2117,11 +2112,11 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
* During hard reset when VBUS goes to 0 the CC logic
* will report this as a disconnection. In those cases
* it can be ignored, however the downside is that
- * pd->hard_reset can be momentarily true even when a
- * non-PD capable source is attached, and can't be
- * distinguished from a physical disconnect. In that
- * case, allow for the common case of disconnecting
- * from an SDP.
+ * we can also happen to be in the SNK_Transition_to_default
+ * state due to a hard reset attempt even with a non-PD
+ * capable source, in which case a physical disconnect may get
+ * masked. In that case, allow for the common case of
+ * disconnecting from an SDP.
*
* The less common case is a PD-capable SDP which will
* result in a hard reset getting treated like a
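The hard-reset handling for a source no longer sleeps for PS_HARD_RESET_TIME inside the state-machine worker; it records PE_SRC_TRANSITION_TO_DEFAULT and re-arms the work after the delay. A hedged sketch of that "set state, schedule delayed work" pattern with a hypothetical state-machine context (the driver's kick_sm() is assumed to behave along these lines):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical state-machine context, loosely modeled on the driver above. */
struct example_sm {
	struct delayed_work	sm_work;
	int			current_state;
};

/* Sketch: transition later without sleeping in the work handler. */
static void example_transition_after(struct example_sm *sm, int next_state,
				     unsigned int delay_ms)
{
	sm->current_state = next_state;
	mod_delayed_work(system_wq, &sm->sm_work, msecs_to_jiffies(delay_ms));
}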
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 5b5e6210a1bb..0b9b60c3ca45 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -23,6 +23,8 @@
#include <linux/of_irq.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include "usbpd.h"
#define USB_PDPHY_MAX_DATA_OBJ_LEN 28
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index bdc0f2f24f19..a2b43a6e7fa7 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -108,6 +108,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -117,6 +118,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -140,6 +142,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf003f19..e40da7759a0e 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
+ if (size > urb->transfer_buffer_length) {
+ /* should not happen, probably malicious packet */
+ if (ud->side == USBIP_STUB) {
+ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+ return 0;
+ } else {
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return -EPIPE;
+ }
+ }
+
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 42ea4028cfe1..9868d8a5c1ed 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2249,7 +2249,6 @@ config XEN_FBDEV_FRONTEND
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
select XEN_XENBUS_FRONTEND
default y
help
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 0081725c6b5b..d00510029c93 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 2,
.hsync_len = 0,
.vsync_len = 0,
- .sync = FB_SYNC_CLK_INVERT |
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .sync = FB_SYNC_CLK_INVERT,
},
/* Sharp LK043T1DG01 */
[1] = {
@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 2,
.hsync_len = 41,
.vsync_len = 10,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .sync = 0,
.flag = 0,
},
[2] = {
@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 10,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .sync = 0,
.flag = 0,
},
[3] = {
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index 6009a9b97d1c..b905c0e855dd 100644
--- a/drivers/video/fbdev/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_FB_MSM_MDSS) += mdss_dba_utils.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdcp_1x.o
obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp.o mdss_dp_util.o
obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_aux.o
+obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_hdcp2p2.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_io_util.o
obj-$(CONFIG_FB_MSM_MDSS) += msm_ext_display.o
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index 79980acc2201..8663797f1730 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -39,6 +39,9 @@
#define PANEL_CMD_MIN_TX_COUNT 2
#define PANEL_DATA_NODE_LEN 80
+/* Hex number + whitespace */
+#define NEXT_VALUE_OFFSET 3
+
#define INVALID_XIN_ID 0xFF
static char panel_reg[2] = {DEFAULT_READ_PANEL_POWER_MODE_REG, 0x00};
@@ -81,7 +84,7 @@ static ssize_t panel_debug_base_offset_write(struct file *file,
buf[count] = 0; /* end of string */
- if (sscanf(buf, "%x %d", &off, &cnt) != 2)
+ if (sscanf(buf, "%x %u", &off, &cnt) != 2)
return -EFAULT;
if (off > dbg->max_offset)
@@ -129,7 +132,7 @@ static ssize_t panel_debug_base_reg_write(struct file *file,
struct mdss_debug_base *dbg = file->private_data;
char buf[PANEL_TX_MAX_BUF] = {0x0};
char reg[PANEL_TX_MAX_BUF] = {0x0};
- u32 len = 0, step = 0, value = 0;
+ u32 len = 0, value = 0;
char *bufp;
struct mdss_data_type *mdata = mdss_res;
@@ -152,13 +155,21 @@ static ssize_t panel_debug_base_reg_write(struct file *file,
buf[count] = 0; /* end of string */
bufp = buf;
- while (sscanf(bufp, "%x%n", &value, &step) > 0) {
+ /* End of a hex value in given string */
+ bufp[NEXT_VALUE_OFFSET - 1] = 0;
+ while (kstrtouint(bufp, 16, &value) == 0) {
reg[len++] = value;
if (len >= PANEL_TX_MAX_BUF) {
pr_err("wrong input reg len\n");
return -EFAULT;
}
- bufp += step;
+ bufp += NEXT_VALUE_OFFSET;
+ if ((bufp >= (buf + count)) || (bufp < buf)) {
+ pr_warn("%s,buffer out-of-bounds\n", __func__);
+ break;
+ }
+ /* End of a hex value in given string */
+ bufp[NEXT_VALUE_OFFSET - 1] = 0;
}
if (len < PANEL_CMD_MIN_TX_COUNT) {
pr_err("wrong input reg len\n");
@@ -203,6 +214,7 @@ static ssize_t panel_debug_base_reg_read(struct file *file,
struct mdss_panel_data *panel_data = ctl->panel_data;
struct mdss_dsi_ctrl_pdata *ctrl_pdata = container_of(panel_data,
struct mdss_dsi_ctrl_pdata, panel_data);
+ int rc = -EFAULT;
if (!dbg)
return -ENODEV;
@@ -221,7 +233,8 @@ static ssize_t panel_debug_base_reg_read(struct file *file,
if (!rx_buf || !panel_reg_buf) {
pr_err("not enough memory to hold panel reg dump\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto read_reg_fail;
}
if (mdata->debug_inf.debug_enable_clock)
@@ -260,8 +273,7 @@ static ssize_t panel_debug_base_reg_read(struct file *file,
read_reg_fail:
kfree(rx_buf);
kfree(panel_reg_buf);
- return -EFAULT;
-
+ return rc;
}
static const struct file_operations panel_off_fops = {
@@ -739,11 +751,11 @@ static ssize_t mdss_debug_factor_write(struct file *file,
if (strnchr(buf, count, '/')) {
/* Parsing buf as fraction */
- if (sscanf(buf, "%d/%d", &numer, &denom) != 2)
+ if (sscanf(buf, "%u/%u", &numer, &denom) != 2)
return -EFAULT;
} else {
/* Parsing buf as percentage */
- if (sscanf(buf, "%d", &numer) != 1)
+ if (kstrtouint(buf, 0, &numer))
return -EFAULT;
denom = 100;
}
@@ -1051,7 +1063,7 @@ static ssize_t mdss_debug_perf_bw_limit_write(struct file *file,
if (strnchr(buf, count, ' ')) {
/* Parsing buf */
- if (sscanf(buf, "%d %d", &mode, &val) != 2)
+ if (sscanf(buf, "%u %u", &mode, &val) != 2)
return -EFAULT;
}
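Replacing the sscanf("%x%n") loop with fixed-stride kstrtouint() parsing assumes the input is a run of two-digit hex values separated by single spaces; each token is NUL-terminated in place before conversion and the cursor is bounds-checked against the buffer. A sketch of that tokenizer (buf must be writable and NUL-terminated at buf[count]; the names are hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

#define EX_NEXT_VALUE_OFFSET	3	/* "XX " : two hex digits plus a space */

/* Sketch: parse space-separated two-digit hex bytes in place. */
static int example_parse_hex_bytes(char *buf, size_t count, u8 *out, size_t max)
{
	char *cur = buf;
	size_t len = 0;
	unsigned int value;

	while (cur + EX_NEXT_VALUE_OFFSET - 1 <= buf + count) {
		cur[EX_NEXT_VALUE_OFFSET - 1] = '\0';	/* terminate the "XX" token */
		if (kstrtouint(cur, 16, &value))
			break;
		if (len >= max)
			return -EINVAL;
		out[len++] = value;
		cur += EX_NEXT_VALUE_OFFSET;
	}

	return len ? (int)len : -EINVAL;
}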
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index b246204f3181..fa2e47e06503 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -36,7 +36,7 @@
#include "mdss_dp_util.h"
#include "mdss_hdmi_panel.h"
#include <linux/hdcp_qseecom.h>
-#include "mdss_hdcp_1x.h"
+#include "mdss_hdcp.h"
#include "mdss_debug.h"
#define RGB_COMPONENTS 3
@@ -857,6 +857,18 @@ int mdss_dp_wait4train(struct mdss_dp_drv_pdata *dp_drv)
return ret;
}
+static void mdss_dp_update_cable_status(struct mdss_dp_drv_pdata *dp,
+ bool connected)
+{
+ mutex_lock(&dp->pd_msg_mutex);
+ pr_debug("cable_connected to %d\n", connected);
+ if (dp->cable_connected != connected)
+ dp->cable_connected = connected;
+ else
+ pr_debug("no change in cable status\n");
+ mutex_unlock(&dp->pd_msg_mutex);
+}
+
static int dp_get_cable_status(struct platform_device *pdev, u32 vote)
{
struct mdss_dp_drv_pdata *dp_ctrl = platform_get_drvdata(pdev);
@@ -886,8 +898,6 @@ static int dp_audio_info_setup(struct platform_device *pdev,
}
mdss_dp_audio_setup_sdps(&dp_ctrl->ctrl_io);
- mdss_dp_audio_set_sample_rate(&dp_ctrl->ctrl_io,
- dp_ctrl->link_rate, params->sample_rate_hz);
mdss_dp_config_audio_acr_ctrl(&dp_ctrl->ctrl_io, dp_ctrl->link_rate);
mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt);
mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true);
@@ -1038,115 +1048,234 @@ static inline void mdss_dp_set_audio_switch_node(
val);
}
-int mdss_dp_on(struct mdss_panel_data *pdata)
+/**
+ * mdss_dp_get_lane_mapping() - returns lane mapping based on given orientation
+ * @orientation: usb plug orientation
+ * @lane_map: the configured lane mapping
+ *
+ * Returns 0 when the lane mapping is successfully determined based on the
+ * given usb plug orientation.
+ */
+static int mdss_dp_get_lane_mapping(struct mdss_dp_drv_pdata *dp,
+ enum plug_orientation orientation,
+ struct lane_mapping *lane_map)
{
- struct mdss_dp_drv_pdata *dp_drv = NULL;
int ret = 0;
- enum plug_orientation orientation = ORIENTATION_NONE;
- struct lane_mapping ln_map;
- if (!pdata) {
- pr_err("Invalid input data\n");
- return -EINVAL;
+ pr_debug("enter: orientation = %d\n", orientation);
+
+ if (!lane_map) {
+ pr_err("invalid lane map input");
+ ret = -EINVAL;
+ goto exit;
}
- dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
- panel_data);
+ /* Set the default lane mapping */
+ lane_map->lane0 = 2;
+ lane_map->lane1 = 3;
+ lane_map->lane2 = 1;
+ lane_map->lane3 = 0;
+
+ if (orientation == ORIENTATION_CC2) {
+ lane_map->lane0 = 1;
+ lane_map->lane1 = 0;
+ lane_map->lane2 = 2;
+ lane_map->lane3 = 3;
+
+ if (gpio_is_valid(dp->usbplug_cc_gpio)) {
+ gpio_set_value(dp->usbplug_cc_gpio, 1);
+ pr_debug("Configured cc gpio for new Orientation\n");
+ }
+ }
+
+ pr_debug("lane0 = %d, lane1 = %d, lane2 =%d, lane3 =%d\n",
+ lane_map->lane0, lane_map->lane1, lane_map->lane2,
+ lane_map->lane3);
+
+exit:
+ return ret;
+}
+
+/**
+ * mdss_dp_enable_mainlink_clocks() - enables Display Port main link clocks
+ * @dp: Display Port Driver data
+ *
+ * Returns 0 when the main link clocks are successfully enabled.
+ */
+static int mdss_dp_enable_mainlink_clocks(struct mdss_dp_drv_pdata *dp)
+{
+ int ret = 0;
+
+ dp->power_data[DP_CTRL_PM].clk_config[0].rate =
+ ((dp->link_rate * DP_LINK_RATE_MULTIPLIER) / 1000);/* KHz */
+
+ dp->pixel_rate = dp->panel_data.panel_info.clk_rate;
+ dp->power_data[DP_CTRL_PM].clk_config[3].rate =
+ (dp->pixel_rate / 1000);/* KHz */
+
+ ret = mdss_dp_clk_ctrl(dp, DP_CTRL_PM, true);
+ if (ret) {
+ pr_err("Unabled to start link clocks\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * mdss_dp_disable_mainlink_clocks() - disables Display Port main link clocks
+ * @dp: Display Port Driver data
+ */
+static void mdss_dp_disable_mainlink_clocks(struct mdss_dp_drv_pdata *dp_drv)
+{
+ mdss_dp_clk_ctrl(dp_drv, DP_CTRL_PM, false);
+}
+
+/**
+ * mdss_dp_configure_source_params() - configures DP transmitter source params
+ * @dp: Display Port Driver data
+ * @lane_map: usb port lane mapping
+ *
+ * Configures the DP transmitter source params including details such as lane
+ * configuration, output format and sink/panel timing information.
+ */
+static void mdss_dp_configure_source_params(struct mdss_dp_drv_pdata *dp,
+ struct lane_mapping *lane_map)
+{
+ mdss_dp_ctrl_lane_mapping(&dp->ctrl_io, *lane_map);
+ mdss_dp_fill_link_cfg(dp);
+ mdss_dp_mainlink_ctrl(&dp->ctrl_io, true);
+ mdss_dp_config_ctrl(dp);
+ mdss_dp_sw_mvid_nvid(&dp->ctrl_io);
+ mdss_dp_timing_cfg(&dp->ctrl_io, &dp->panel_data.panel_info);
+}
+
+/**
+ * mdss_dp_train_main_link() - initiates training of DP main link
+ * @dp: Display Port Driver data
+ *
+ * Initiates training of the DP main link and checks the state of the main
+ * link after the training is complete.
+ */
+static void mdss_dp_train_main_link(struct mdss_dp_drv_pdata *dp)
+{
+ int ready = 0;
+
+ pr_debug("enter\n");
+
+ mdss_dp_link_train(dp);
+ mdss_dp_wait4train(dp);
+
+ ready = mdss_dp_mainlink_ready(dp, BIT(0));
+
+ pr_debug("main link %s\n", ready ? "READY" : "NOT READY");
+}
+
+static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv)
+{
+ int ret = 0;
+ enum plug_orientation orientation = ORIENTATION_NONE;
+ struct lane_mapping ln_map;
/* wait until link training is completed */
mutex_lock(&dp_drv->train_mutex);
- pr_debug("Enter++ cont_splash=%d\n", dp_drv->cont_splash);
- /* Default lane mapping */
- ln_map.lane0 = 2;
- ln_map.lane1 = 3;
- ln_map.lane2 = 1;
- ln_map.lane3 = 0;
-
- if (!dp_drv->cont_splash) { /* vote for clocks */
- ret = mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, true);
- if (ret) {
- pr_err("Unabled to start core clocks\n");
- goto exit;
- }
- mdss_dp_hpd_configure(&dp_drv->ctrl_io, true);
-
- orientation = usbpd_get_plug_orientation(dp_drv->pd);
- pr_debug("plug Orientation = %d\n", orientation);
-
- if (orientation == ORIENTATION_CC2) {
- /* update lane mapping */
- ln_map.lane0 = 1;
- ln_map.lane1 = 0;
- ln_map.lane2 = 2;
- ln_map.lane3 = 3;
-
- if (gpio_is_valid(dp_drv->usbplug_cc_gpio)) {
- gpio_set_value(
- dp_drv->usbplug_cc_gpio, 1);
- pr_debug("Configured cc gpio for new Orientation\n");
- }
+ pr_debug("enter\n");
- }
+ orientation = usbpd_get_plug_orientation(dp_drv->pd);
+ pr_debug("plug orientation = %d\n", orientation);
- if (dp_drv->new_vic && (dp_drv->new_vic != dp_drv->vic))
- dp_init_panel_info(dp_drv, dp_drv->new_vic);
+ ret = mdss_dp_get_lane_mapping(dp_drv, orientation, &ln_map);
+ if (ret)
+ goto exit;
- dp_drv->link_rate =
- mdss_dp_gen_link_clk(&dp_drv->panel_data.panel_info,
- dp_drv->dpcd.max_lane_count);
+ mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
+ orientation, dp_drv->dpcd.max_lane_count);
- pr_debug("link_rate=0x%x, Max rate supported by sink=0x%x\n",
- dp_drv->link_rate, dp_drv->dpcd.max_link_rate);
- if (!dp_drv->link_rate) {
- pr_err("Unable to configure required link rate\n");
- ret = -EINVAL;
- goto exit;
- }
+ ret = mdss_dp_enable_mainlink_clocks(dp_drv);
+ if (ret)
+ goto exit;
- mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
- orientation, dp_drv->dpcd.max_lane_count);
+ mdss_dp_mainlink_reset(&dp_drv->ctrl_io);
- pr_debug("link_rate = 0x%x\n", dp_drv->link_rate);
+ reinit_completion(&dp_drv->idle_comp);
- dp_drv->power_data[DP_CTRL_PM].clk_config[0].rate =
- ((dp_drv->link_rate * DP_LINK_RATE_MULTIPLIER) /
- 1000); /* KHz */
+ mdss_dp_configure_source_params(dp_drv, &ln_map);
- dp_drv->pixel_rate = dp_drv->panel_data.panel_info.clk_rate;
- dp_drv->power_data[DP_CTRL_PM].clk_config[3].rate =
- (dp_drv->pixel_rate /
- 1000); /* KHz */
+ mdss_dp_train_main_link(dp_drv);
- ret = mdss_dp_clk_ctrl(dp_drv, DP_CTRL_PM, true);
- if (ret) {
- pr_err("Unabled to start link clocks\n");
- goto exit;
- }
+ dp_drv->power_on = true;
+ pr_debug("end\n");
- mdss_dp_mainlink_reset(&dp_drv->ctrl_io);
+exit:
+ mutex_unlock(&dp_drv->train_mutex);
+ return ret;
+}
- mdss_dp_ctrl_lane_mapping(&dp_drv->ctrl_io, ln_map);
- reinit_completion(&dp_drv->idle_comp);
- mdss_dp_fill_link_cfg(dp_drv);
- mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, true);
- mdss_dp_config_ctrl(dp_drv);
- mdss_dp_sw_mvid_nvid(&dp_drv->ctrl_io);
- mdss_dp_timing_cfg(&dp_drv->ctrl_io,
- &dp_drv->panel_data.panel_info);
- } else {
+int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
+{
+ int ret = 0;
+ enum plug_orientation orientation = ORIENTATION_NONE;
+ struct lane_mapping ln_map;
+
+ /* wait until link training is completed */
+ mutex_lock(&dp_drv->train_mutex);
+
+ pr_debug("Enter++ cont_splash=%d\n", dp_drv->cont_splash);
+
+ if (dp_drv->cont_splash) {
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true);
+ goto link_training;
}
- pr_debug("call link_training\n");
- mdss_dp_link_train(dp_drv);
+ ret = mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, true);
+ if (ret) {
+ pr_err("Unabled to start core clocks\n");
+ goto exit;
+ }
+ mdss_dp_hpd_configure(&dp_drv->ctrl_io, true);
- mdss_dp_wait4train(dp_drv);
+ orientation = usbpd_get_plug_orientation(dp_drv->pd);
+ pr_debug("plug Orientation = %d\n", orientation);
- dp_drv->cont_splash = 0;
+ ret = mdss_dp_get_lane_mapping(dp_drv, orientation, &ln_map);
+ if (ret)
+ goto exit;
+
+ if (dp_drv->new_vic && (dp_drv->new_vic != dp_drv->vic))
+ dp_init_panel_info(dp_drv, dp_drv->new_vic);
- if (mdss_dp_mainlink_ready(dp_drv, BIT(0)))
- pr_debug("mainlink ready\n");
+ dp_drv->link_rate =
+ mdss_dp_gen_link_clk(&dp_drv->panel_data.panel_info,
+ dp_drv->dpcd.max_lane_count);
+
+ pr_debug("link_rate=0x%x, Max rate supported by sink=0x%x\n",
+ dp_drv->link_rate, dp_drv->dpcd.max_link_rate);
+ if (!dp_drv->link_rate) {
+ pr_err("Unable to configure required link rate\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
+ orientation, dp_drv->dpcd.max_lane_count);
+
+ pr_debug("link_rate = 0x%x\n", dp_drv->link_rate);
+
+ ret = mdss_dp_enable_mainlink_clocks(dp_drv);
+ if (ret)
+ goto exit;
+
+ mdss_dp_mainlink_reset(&dp_drv->ctrl_io);
+
+ reinit_completion(&dp_drv->idle_comp);
+
+ mdss_dp_configure_source_params(dp_drv, &ln_map);
+
+link_training:
+ mdss_dp_train_main_link(dp_drv);
+
+ dp_drv->cont_splash = 0;
dp_drv->power_on = true;
mdss_dp_set_audio_switch_node(dp_drv, true);
@@ -1157,49 +1286,84 @@ exit:
return ret;
}
-static void mdss_dp_mainlink_off(struct mdss_panel_data *pdata)
+int mdss_dp_on(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
- const int idle_pattern_completion_timeout_ms = 3 * HZ / 100;
- dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
- panel_data);
- if (!dp_drv) {
+ if (!pdata) {
pr_err("Invalid input data\n");
- return;
+ return -EINVAL;
+ }
+
+ dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
+ panel_data);
+
+ return mdss_dp_on_hpd(dp_drv);
+}
+
+static inline void mdss_dp_reset_test_data(struct mdss_dp_drv_pdata *dp)
+{
+ dp->test_data = (const struct dpcd_test_request){ 0 };
+}
+
+static inline bool mdss_dp_is_link_status_updated(struct mdss_dp_drv_pdata *dp)
+{
+ return dp->link_status.link_status_updated;
+}
+
+static inline bool mdss_dp_is_link_training_requested(
+ struct mdss_dp_drv_pdata *dp)
+{
+ return (dp->test_data.test_requested == TEST_LINK_TRAINING);
+}
+
+static inline bool mdss_dp_soft_hpd_reset(struct mdss_dp_drv_pdata *dp)
+{
+ return mdss_dp_is_link_training_requested(dp) &&
+ dp->alt_mode.dp_status.hpd_irq;
+}
+
+static int mdss_dp_off_irq(struct mdss_dp_drv_pdata *dp_drv)
+{
+ if (!dp_drv->power_on) {
+ pr_debug("panel already powered off\n");
+ return 0;
}
- pr_debug("Entered++\n");
/* wait until link training is completed */
mutex_lock(&dp_drv->train_mutex);
- reinit_completion(&dp_drv->idle_comp);
- mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
- if (!wait_for_completion_timeout(&dp_drv->idle_comp,
- idle_pattern_completion_timeout_ms))
- pr_warn("PUSH_IDLE pattern timedout\n");
+ pr_debug("start\n");
+
+ mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
+
+ mdss_dp_audio_enable(&dp_drv->ctrl_io, false);
+
+ /* Make sure the DP main link is disabled before clk disable */
+ wmb();
+ mdss_dp_disable_mainlink_clocks(dp_drv);
+ dp_drv->power_on = false;
mutex_unlock(&dp_drv->train_mutex);
- pr_debug("mainlink off done\n");
+ complete_all(&dp_drv->irq_comp);
+ pr_debug("end\n");
+
+ return 0;
}
-int mdss_dp_off(struct mdss_panel_data *pdata)
+static int mdss_dp_off_hpd(struct mdss_dp_drv_pdata *dp_drv)
{
- struct mdss_dp_drv_pdata *dp_drv = NULL;
-
- dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
- panel_data);
- if (!dp_drv) {
- pr_err("Invalid input data\n");
- return -EINVAL;
+ if (!dp_drv->power_on) {
+ pr_debug("panel already powered off\n");
+ return 0;
}
- pr_debug("Entered++, cont_splash=%d\n", dp_drv->cont_splash);
/* wait until link training is completed */
mutex_lock(&dp_drv->train_mutex);
- if (dp_drv->link_clks_on)
- mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
+ pr_debug("Entered++, cont_splash=%d\n", dp_drv->cont_splash);
+
+ mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false);
@@ -1219,7 +1383,7 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
/* Make sure DP is disabled before clk disable */
wmb();
- mdss_dp_clk_ctrl(dp_drv, DP_CTRL_PM, false);
+ mdss_dp_disable_mainlink_clocks(dp_drv);
mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, false);
mdss_dp_regulator_ctrl(dp_drv, false);
@@ -1232,6 +1396,23 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
return 0;
}
+int mdss_dp_off(struct mdss_panel_data *pdata)
+{
+ struct mdss_dp_drv_pdata *dp = NULL;
+
+ dp = container_of(pdata, struct mdss_dp_drv_pdata,
+ panel_data);
+ if (!dp) {
+ pr_err("Invalid input data\n");
+ return -EINVAL;
+ }
+
+ if (mdss_dp_soft_hpd_reset(dp))
+ return mdss_dp_off_irq(dp);
+ else
+ return mdss_dp_off_hpd(dp);
+}
+
static void mdss_dp_send_cable_notification(
struct mdss_dp_drv_pdata *dp, int val)
{
@@ -1444,22 +1625,24 @@ static void mdss_dp_hdcp_cb(void *ptr, enum hdcp_states status)
return;
}
- ops = dp->hdcp_ops;
+ ops = dp->hdcp.ops;
mutex_lock(&dp->train_mutex);
switch (status) {
case HDCP_STATE_AUTHENTICATED:
- pr_debug("hdcp 1.3 authenticated\n");
+ pr_debug("hdcp authenticated\n");
+ dp->hdcp.auth_state = true;
break;
case HDCP_STATE_AUTH_FAIL:
+ dp->hdcp.auth_state = false;
+
if (dp->power_on) {
pr_debug("Reauthenticating\n");
if (ops && ops->reauthenticate) {
- rc = ops->reauthenticate(dp->hdcp_data);
+ rc = ops->reauthenticate(dp->hdcp.data);
if (rc)
- pr_err("HDCP reauth failed. rc=%d\n",
- rc);
+ pr_err("reauth failed rc=%d\n", rc);
}
} else {
pr_debug("not reauthenticating, cable disconnected\n");
@@ -1508,19 +1691,22 @@ static int mdss_dp_hdcp_init(struct mdss_panel_data *pdata)
hdcp_init_data.sec_access = true;
hdcp_init_data.client_id = HDCP_CLIENT_DP;
- dp_drv->hdcp_data = hdcp_1x_init(&hdcp_init_data);
- if (IS_ERR_OR_NULL(dp_drv->hdcp_data)) {
+ dp_drv->hdcp.data = hdcp_1x_init(&hdcp_init_data);
+ if (IS_ERR_OR_NULL(dp_drv->hdcp.data)) {
pr_err("Error hdcp init\n");
rc = -EINVAL;
goto error;
}
- dp_drv->panel_data.panel_info.hdcp_1x_data = dp_drv->hdcp_data;
+ dp_drv->panel_data.panel_info.hdcp_1x_data = dp_drv->hdcp.data;
pr_debug("HDCP 1.3 initialized\n");
- dp_drv->hdcp_ops = hdcp_1x_start(dp_drv->hdcp_data);
+ dp_drv->hdcp.hdcp2 = dp_hdcp2p2_init(&hdcp_init_data);
+ if (!IS_ERR_OR_NULL(dp_drv->hdcp.data))
+ pr_debug("HDCP 2.2 initialized\n");
+ dp_drv->hdcp.feature_enabled = true;
return 0;
error:
return rc;
@@ -1634,13 +1820,74 @@ static int mdss_dp_sysfs_create(struct mdss_dp_drv_pdata *dp,
return 0;
}
+static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
+{
+ struct mdss_dp_drv_pdata *dp_drv = NULL;
+ const int idle_pattern_completion_timeout_ms = 3 * HZ / 100;
+
+ dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
+ panel_data);
+ if (!dp_drv) {
+ pr_err("Invalid input data\n");
+ return;
+ }
+ pr_debug("Entered++\n");
+
+ /* wait until link training is completed */
+ mutex_lock(&dp_drv->train_mutex);
+
+ mdss_dp_aux_set_sink_power_state(dp_drv, SINK_POWER_OFF);
+
+ reinit_completion(&dp_drv->idle_comp);
+ mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
+ if (!wait_for_completion_timeout(&dp_drv->idle_comp,
+ idle_pattern_completion_timeout_ms))
+ pr_warn("PUSH_IDLE pattern timedout\n");
+
+ mutex_unlock(&dp_drv->train_mutex);
+ pr_debug("mainlink off done\n");
+}
+
+static void mdss_dp_update_hdcp_info(struct mdss_dp_drv_pdata *dp)
+{
+ void *fd = NULL;
+ struct hdcp_ops *ops = NULL;
+
+ if (!dp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ /* check first if hdcp2p2 is supported */
+ fd = dp->hdcp.hdcp2;
+ if (fd)
+ ops = dp_hdcp2p2_start(fd);
+
+ if (ops && ops->feature_supported)
+ dp->hdcp.hdcp2_present = ops->feature_supported(fd);
+ else
+ dp->hdcp.hdcp2_present = false;
+
+ if (!dp->hdcp.hdcp2_present) {
+ dp->hdcp.hdcp1_present = hdcp1_check_if_supported_load_app();
+
+ if (dp->hdcp.hdcp1_present) {
+ fd = dp->hdcp.hdcp1;
+ ops = hdcp_1x_start(fd);
+ }
+ }
+
+ /* update internal data about hdcp */
+ dp->hdcp.data = fd;
+ dp->hdcp.ops = ops;
+}
+
static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
int event, void *arg)
{
int rc = 0;
struct fb_info *fbi;
struct mdss_dp_drv_pdata *dp = NULL;
- struct hdcp_ops *ops;
if (!pdata) {
pr_err("%s: Invalid input data\n", __func__);
@@ -1652,25 +1899,24 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
dp = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
- ops = dp->hdcp_ops;
-
switch (event) {
case MDSS_EVENT_UNBLANK:
rc = mdss_dp_on(pdata);
break;
case MDSS_EVENT_PANEL_ON:
- if (hdcp1_check_if_supported_load_app()) {
- if (ops && ops->authenticate)
- rc = ops->authenticate(dp->hdcp_data);
- }
+ mdss_dp_update_hdcp_info(dp);
+
+ if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
+ rc = dp->hdcp.ops->authenticate(dp->hdcp.data);
break;
case MDSS_EVENT_PANEL_OFF:
rc = mdss_dp_off(pdata);
break;
case MDSS_EVENT_BLANK:
- if (ops && ops->off)
- ops->off(dp->hdcp_data);
- mdss_dp_mainlink_off(pdata);
+ if (dp->hdcp.ops && dp->hdcp.ops->off)
+ dp->hdcp.ops->off(dp->hdcp.data);
+
+ mdss_dp_mainlink_push_idle(pdata);
break;
case MDSS_EVENT_FB_REGISTERED:
fbi = (struct fb_info *)arg;
@@ -1704,6 +1950,7 @@ static int mdss_dp_remove(struct platform_device *pdev)
struct mdss_dp_drv_pdata *dp_drv = NULL;
dp_drv = platform_get_drvdata(pdev);
+ dp_hdcp2p2_deinit(dp_drv->hdcp.data);
iounmap(dp_drv->ctrl_io.base);
dp_drv->ctrl_io.base = NULL;
@@ -1944,11 +2191,6 @@ irqreturn_t dp_isr(int irq, void *ptr)
dp_aux_native_handler(dp, isr1);
}
- if (dp->hdcp_ops && dp->hdcp_ops->isr) {
- if (dp->hdcp_ops->isr(dp->hdcp_data))
- pr_err("dp_hdcp_isr failed\n");
- }
-
return IRQ_HANDLED;
}
@@ -1976,10 +2218,8 @@ static void usbpd_connect_callback(struct usbpd_svid_handler *hdlr)
return;
}
- mutex_lock(&dp_drv->pd_msg_mutex);
- dp_drv->cable_connected = true;
+ mdss_dp_update_cable_status(dp_drv, true);
dp_send_events(dp_drv, EV_USBPD_DISCOVER_MODES);
- mutex_unlock(&dp_drv->pd_msg_mutex);
pr_debug("discover_mode event sent\n");
}
@@ -1994,10 +2234,8 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr)
}
pr_debug("cable disconnected\n");
- mutex_lock(&dp_drv->pd_msg_mutex);
- dp_drv->cable_connected = false;
+ mdss_dp_update_cable_status(dp_drv, false);
dp_drv->alt_mode.current_state = UNKNOWN_STATE;
- mutex_unlock(&dp_drv->pd_msg_mutex);
mdss_dp_notify_clients(dp_drv, false);
}
@@ -2040,6 +2278,160 @@ end:
return ret;
}
+/**
+ * mdss_dp_send_test_response() - sends the test response to the sink
+ * @dp: Display Port Driver data
+ *
+ * This function will send the test response to the sink but only after
+ * any previous link training has been completed.
+ */
+static inline void mdss_dp_send_test_response(struct mdss_dp_drv_pdata *dp)
+{
+ mutex_lock(&dp->train_mutex);
+ mdss_dp_aux_send_test_response(dp);
+ mutex_unlock(&dp->train_mutex);
+}
+
+/**
+ * mdss_dp_hpd_irq_notify_clients() - notifies DP clients of HPD IRQ tear down
+ * @dp: Display Port Driver data
+ *
+ * This function will notify the display/audio clients of a DP tear down
+ * during an HPD IRQ. This happens only if the HPD IRQ state has toggled,
+ * in which case user space proceeds with shutting down the DP driver,
+ * including disabling the mainlink and pushing the controller into the
+ * idle state.
+ */
+static int mdss_dp_hpd_irq_notify_clients(struct mdss_dp_drv_pdata *dp)
+{
+ const int irq_comp_timeout = HZ * 2;
+ int ret = 0;
+
+ if (dp->hpd_irq_toggled) {
+ mdss_dp_notify_clients(dp, false);
+
+ reinit_completion(&dp->irq_comp);
+ ret = wait_for_completion_timeout(&dp->irq_comp,
+ irq_comp_timeout);
+ if (ret <= 0) {
+ pr_warn("irq_comp timed out\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mdss_dp_link_retraining() - initiates link retraining
+ * @dp: Display Port Driver data
+ *
+ * This function will initiate link retraining by first notifying
+ * DP clients and triggering a DP shutdown, and then re-enabling DP once
+ * the notification has completed successfully.
+ */
+static inline void mdss_dp_link_retraining(struct mdss_dp_drv_pdata *dp)
+{
+ if (mdss_dp_hpd_irq_notify_clients(dp))
+ return;
+
+ mdss_dp_on_irq(dp);
+}
+
+/**
+ * mdss_dp_process_link_status_update() - processes link status updates
+ * @dp: Display Port Driver data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ */
+static void mdss_dp_process_link_status_update(struct mdss_dp_drv_pdata *dp)
+{
+ if (!mdss_dp_is_link_status_updated(dp) ||
+ (mdss_dp_aux_channel_eq_done(dp) &&
+ mdss_dp_aux_clock_recovery_done(dp)))
+ return;
+
+ pr_info("channel_eq_done = %d, clock_recovery_done = %d\n",
+ mdss_dp_aux_channel_eq_done(dp),
+ mdss_dp_aux_clock_recovery_done(dp));
+
+ mdss_dp_link_retraining(dp);
+}
+
+/**
+ * mdss_dp_process_link_training_request() - processes new training requests
+ * @dp: Display Port Driver data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and
+ * link rate, and then trigger the link retraining procedure.
+ */
+static void mdss_dp_process_link_training_request(struct mdss_dp_drv_pdata *dp)
+{
+ if (!mdss_dp_is_link_training_requested(dp))
+ return;
+
+ mdss_dp_send_test_response(dp);
+
+ pr_info("%s link rate = 0x%x, lane count = 0x%x\n",
+ mdss_dp_get_test_name(TEST_LINK_TRAINING),
+ dp->test_data.test_link_rate,
+ dp->test_data.test_lane_count);
+ dp->dpcd.max_lane_count =
+ dp->test_data.test_lane_count;
+ dp->link_rate = dp->test_data.test_link_rate;
+
+ mdss_dp_link_retraining(dp);
+}
+
+/**
+ * mdss_dp_process_hpd_irq_high() - handle HPD IRQ transition to HIGH
+ * @dp: Display Port Driver data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including back-to-back HPD IRQ HIGH assertions), indicating the start
+ * of a new link training request or a sink status update.
+ */
+static void mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp)
+{
+ pr_debug("enter: HPD IRQ High\n");
+
+ dp->hpd_irq_on = true;
+
+ mdss_dp_aux_parse_sink_status_field(dp);
+
+ mdss_dp_process_link_training_request(dp);
+
+ mdss_dp_process_link_status_update(dp);
+
+ mdss_dp_reset_test_data(dp);
+
+ pr_debug("done\n");
+}
+
+/**
+ * mdss_dp_process_hpd_irq_low() - handle HPD IRQ transition to LOW
+ * @dp: Display Port Driver data
+ *
+ * This function will handle the HPD IRQ state transitions from HIGH to LOW,
+ * indicating the end of a test request.
+ */
+static void mdss_dp_process_hpd_irq_low(struct mdss_dp_drv_pdata *dp)
+{
+ pr_debug("enter: HPD IRQ low\n");
+
+ dp->hpd_irq_on = false;
+
+ mdss_dp_update_cable_status(dp, false);
+ mdss_dp_mainlink_push_idle(&dp->panel_data);
+ mdss_dp_off_hpd(dp);
+
+ mdss_dp_reset_test_data(dp);
+
+ pr_debug("done\n");
+}
+
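For orientation, here is a condensed sketch (not part of the patch; the function name is hypothetical) of how the attention handler below dispatches on the DP status VDO. The full handler additionally kicks off host init/configure and the HDCP ISR on the connect path.

/* Hypothetical condensed form of the dispatch in usbpd_response_callback() */
static void example_handle_attention(struct mdss_dp_drv_pdata *dp)
{
	bool irq = dp->alt_mode.dp_status.hpd_irq;

	dp->hpd_irq_toggled = dp->hpd_irq_on != irq;

	if (irq)					/* IRQ asserted (or re-asserted) */
		mdss_dp_process_hpd_irq_high(dp);
	else if (dp->hpd_irq_toggled)			/* IRQ just de-asserted */
		mdss_dp_process_hpd_irq_low(dp);
	else if (!dp->alt_mode.dp_status.hpd_high)	/* plain unplug */
		mdss_dp_notify_clients(dp, false);
	else						/* plain plug: connect path */
		mdss_dp_update_cable_status(dp, true);
}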
static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
enum usbpd_svdm_cmd_type cmd_type,
const u32 *vdos, int num_vdos)
@@ -2055,8 +2447,10 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
pr_debug("callback -> cmd: 0x%x, *vdos = 0x%x, num_vdos = %d\n",
cmd, *vdos, num_vdos);
- if (mdss_dp_validate_callback(cmd, cmd_type, num_vdos))
+ if (mdss_dp_validate_callback(cmd, cmd_type, num_vdos)) {
+ pr_debug("invalid callback received\n");
return;
+ }
switch (cmd) {
case USBPD_SVDM_DISCOVER_MODES:
@@ -2073,10 +2467,31 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
dp_drv->alt_mode.dp_status.response = *vdos;
mdss_dp_usbpd_ext_dp_status(&dp_drv->alt_mode.dp_status);
- if (!dp_drv->alt_mode.dp_status.hpd_high)
- return;
+ dp_drv->hpd_irq_toggled = dp_drv->hpd_irq_on !=
+ dp_drv->alt_mode.dp_status.hpd_irq;
+
+ if (dp_drv->alt_mode.dp_status.hpd_irq) {
+ mdss_dp_process_hpd_irq_high(dp_drv);
+ break;
+ }
- pr_debug("HPD high\n");
+ if (dp_drv->hpd_irq_toggled
+ && !dp_drv->alt_mode.dp_status.hpd_irq) {
+ mdss_dp_process_hpd_irq_low(dp_drv);
+ break;
+ }
+
+ if (!dp_drv->alt_mode.dp_status.hpd_high) {
+ pr_debug("Attention: HPD low\n");
+ mdss_dp_update_cable_status(dp_drv, false);
+ mdss_dp_notify_clients(dp_drv, false);
+ pr_debug("Attention: Notified clients\n");
+ break;
+ }
+
+ pr_debug("Attention: HPD high\n");
+
+ mdss_dp_update_cable_status(dp_drv, true);
dp_drv->alt_mode.current_state |= DP_STATUS_DONE;
@@ -2084,6 +2499,10 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
mdss_dp_host_init(&dp_drv->panel_data);
else
dp_send_events(dp_drv, EV_USBPD_DP_CONFIGURE);
+
+ if (dp_drv->alt_mode.dp_status.hpd_irq && dp_drv->power_on &&
+ dp_drv->hdcp.ops && dp_drv->hdcp.ops->isr)
+ dp_drv->hdcp.ops->isr(dp_drv->hdcp.data);
break;
case DP_VDM_STATUS:
dp_drv->alt_mode.dp_status.response = *vdos;
@@ -2096,7 +2515,7 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
break;
case DP_VDM_CONFIGURE:
dp_drv->alt_mode.current_state |= DP_CONFIGURE_DONE;
- pr_debug("config USBPD to DP done\n");
+ pr_debug("Configure: config USBPD to DP done\n");
if (dp_drv->alt_mode.dp_status.hpd_high)
mdss_dp_host_init(&dp_drv->panel_data);
@@ -2259,7 +2678,10 @@ static int mdss_dp_probe(struct platform_device *pdev)
dp_drv->inited = true;
dp_drv->wait_for_audio_comp = false;
+ dp_drv->hpd_irq_on = false;
+ mdss_dp_reset_test_data(dp_drv);
init_completion(&dp_drv->audio_comp);
+ init_completion(&dp_drv->irq_comp);
pr_debug("done\n");
@@ -2280,6 +2702,53 @@ probe_err:
}
+void *mdss_dp_get_hdcp_data(struct device *dev)
+{
+ struct mdss_dp_drv_pdata *dp_drv = NULL;
+
+ if (!dev) {
+ pr_err("%s:Invalid input\n", __func__);
+ return NULL;
+ }
+ dp_drv = dev_get_drvdata(dev);
+ if (!dp_drv) {
+ pr_err("%s:Invalid dp driver\n", __func__);
+ return NULL;
+ }
+ return dp_drv->hdcp.data;
+}
+
+static inline bool dp_is_hdcp_enabled(struct mdss_dp_drv_pdata *dp_drv)
+{
+ return dp_drv->hdcp.feature_enabled &&
+ (dp_drv->hdcp.hdcp1_present || dp_drv->hdcp.hdcp2_present) &&
+ dp_drv->hdcp.ops;
+}
+
+static inline bool dp_is_stream_shareable(struct mdss_dp_drv_pdata *dp_drv)
+{
+ bool ret = 0;
+
+ switch (dp_drv->hdcp.enc_lvl) {
+ case HDCP_STATE_AUTH_ENC_NONE:
+ ret = true;
+ break;
+ case HDCP_STATE_AUTH_ENC_1X:
+ ret = dp_is_hdcp_enabled(dp_drv) &&
+ dp_drv->hdcp.auth_state;
+ break;
+ case HDCP_STATE_AUTH_ENC_2P2:
+ ret = dp_drv->hdcp.feature_enabled &&
+ dp_drv->hdcp.hdcp2_present &&
+ dp_drv->hdcp.auth_state;
+ break;
+ default:
+ ret = false;
+ }
+
+ return ret;
+}
+
static const struct of_device_id msm_mdss_dp_dt_match[] = {
{.compatible = "qcom,mdss-dp"},
{}
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 8d5af4dc5bf3..4ba2d20d4261 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -70,8 +70,6 @@
#define EDP_INTR_ACK_SHIFT 1
#define EDP_INTR_MASK_SHIFT 2
-#define EDP_MAX_LANE 4
-
/* isr */
#define EDP_INTR_HPD BIT(0)
#define EDP_INTR_AUX_I2C_DONE BIT(3)
@@ -105,7 +103,7 @@
EDP_INTR_FRAME_END | EDP_INTR_CRC_UPDATED)
#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
-#define EV_EVENT_STR(x) #x
+#define DP_ENUM_STR(x) #x
struct edp_buf {
char *start; /* buffer start addr */
@@ -256,6 +254,18 @@ struct dpcd_link_status {
char req_pre_emphasis[4];
};
+struct dpcd_test_request {
+ u32 test_requested;
+ u32 test_link_rate;
+ u32 test_lane_count;
+ u32 response;
+};
+
+struct dpcd_sink_count {
+ u32 count;
+ bool cp_ready;
+};
+
struct display_timing_desc {
u32 pclk;
u32 h_addressable; /* addressable + border = active */
@@ -317,8 +327,19 @@ struct dp_statistic {
u32 aux_native_rx;
};
-#define DPCD_LINK_VOLTAGE_MAX 4
-#define DPCD_LINK_PRE_EMPHASIS_MAX 4
+enum dpcd_link_voltage_level {
+ DPCD_LINK_VOLTAGE_LEVEL_0 = 0,
+ DPCD_LINK_VOLTAGE_LEVEL_1 = 1,
+ DPCD_LINK_VOLTAGE_LEVEL_2 = 2,
+ DPCD_LINK_VOLTAGE_MAX = DPCD_LINK_VOLTAGE_LEVEL_2,
+};
+
+enum dpcd_link_preemaphasis_level {
+ DPCD_LINK_PRE_EMPHASIS_LEVEL_0 = 0,
+ DPCD_LINK_PRE_EMPHASIS_LEVEL_1 = 1,
+ DPCD_LINK_PRE_EMPHASIS_LEVEL_2 = 2,
+ DPCD_LINK_PRE_EMPHASIS_MAX = DPCD_LINK_PRE_EMPHASIS_LEVEL_2,
+};
struct dp_pinctrl_res {
struct pinctrl *pinctrl;
@@ -329,6 +350,21 @@ struct dp_pinctrl_res {
irqreturn_t dp_isr(int irq, void *ptr);
+struct dp_hdcp {
+ void *data;
+ struct hdcp_ops *ops;
+
+ void *hdcp1;
+ void *hdcp2;
+
+ int enc_lvl;
+
+ bool auth_state;
+ bool hdcp1_present;
+ bool hdcp2_present;
+ bool feature_enabled;
+};
+
struct mdss_dp_drv_pdata {
/* device driver */
int (*on) (struct mdss_panel_data *pdata);
@@ -337,6 +373,7 @@ struct mdss_dp_drv_pdata {
struct platform_device *ext_pdev;
struct usbpd *pd;
+ struct dp_hdcp hdcp;
struct usbpd_svid_handler svid_handler;
struct dp_alt_mode alt_mode;
bool dp_initialized;
@@ -401,6 +438,7 @@ struct mdss_dp_drv_pdata {
struct completion idle_comp;
struct completion video_comp;
struct completion audio_comp;
+ struct completion irq_comp;
struct mutex aux_mutex;
struct mutex train_mutex;
struct mutex pd_msg_mutex;
@@ -426,6 +464,8 @@ struct mdss_dp_drv_pdata {
u32 bpp;
struct dp_statistic dp_stat;
bool wait_for_audio_comp;
+ bool hpd_irq_on;
+ bool hpd_irq_toggled;
/* event */
struct workqueue_struct *workq;
@@ -441,10 +481,47 @@ struct mdss_dp_drv_pdata {
u32 new_vic;
int fb_node;
- void *hdcp_data;
- struct hdcp_ops *hdcp_ops;
+ struct dpcd_test_request test_data;
+ struct dpcd_sink_count sink_count;
};
+enum dp_lane_count {
+ DP_LANE_COUNT_1 = 1,
+ DP_LANE_COUNT_2 = 2,
+ DP_LANE_COUNT_4 = 4,
+};
+
+enum test_response {
+ TEST_NACK = 0x0,
+ TEST_ACK = 0x1,
+};
+
+static inline char *mdss_dp_get_test_response(u32 test_response)
+{
+ switch (test_response) {
+ case TEST_NACK: return DP_ENUM_STR(TEST_NACK);
+ case TEST_ACK: return DP_ENUM_STR(TEST_ACK);
+ default: return "unknown";
+ }
+}
+
+enum test_type {
+ UNKNOWN_TEST = 0,
+ TEST_LINK_TRAINING = BIT(0),
+ TEST_PATTERN = BIT(1),
+ TEST_EDID_READ = BIT(2),
+};
+
+static inline char *mdss_dp_get_test_name(u32 test_requested)
+{
+ switch (test_requested) {
+ case TEST_LINK_TRAINING: return DP_ENUM_STR(TEST_LINK_TRAINING);
+ case TEST_PATTERN: return DP_ENUM_STR(TEST_PATTERN);
+ case TEST_EDID_READ: return DP_ENUM_STR(TEST_EDID_READ);
+ default: return "unknown";
+ }
+}
+
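DP_ENUM_STR() is plain preprocessor stringification (#x); a tiny hypothetical call site, not part of the patch, shows what the helpers above return:

/* Hypothetical call site: #x turns the enumerator token into a string */
pr_debug("test = %s, response = %s\n",
	 mdss_dp_get_test_name(TEST_LINK_TRAINING),	/* "TEST_LINK_TRAINING" */
	 mdss_dp_get_test_response(TEST_ACK));		/* "TEST_ACK" */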
static inline const char *__mdss_dp_pm_name(enum dp_pm_type module)
{
switch (module) {
@@ -470,19 +547,19 @@ static inline char *mdss_dp_ev_event_to_string(int event)
{
switch (event) {
case EV_EDP_AUX_SETUP:
- return EV_EVENT_STR(EV_EDP_AUX_SETUP);
+ return DP_ENUM_STR(EV_EDP_AUX_SETUP);
case EV_EDID_READ:
- return EV_EVENT_STR(EV_EDID_READ);
+ return DP_ENUM_STR(EV_EDID_READ);
case EV_DPCD_CAP_READ:
- return EV_EVENT_STR(EV_DPCD_CAP_READ);
+ return DP_ENUM_STR(EV_DPCD_CAP_READ);
case EV_DPCD_STATUS_READ:
- return EV_EVENT_STR(EV_DPCD_STATUS_READ);
+ return DP_ENUM_STR(EV_DPCD_STATUS_READ);
case EV_LINK_TRAIN:
- return EV_EVENT_STR(EV_LINK_TRAIN);
+ return DP_ENUM_STR(EV_LINK_TRAIN);
case EV_IDLE_PATTERNS_SENT:
- return EV_EVENT_STR(EV_IDLE_PATTERNS_SENT);
+ return DP_ENUM_STR(EV_IDLE_PATTERNS_SENT);
case EV_VIDEO_READY:
- return EV_EVENT_STR(EV_VIDEO_READY);
+ return DP_ENUM_STR(EV_VIDEO_READY);
default:
return "unknown";
}
@@ -492,6 +569,7 @@ void mdss_dp_phy_initialize(struct mdss_dp_drv_pdata *dp);
void mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *dp);
int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *dp);
+void mdss_dp_aux_parse_sink_status_field(struct mdss_dp_drv_pdata *dp);
int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp);
int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp);
void dp_aux_i2c_handler(struct mdss_dp_drv_pdata *dp, u32 isr);
@@ -503,5 +581,11 @@ void mdss_dp_sink_power_down(struct mdss_dp_drv_pdata *ep);
void mdss_dp_lane_power_ctrl(struct mdss_dp_drv_pdata *ep, int up);
void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *ep);
char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt);
+int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state);
+void mdss_dp_aux_send_test_response(struct mdss_dp_drv_pdata *ep);
+void *mdss_dp_get_hdcp_data(struct device *dev);
+int mdss_dp_hdcp2p2_init(struct mdss_dp_drv_pdata *dp_drv);
+bool mdss_dp_aux_clock_recovery_done(struct mdss_dp_drv_pdata *ep);
+bool mdss_dp_aux_channel_eq_done(struct mdss_dp_drv_pdata *ep);
#endif /* MDSS_DP_H */
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 3c525b0dac4f..91066662e793 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -277,13 +277,15 @@ static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep,
wait_for_completion(&ep->aux_comp);
- if (ep->aux_error_num == EDP_AUX_ERR_NONE)
+ if (ep->aux_error_num == EDP_AUX_ERR_NONE) {
ret = dp_cmd_fifo_rx(rp, len, ep->base);
- else
- ret = ep->aux_error_num;
- if (cmds->out_buf)
- memcpy(cmds->out_buf, rp->data, cmds->len);
+ if (cmds->out_buf)
+ memcpy(cmds->out_buf, rp->data, cmds->len);
+
+ } else {
+ ret = ep->aux_error_num;
+ }
ep->aux_cmd_busy = 0;
mutex_unlock(&ep->aux_mutex);
@@ -952,6 +954,232 @@ static int dp_link_status_read(struct mdss_dp_drv_pdata *ep, int len)
return len;
}
+/**
+ * mdss_dp_aux_send_test_response() - sends a test response to the sink
+ * @dp: Display Port Driver data
+ */
+void mdss_dp_aux_send_test_response(struct mdss_dp_drv_pdata *dp)
+{
+ char test_response[4];
+
+ test_response[0] = dp->test_data.response;
+
+ pr_debug("sending test response %s",
+ mdss_dp_get_test_response(test_response[0]));
+ dp_aux_write_buf(dp, 0x260, test_response, 1, 0);
+}
+
+/**
+ * dp_is_link_rate_valid() - validates the link rate
+ * @lane_rate: link rate requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static bool dp_is_link_rate_valid(u32 link_rate)
+{
+ return (link_rate == DP_LINK_RATE_162) ||
+ (link_rate == DP_LINK_RATE_270) ||
+ (link_rate == DP_LINK_RATE_540);
+}
+
+/**
+ * dp_is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static bool dp_is_lane_count_valid(u32 lane_count)
+{
+ return (lane_count == DP_LANE_COUNT_1) ||
+ (lane_count == DP_LANE_COUNT_2) ||
+ (lane_count == DP_LANE_COUNT_4);
+}
+
+/**
+ * dp_parse_link_training_params() - parses link training parameters from DPCD
+ * @ep: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if the parsed values are valid.
+ */
+static int dp_parse_link_training_params(struct mdss_dp_drv_pdata *ep)
+{
+ int ret = 0;
+ char *bp;
+ char data;
+ struct edp_buf *rp;
+ int rlen;
+ int const test_parameter_len = 0x1;
+ int const test_link_rate_addr = 0x219;
+ int const test_lane_count_addr = 0x220;
+
+ /* Read the requested link rate (Byte 0x219). */
+ rlen = dp_aux_read_buf(ep, test_link_rate_addr,
+ test_parameter_len, 0);
+ if (rlen < test_parameter_len) {
+ pr_err("failed to read link rate\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ rp = &ep->rxp;
+ bp = rp->data;
+ data = *bp++;
+
+ if (!dp_is_link_rate_valid(data)) {
+ pr_err("invalid link rate = 0x%x\n", data);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ep->test_data.test_link_rate = data;
+ pr_debug("link rate = 0x%x\n", ep->test_data.test_link_rate);
+
+ /* Read the requested lane count (Byte 0x220). */
+ rlen = dp_aux_read_buf(ep, test_lane_count_addr,
+ test_parameter_len, 0);
+ if (rlen < test_parameter_len) {
+ pr_err("failed to read lane count\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ rp = &ep->rxp;
+ bp = rp->data;
+ data = *bp++;
+ data &= 0x1F;
+
+ if (!dp_is_lane_count_valid(data)) {
+ pr_err("invalid lane count = 0x%x\n", data);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ep->test_data.test_lane_count = data;
+ pr_debug("lane count = 0x%x\n", ep->test_data.test_lane_count);
+
+exit:
+ return ret;
+}
+
+/**
+ * dp_sink_parse_sink_count() - parses the sink count
+ * @ep: Display Port Driver data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static void dp_sink_parse_sink_count(struct mdss_dp_drv_pdata *ep)
+{
+ char *bp;
+ char data;
+ struct edp_buf *rp;
+ int rlen;
+ int const param_len = 0x1;
+ int const sink_count_addr = 0x200;
+
+ rlen = dp_aux_read_buf(ep, sink_count_addr, param_len, 0);
+ if (rlen < param_len) {
+ pr_err("failed to read sink count\n");
+ return;
+ }
+ rp = &ep->rxp;
+ bp = rp->data;
+ data = *bp++;
+
+ /* BIT 7, BIT 5:0 */
+ ep->sink_count.count = (data & BIT(7)) << 6 | (data & 0x63);
+ /* BIT 6*/
+ ep->sink_count.cp_ready = data & BIT(6);
+
+ pr_debug("sink_count = 0x%x, cp_ready = 0x%x\n",
+ ep->sink_count.count, ep->sink_count.cp_ready);
+}
+
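For reference, a minimal decode sketch written against the bit layout stated in the comments above (count in bits 5:0 with bit 7 as the MSB, CP_READY in bit 6); the helper names are hypothetical and this is not part of the patch.

#include <linux/bitops.h>
#include <linux/types.h>

/* SINK_COUNT (DPCD 0x200): bits 5:0 plus bit 7 form the count, bit 6 is CP_READY */
static inline u32 example_sink_count(u8 data)
{
	return ((data & BIT(7)) >> 1) | (data & 0x3F);
}

static inline bool example_cp_ready(u8 data)
{
	return !!(data & BIT(6));
}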
+/**
+ * dp_is_test_supported() - checks if test requested by sink is supported
+ * @test_requested: test requested by the sink
+ *
+ * Returns true if the requested test is supported.
+ */
+static bool dp_is_test_supported(u32 test_requested)
+{
+ return test_requested == TEST_LINK_TRAINING;
+}
+
+/**
+ * dp_sink_parse_test_request() - parses test request parameters from sink
+ * @ep: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static void dp_sink_parse_test_request(struct mdss_dp_drv_pdata *ep)
+{
+ int ret = 0;
+ char *bp;
+ char data;
+ struct edp_buf *rp;
+ int rlen;
+ int const test_parameter_len = 0x1;
+ int const device_service_irq_addr = 0x201;
+ int const test_request_addr = 0x218;
+
+ /*
+ * Read the device service IRQ vector (Byte 0x201) to determine
+ * whether an automated test has been requested by the sink.
+ */
+ rlen = dp_aux_read_buf(ep, device_service_irq_addr,
+ test_parameter_len, 0);
+ if (rlen < test_parameter_len) {
+ pr_err("failed to read device service IRQ vector\n");
+ return;
+ }
+ rp = &ep->rxp;
+ bp = rp->data;
+ data = *bp++;
+
+ pr_debug("device service irq vector = 0x%x\n", data);
+
+ if (!(data & BIT(1))) {
+ pr_debug("no test requested\n");
+ return;
+ }
+
+ /*
+ * Read the test request byte (Byte 0x218) to determine what type
+ * of automated test has been requested by the sink.
+ */
+ rlen = dp_aux_read_buf(ep, test_request_addr,
+ test_parameter_len, 0);
+ if (rlen < test_parameter_len) {
+ pr_err("failed to read test_requested\n");
+ return;
+ }
+ rp = &ep->rxp;
+ bp = rp->data;
+ data = *bp++;
+
+ if (!dp_is_test_supported(data)) {
+ pr_debug("test 0x%x not supported\n", data);
+ return;
+ }
+
+ pr_debug("%s requested\n", mdss_dp_get_test_name(data));
+ ep->test_data.test_requested = data;
+
+ if (ep->test_data.test_requested == TEST_LINK_TRAINING)
+ ret = dp_parse_link_training_params(ep);
+
+ /*
+ * Send a TEST_ACK if all test parameters are valid, otherwise send
+ * a TEST_NACK.
+ */
+ if (ret)
+ ep->test_data.response = TEST_NACK;
+ else
+ ep->test_data.response = TEST_ACK;
+}
+
static int dp_cap_lane_rate_set(struct mdss_dp_drv_pdata *ep)
{
char buf[4];
@@ -975,17 +1203,26 @@ static int dp_lane_set_write(struct mdss_dp_drv_pdata *ep, int voltage_level,
{
int i;
char buf[4];
+ u32 max_level_reached = 0;
- if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
- voltage_level |= 0x04;
+ if (voltage_level == DPCD_LINK_VOLTAGE_MAX) {
+ pr_debug("max. voltage swing level reached %d\n",
+ voltage_level);
+ max_level_reached |= BIT(2);
+ }
- if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
- pre_emphasis_level |= 0x04;
+ if (pre_emphasis_level == DPCD_LINK_PRE_EMPHASIS_MAX) {
+ pr_debug("max. pre-emphasis level reached %d\n",
+ pre_emphasis_level);
+ max_level_reached |= BIT(5);
+ }
+
+ pr_debug("max_level_reached = 0x%x\n", max_level_reached);
pre_emphasis_level <<= 3;
for (i = 0; i < 4; i++)
- buf[i] = voltage_level | pre_emphasis_level;
+ buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
pr_debug("p|v=0x%x", voltage_level | pre_emphasis_level);
return dp_aux_write_buf(ep, 0x103, buf, 4, 0);
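A hedged sketch of how the per-lane byte written to DPCD 0x103 above is composed, inferred purely from this hunk (the helper name is hypothetical): voltage swing in the low bits, pre-emphasis shifted up by three, and BIT(2)/BIT(5) flagging that the respective maximum level has been reached.

#include <linux/bitops.h>
#include <linux/types.h>

/* Compose one TRAINING_LANEx_SET byte the same way dp_lane_set_write() does */
static inline u8 example_lane_set_byte(u8 v_level, u8 p_level,
				       bool max_swing, bool max_preemph)
{
	u8 val = v_level | (p_level << 3);

	if (max_swing)
		val |= BIT(2);	/* max voltage swing reached */
	if (max_preemph)
		val |= BIT(5);	/* max pre-emphasis reached */

	return val;
}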
@@ -1001,7 +1238,7 @@ static int dp_train_pattern_set_write(struct mdss_dp_drv_pdata *ep,
return dp_aux_write_buf(ep, 0x102, buf, 1, 0);
}
-static int dp_sink_clock_recovery_done(struct mdss_dp_drv_pdata *ep)
+bool mdss_dp_aux_clock_recovery_done(struct mdss_dp_drv_pdata *ep)
{
u32 mask;
u32 data;
@@ -1022,12 +1259,12 @@ static int dp_sink_clock_recovery_done(struct mdss_dp_drv_pdata *ep)
pr_debug("data=%x mask=%x\n", data, mask);
data &= mask;
if (data == mask) /* all done */
- return 1;
+ return true;
- return 0;
+ return false;
}
-static int dp_sink_channel_eq_done(struct mdss_dp_drv_pdata *ep)
+bool mdss_dp_aux_channel_eq_done(struct mdss_dp_drv_pdata *ep)
{
u32 mask;
u32 data;
@@ -1056,9 +1293,9 @@ static int dp_sink_channel_eq_done(struct mdss_dp_drv_pdata *ep)
data &= mask;
if (data == mask)/* all done */
- return 1;
+ return true;
- return 0;
+ return false;
}
void dp_sink_train_set_adjust(struct mdss_dp_drv_pdata *ep)
@@ -1087,6 +1324,30 @@ void dp_sink_train_set_adjust(struct mdss_dp_drv_pdata *ep)
}
ep->p_level = max;
+
+ /*
+ * Adjust the voltage swing and pre-emphasis level combination to within
+ * the allowable range.
+ */
+ if (ep->v_level > DPCD_LINK_VOLTAGE_MAX) {
+ pr_debug("Requested vSwingLevel=%d, change to %d\n",
+ ep->v_level, DPCD_LINK_VOLTAGE_MAX);
+ ep->v_level = DPCD_LINK_VOLTAGE_MAX;
+ }
+
+ if (ep->p_level > DPCD_LINK_PRE_EMPHASIS_MAX) {
+ pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+ ep->p_level, DPCD_LINK_PRE_EMPHASIS_MAX);
+ ep->p_level = DPCD_LINK_PRE_EMPHASIS_MAX;
+ }
+
+ if ((ep->p_level > DPCD_LINK_PRE_EMPHASIS_LEVEL_1)
+ && (ep->v_level == DPCD_LINK_VOLTAGE_LEVEL_2)) {
+ pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+ ep->p_level, DPCD_LINK_PRE_EMPHASIS_LEVEL_1);
+ ep->p_level = DPCD_LINK_PRE_EMPHASIS_LEVEL_1;
+ }
+
pr_debug("v_level=%d, p_level=%d",
ep->v_level, ep->p_level);
}
@@ -1185,7 +1446,7 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
usleep_range(usleep_time, usleep_time);
dp_link_status_read(ep, 6);
- if (dp_sink_clock_recovery_done(ep)) {
+ if (mdss_dp_aux_clock_recovery_done(ep)) {
ret = 0;
break;
}
@@ -1238,7 +1499,7 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
dp_link_status_read(ep, 6);
- if (dp_sink_channel_eq_done(ep)) {
+ if (mdss_dp_aux_channel_eq_done(ep)) {
ret = 0;
break;
}
@@ -1300,7 +1561,7 @@ static int dp_link_rate_down_shift(struct mdss_dp_drv_pdata *ep)
return -EINVAL;
}
-static int mdss_dp_sink_power_state(struct mdss_dp_drv_pdata *ep, char state)
+int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state)
{
int ret;
@@ -1332,7 +1593,7 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
dp_write(dp->base + DP_MAINLINK_CTRL, 0x1);
- mdss_dp_sink_power_state(dp, SINK_POWER_ON);
+ mdss_dp_aux_set_sink_power_state(dp, SINK_POWER_ON);
train_start:
dp->v_level = 0; /* start from default level */
@@ -1387,6 +1648,13 @@ void mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep)
dp_sink_capability_read(ep, 16);
}
+void mdss_dp_aux_parse_sink_status_field(struct mdss_dp_drv_pdata *ep)
+{
+ dp_sink_parse_sink_count(ep);
+ dp_sink_parse_test_request(ep);
+ dp_link_status_read(ep, 6);
+}
+
int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *ep)
{
struct dpcd_link_status *sp;
@@ -1428,3 +1696,36 @@ void mdss_dp_aux_init(struct mdss_dp_drv_pdata *ep)
dp_buf_init(&ep->txp, ep->txbuf, sizeof(ep->txbuf));
dp_buf_init(&ep->rxp, ep->rxbuf, sizeof(ep->rxbuf));
}
+
+int mdss_dp_aux_read_rx_status(struct mdss_dp_drv_pdata *dp, u8 *rx_status)
+{
+ bool cp_irq;
+ int rc = 0;
+
+ if (!dp) {
+ pr_err("%s Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ *rx_status = 0;
+
+ rc = dp_aux_read_buf(dp, DP_DPCD_CP_IRQ, 1, 0);
+ if (!rc) {
+ pr_err("Error reading CP_IRQ\n");
+ return -EINVAL;
+ }
+
+ cp_irq = *dp->rxp.data & BIT(2);
+
+ if (cp_irq) {
+ rc = dp_aux_read_buf(dp, DP_DPCD_RXSTATUS, 1, 0);
+ if (!rc) {
+ pr_err("Error reading RxStatus\n");
+ return -EINVAL;
+ }
+
+ *rx_status = *dp->rxp.data;
+ }
+
+ return 0;
+}
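A brief usage sketch of the new helper, mirroring how the HDCP 2.2 link worker added later in this patch consumes it (the caller name is hypothetical): read the aggregated RxStatus only when CP_IRQ is pending, then compare it against the abort mask supplied by the HDCP library.

/* Hypothetical caller; in the patch this logic lives in dp_hdcp2p2_link_work() */
static int example_check_link(struct mdss_dp_drv_pdata *dp, u8 abort_mask)
{
	u8 rx_status = 0;
	int rc;

	rc = mdss_dp_aux_read_rx_status(dp, &rx_status);
	if (rc)
		return rc;		/* AUX failure: treat the link as lost */

	if (rx_status & abort_mask)
		return -ENOLINK;	/* sink requested reauth or reported a link failure */

	return 0;			/* nothing pending, or a message is ready to read */
}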
diff --git a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
new file mode 100644
index 000000000000..3891806b09bb
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
@@ -0,0 +1,861 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#include <linux/hdcp_qseecom.h>
+#include "mdss_hdcp.h"
+#include "mdss_dp_util.h"
+
+enum dp_hdcp2p2_sink_status {
+ SINK_DISCONNECTED,
+ SINK_CONNECTED
+};
+
+enum dp_auth_status {
+ DP_HDCP_AUTH_STATUS_FAILURE,
+ DP_HDCP_AUTH_STATUS_SUCCESS
+};
+
+struct dp_hdcp2p2_ctrl {
+ atomic_t auth_state;
+ enum dp_hdcp2p2_sink_status sink_status; /* Is sink connected */
+ struct hdcp_init_data init_data;
+ struct mutex mutex; /* mutex to protect access to ctrl */
+ struct mutex msg_lock; /* mutex to protect access to msg buffer */
+ struct mutex wakeup_mutex; /* mutex to protect access to wakeup call */
+ struct hdcp_ops *ops;
+ void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
+ struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */
+ enum hdmi_hdcp_wakeup_cmd wakeup_cmd;
+ enum dp_auth_status auth_status;
+
+ struct task_struct *thread;
+ struct kthread_worker worker;
+ struct kthread_work status;
+ struct kthread_work auth;
+ struct kthread_work send_msg;
+ struct kthread_work recv_msg;
+ struct kthread_work link;
+ struct kthread_work poll;
+ char *msg_buf;
+ uint32_t send_msg_len; /* length of all parameters in msg */
+ uint32_t timeout;
+ uint32_t num_messages;
+ struct hdcp_msg_part msg_part[HDCP_MAX_MESSAGE_PARTS];
+ u8 sink_rx_status;
+ u8 rx_status;
+ char abort_mask;
+
+ bool cp_irq_done;
+ bool polling;
+};
+
+static inline char *dp_hdcp_cmd_to_str(uint32_t cmd)
+{
+ switch (cmd) {
+ case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE:
+ return "HDMI_HDCP_WKUP_CMD_SEND_MESSAGE";
+ case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE:
+ return "HDMI_HDCP_WKUP_CMD_RECV_MESSAGE";
+ case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS:
+ return "HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS";
+ case HDMI_HDCP_WKUP_CMD_STATUS_FAILED:
+ return "DP_HDCP_WKUP_CMD_STATUS_FAIL";
+ case HDMI_HDCP_WKUP_CMD_LINK_POLL:
+ return "HDMI_HDCP_WKUP_CMD_LINK_POLL";
+ case HDMI_HDCP_WKUP_CMD_AUTHENTICATE:
+ return "HDMI_HDCP_WKUP_CMD_AUTHENTICATE";
+ default:
+ return "???";
+ }
+}
+
+static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_AUTHENTICATE)
+ return true;
+
+ if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+ return true;
+
+ return false;
+}
+
+static int dp_hdcp2p2_copy_buf(struct dp_hdcp2p2_ctrl *ctrl,
+ struct hdmi_hdcp_wakeup_data *data)
+{
+ int i = 0;
+
+ if (!data || !data->message_data)
+ return 0;
+
+ mutex_lock(&ctrl->msg_lock);
+
+ ctrl->timeout = data->timeout;
+ ctrl->num_messages = data->message_data->num_messages;
+ ctrl->send_msg_len = 0; /* Total len of all messages */
+
+ for (i = 0; i < ctrl->num_messages ; i++)
+ ctrl->send_msg_len += data->message_data->messages[i].length;
+
+ memcpy(ctrl->msg_part, data->message_data->messages,
+ sizeof(data->message_data->messages));
+
+ ctrl->rx_status = data->message_data->rx_status;
+ ctrl->abort_mask = data->abort_mask;
+
+ if (!data->send_msg_len) {
+ mutex_unlock(&ctrl->msg_lock);
+ return 0;
+ }
+
+ kzfree(ctrl->msg_buf);
+
+ ctrl->msg_buf = kzalloc(ctrl->send_msg_len, GFP_KERNEL);
+
+ if (!ctrl->msg_buf) {
+ mutex_unlock(&ctrl->msg_lock);
+ return -ENOMEM;
+ }
+
+ /* ignore first byte as it contains message id */
+ memcpy(ctrl->msg_buf, data->send_msg_buf + 1, ctrl->send_msg_len);
+
+ mutex_unlock(&ctrl->msg_lock);
+
+ return 0;
+}
+
+static int dp_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data)
+{
+ struct dp_hdcp2p2_ctrl *ctrl;
+ u32 const default_timeout_us = 500;
+
+ if (!data) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ ctrl = data->context;
+ if (!ctrl) {
+ pr_err("invalid ctrl\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctrl->wakeup_mutex);
+
+ ctrl->wakeup_cmd = data->cmd;
+
+ if (data->timeout)
+ ctrl->timeout = (data->timeout) * 2;
+ else
+ ctrl->timeout = default_timeout_us;
+
+ if (!dp_hdcp2p2_is_valid_state(ctrl)) {
+ pr_err("invalid state\n");
+ goto exit;
+ }
+
+ if (dp_hdcp2p2_copy_buf(ctrl, data))
+ goto exit;
+
+ if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS)
+ ctrl->auth_status = DP_HDCP_AUTH_STATUS_SUCCESS;
+ else if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_FAILED)
+ ctrl->auth_status = DP_HDCP_AUTH_STATUS_FAILURE;
+
+ switch (ctrl->wakeup_cmd) {
+ case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE:
+ queue_kthread_work(&ctrl->worker, &ctrl->send_msg);
+ break;
+ case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE:
+ queue_kthread_work(&ctrl->worker, &ctrl->recv_msg);
+ break;
+ case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS:
+ case HDMI_HDCP_WKUP_CMD_STATUS_FAILED:
+ queue_kthread_work(&ctrl->worker, &ctrl->status);
+ break;
+ case HDMI_HDCP_WKUP_CMD_LINK_POLL:
+ queue_kthread_work(&ctrl->worker, &ctrl->poll);
+ break;
+ case HDMI_HDCP_WKUP_CMD_AUTHENTICATE:
+ queue_kthread_work(&ctrl->worker, &ctrl->auth);
+ break;
+ default:
+ pr_err("invalid wakeup command %d\n", ctrl->wakeup_cmd);
+ }
+exit:
+ mutex_unlock(&ctrl->wakeup_mutex);
+ return 0;
+}
+
+static inline int dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl,
+ struct hdcp_lib_wakeup_data *data)
+{
+ int rc = 0;
+
+ if (ctrl && ctrl->lib && ctrl->lib->wakeup &&
+ data && (data->cmd != HDCP_LIB_WKUP_CMD_INVALID)) {
+ rc = ctrl->lib->wakeup(data);
+ if (rc)
+ pr_err("error sending %s to lib\n",
+ hdcp_lib_cmd_to_str(data->cmd));
+ }
+
+ return rc;
+}
+
+static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ ctrl->sink_status = SINK_DISCONNECTED;
+ atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+}
+
+static void dp_hdcp2p2_off(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+ struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ dp_hdcp2p2_reset(ctrl);
+
+ flush_kthread_worker(&ctrl->worker);
+
+ cdata.context = input;
+ dp_hdcp2p2_wakeup(&cdata);
+}
+
+static int dp_hdcp2p2_authenticate(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+ struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+ int rc = 0;
+
+ flush_kthread_worker(&ctrl->worker);
+
+ ctrl->sink_status = SINK_CONNECTED;
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
+
+ cdata.context = input;
+ dp_hdcp2p2_wakeup(&cdata);
+
+ return rc;
+}
+
+static int dp_hdcp2p2_reauthenticate(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ dp_hdcp2p2_reset((struct dp_hdcp2p2_ctrl *)input);
+
+ return dp_hdcp2p2_authenticate(input);
+}
+
+static ssize_t dp_hdcp2p2_sysfs_wta_min_level_change(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = mdss_dp_get_hdcp_data(dev);
+ struct hdcp_lib_wakeup_data cdata = {
+ HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE};
+ bool enc_notify = true;
+ int enc_lvl;
+ int min_enc_lvl;
+ int rc;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = kstrtoint(buf, 10, &min_enc_lvl);
+ if (rc) {
+ pr_err("kstrtoint failed. rc=%d\n", rc);
+ goto exit;
+ }
+
+ switch (min_enc_lvl) {
+ case 0:
+ enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+ break;
+ case 1:
+ enc_lvl = HDCP_STATE_AUTH_ENC_1X;
+ break;
+ case 2:
+ enc_lvl = HDCP_STATE_AUTH_ENC_2P2;
+ break;
+ default:
+ enc_notify = false;
+ }
+
+ pr_debug("enc level changed %d\n", min_enc_lvl);
+
+ cdata.context = ctrl->lib_ctx;
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ if (enc_notify && ctrl->init_data.notify_status)
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+
+ rc = count;
+exit:
+ return rc;
+}
+
+static void dp_hdcp2p2_auth_failed(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+ /* notify DP about HDCP failure */
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTH_FAIL);
+}
+
+static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl,
+ u8 *buf, int size, int offset, u32 timeout)
+{
+ int rc, max_size = 16, read_size, len = size;
+ u8 *buf_start = buf;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return -EINVAL;
+ }
+
+ do {
+ struct edp_cmd cmd = {0};
+
+ read_size = min(size, max_size);
+
+ cmd.read = 1;
+ cmd.addr = offset;
+ cmd.len = read_size;
+ cmd.out_buf = buf;
+
+ rc = dp_aux_read(ctrl->init_data.cb_data, &cmd);
+ if (rc) {
+ pr_err("Aux read failed\n");
+ break;
+ }
+
+ buf += read_size;
+ offset += read_size;
+ size -= read_size;
+ } while (size > 0);
+
+ print_hex_dump(KERN_DEBUG, "hdcp2p2: ", DUMP_PREFIX_NONE,
+ 16, 1, buf_start, len, false);
+ return rc;
+}
+
+static int dp_hdcp2p2_aux_write_message(struct dp_hdcp2p2_ctrl *ctrl,
+ u8 *buf, int size, uint offset, uint timeout)
+{
+ int rc, max_size = 16, write_size;
+
+ do {
+ struct edp_cmd cmd = {0};
+
+ write_size = min(size, max_size);
+
+ cmd.read = 0;
+ cmd.addr = offset;
+ cmd.len = write_size;
+ cmd.datap = buf;
+
+ rc = dp_aux_write(ctrl->init_data.cb_data, &cmd);
+ if (rc) {
+ pr_err("Aux write failed\n");
+ break;
+ }
+
+ buf += write_size;
+ offset += write_size;
+ size -= write_size;
+ } while (size > 0);
+
+ return rc;
+}
+
+static DEVICE_ATTR(min_level_change, S_IWUSR, NULL,
+ dp_hdcp2p2_sysfs_wta_min_level_change);
+
+static struct attribute *dp_hdcp2p2_fs_attrs[] = {
+ &dev_attr_min_level_change.attr,
+ NULL,
+};
+
+static struct attribute_group dp_hdcp2p2_fs_attr_group = {
+ .name = "dp_hdcp2p2",
+ .attrs = dp_hdcp2p2_fs_attrs,
+};
+
+static bool dp_hdcp2p2_feature_supported(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+ struct hdcp_txmtr_ops *lib = NULL;
+ bool supported = false;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ goto end;
+ }
+
+ lib = ctrl->lib;
+ if (!lib) {
+ pr_err("invalid lib ops data\n");
+ goto end;
+ }
+
+ if (lib->feature_supported)
+ supported = lib->feature_supported(
+ ctrl->lib_ctx);
+end:
+ return supported;
+}
+
+static void dp_hdcp2p2_send_msg_work(struct kthread_work *work)
+{
+ int rc = 0;
+ int i;
+ int sent_bytes = 0;
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, send_msg);
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+ char *buf = NULL;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ goto exit;
+ }
+
+ mutex_lock(&ctrl->msg_lock);
+
+ /* Loop through number of parameters in the messages. */
+ for (i = 0; i < ctrl->num_messages; i++) {
+ buf = ctrl->msg_buf + sent_bytes;
+
+ /* Forward the message to the sink */
+ rc = dp_hdcp2p2_aux_write_message(ctrl, buf,
+ (size_t)ctrl->msg_part[i].length,
+ ctrl->msg_part[i].offset, ctrl->timeout);
+ if (rc) {
+ pr_err("Error sending msg to sink %d\n", rc);
+ mutex_unlock(&ctrl->msg_lock);
+ goto exit;
+ }
+ sent_bytes += ctrl->msg_part[i].length;
+ }
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS;
+ cdata.timeout = ctrl->timeout;
+ mutex_unlock(&ctrl->msg_lock);
+
+exit:
+ if (rc == -ETIMEDOUT)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+ else if (rc)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ int i, rc = 0;
+ char *recvd_msg_buf = NULL;
+ struct hdcp_lib_wakeup_data cdata = { HDCP_LIB_WKUP_CMD_INVALID };
+ int bytes_read = 0;
+
+ cdata.context = ctrl->lib_ctx;
+
+ recvd_msg_buf = kzalloc(ctrl->send_msg_len, GFP_KERNEL);
+ if (!recvd_msg_buf) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ for (i = 0; i < ctrl->num_messages; i++) {
+ rc = dp_hdcp2p2_aux_read_message(
+ ctrl, recvd_msg_buf + bytes_read,
+ ctrl->msg_part[i].length,
+ ctrl->msg_part[i].offset,
+ ctrl->timeout);
+ if (rc) {
+ pr_err("error reading message %d\n", rc);
+ goto exit;
+ }
+ bytes_read += ctrl->msg_part[i].length;
+ }
+
+ cdata.recvd_msg_buf = recvd_msg_buf;
+ cdata.recvd_msg_len = ctrl->send_msg_len;
+ cdata.timeout = ctrl->timeout;
+exit:
+ if (rc == -ETIMEDOUT)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+ else if (rc)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+ else
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ kfree(recvd_msg_buf);
+
+ return rc;
+}
+
+static void dp_hdcp2p2_recv_msg_work(struct kthread_work *work)
+{
+ int rc = 0;
+ struct hdcp_lib_wakeup_data cdata = { HDCP_LIB_WKUP_CMD_INVALID };
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, recv_msg);
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ goto exit;
+ }
+
+ if (ctrl->sink_rx_status & ctrl->abort_mask) {
+ pr_err("reauth or Link fail triggered by sink\n");
+
+ ctrl->sink_rx_status = 0;
+ rc = -ENOLINK;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+
+ goto exit;
+ }
+
+ if (ctrl->rx_status && !ctrl->sink_rx_status) {
+ pr_debug("Recv msg for RxStatus, but no CP_IRQ yet\n");
+ ctrl->polling = true;
+ goto exit;
+ }
+
+ dp_hdcp2p2_get_msg_from_sink(ctrl);
+
+ return;
+exit:
+ if (rc)
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static void dp_hdcp2p2_poll_work(struct kthread_work *work)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, poll);
+
+ if (ctrl->cp_irq_done) {
+ ctrl->cp_irq_done = false;
+ dp_hdcp2p2_get_msg_from_sink(ctrl);
+ } else {
+ ctrl->polling = true;
+ }
+}
+
+static void dp_hdcp2p2_auth_status_work(struct kthread_work *work)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, status);
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return;
+ }
+
+ if (ctrl->auth_status == DP_HDCP_AUTH_STATUS_SUCCESS) {
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTHENTICATED);
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
+ } else {
+ dp_hdcp2p2_auth_failed(ctrl);
+ }
+}
+
+static void dp_hdcp2p2_link_work(struct kthread_work *work)
+{
+ int rc = 0;
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, link);
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ ctrl->sink_rx_status = 0;
+ rc = mdss_dp_aux_read_rx_status(ctrl->init_data.cb_data,
+ &ctrl->sink_rx_status);
+
+ if (rc) {
+ pr_err("failed to read rx status\n");
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ goto exit;
+ }
+
+ if (ctrl->sink_rx_status & ctrl->abort_mask) {
+ pr_err("reauth or Link fail triggered by sink\n");
+
+ ctrl->sink_rx_status = 0;
+ ctrl->rx_status = 0;
+
+ rc = -ENOLINK;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ goto exit;
+ }
+
+ /*
+ * If the library is already polling for this message and the expected
+ * RxStatus bits are set, read it now; otherwise record that a CP_IRQ
+ * has arrived so the poll worker can consume it later.
+ */
+ if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
+ ctrl->sink_rx_status = 0;
+ ctrl->rx_status = 0;
+
+ rc = dp_hdcp2p2_get_msg_from_sink(ctrl);
+
+ ctrl->polling = false;
+ } else {
+ ctrl->cp_irq_done = true;
+ }
+exit:
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ if (rc) {
+ dp_hdcp2p2_auth_failed(ctrl);
+ return;
+ }
+}
+
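The interplay between the recv_msg, poll and link workers above hinges on the polling and cp_irq_done flags; a hedged summary of the handshake as read from this file:

/*
 * Handshake between the HDCP 2.2 workers (as implemented above):
 *
 *  - recv_msg: the library wants a message; if it is gated on RxStatus and
 *    no CP_IRQ data has arrived yet, set `polling` and return without
 *    reading.
 *  - link (CP_IRQ): read RxStatus from the sink; if `polling` is set and
 *    the expected bits are present, read the message now and clear
 *    `polling`, otherwise remember the event via `cp_irq_done`.
 *  - poll: the library asks again; if `cp_irq_done` is set, consume it and
 *    read the message, otherwise set `polling` and wait for the next CP_IRQ.
 *
 * Net effect: whichever of "CP_IRQ arrived" and "library ready to receive"
 * happens second performs the actual AUX read.
 */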
+static void dp_hdcp2p2_auth_work(struct kthread_work *work)
+{
+ int rc = 0;
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, auth);
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_START;
+ else
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+
+ rc = dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ if (rc)
+ dp_hdcp2p2_auth_failed(ctrl);
+}
+
+static int dp_hdcp2p2_isr(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ queue_kthread_work(&ctrl->worker, &ctrl->link);
+
+ return 0;
+}
+
+void dp_hdcp2p2_deinit(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ cdata.context = ctrl->lib_ctx;
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ kthread_stop(ctrl->thread);
+
+ sysfs_remove_group(ctrl->init_data.sysfs_kobj,
+ &dp_hdcp2p2_fs_attr_group);
+
+ mutex_destroy(&ctrl->mutex);
+ mutex_destroy(&ctrl->msg_lock);
+ mutex_destroy(&ctrl->wakeup_mutex);
+ kzfree(ctrl->msg_buf);
+ kfree(ctrl);
+}
+
+void *dp_hdcp2p2_init(struct hdcp_init_data *init_data)
+{
+ int rc;
+ struct dp_hdcp2p2_ctrl *ctrl;
+ static struct hdcp_ops ops = {
+ .reauthenticate = dp_hdcp2p2_reauthenticate,
+ .authenticate = dp_hdcp2p2_authenticate,
+ .feature_supported = dp_hdcp2p2_feature_supported,
+ .off = dp_hdcp2p2_off,
+ .isr = dp_hdcp2p2_isr
+ };
+
+ static struct hdcp_client_ops client_ops = {
+ .wakeup = dp_hdcp2p2_wakeup,
+ };
+
+ static struct hdcp_txmtr_ops txmtr_ops;
+ struct hdcp_register_data register_data = {0};
+
+ if (!init_data || !init_data->cb_data ||
+ !init_data->notify_status) {
+ pr_err("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ ctrl->init_data = *init_data;
+ ctrl->lib = &txmtr_ops;
+ ctrl->msg_buf = NULL;
+ rc = sysfs_create_group(init_data->sysfs_kobj,
+ &dp_hdcp2p2_fs_attr_group);
+ if (rc) {
+ pr_err("dp_hdcp2p2 sysfs group creation failed\n");
+ goto error;
+ }
+
+ ctrl->sink_status = SINK_DISCONNECTED;
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+
+ ctrl->ops = &ops;
+ mutex_init(&ctrl->mutex);
+ mutex_init(&ctrl->msg_lock);
+ mutex_init(&ctrl->wakeup_mutex);
+
+ register_data.hdcp_ctx = &ctrl->lib_ctx;
+ register_data.client_ops = &client_ops;
+ register_data.txmtr_ops = &txmtr_ops;
+ register_data.device_type = HDCP_TXMTR_DP;
+ register_data.client_ctx = ctrl;
+
+ rc = hdcp_library_register(&register_data);
+ if (rc) {
+ pr_err("Unable to register with HDCP 2.2 library\n");
+ goto error;
+ }
+
+ init_kthread_worker(&ctrl->worker);
+
+ init_kthread_work(&ctrl->auth, dp_hdcp2p2_auth_work);
+ init_kthread_work(&ctrl->send_msg, dp_hdcp2p2_send_msg_work);
+ init_kthread_work(&ctrl->recv_msg, dp_hdcp2p2_recv_msg_work);
+ init_kthread_work(&ctrl->status, dp_hdcp2p2_auth_status_work);
+ init_kthread_work(&ctrl->link, dp_hdcp2p2_link_work);
+ init_kthread_work(&ctrl->poll, dp_hdcp2p2_poll_work);
+
+ ctrl->thread = kthread_run(kthread_worker_fn,
+ &ctrl->worker, "dp_hdcp2p2");
+
+ if (IS_ERR(ctrl->thread)) {
+ pr_err("unable to start DP hdcp2p2 thread\n");
+ rc = PTR_ERR(ctrl->thread);
+ ctrl->thread = NULL;
+ goto error;
+ }
+
+ return ctrl;
+error:
+ kfree(ctrl);
+ return ERR_PTR(rc);
+}
+
+static bool dp_hdcp2p2_supported(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ struct edp_cmd cmd = {0};
+ const u32 offset = 0x6921d;
+ u8 buf;
+
+ cmd.read = 1;
+ cmd.addr = offset;
+ cmd.len = sizeof(buf);
+ cmd.out_buf = &buf;
+
+ if (dp_aux_read(ctrl->init_data.cb_data, &cmd)) {
+ pr_err("RxCaps read failed\n");
+ goto error;
+ }
+
+ pr_debug("rxcaps 0x%x\n", buf);
+
+ if (buf & BIT(1))
+ return true;
+error:
+ return false;
+}
+
+struct hdcp_ops *dp_hdcp2p2_start(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+
+ pr_debug("Checking sink capability\n");
+ if (dp_hdcp2p2_supported(ctrl))
+ return ctrl->ops;
+ else
+ return NULL;
+}
+
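To tie the new file together, a hedged sketch of the lifecycle as the DP driver in this patch drives it (error handling trimmed; the function is an illustration, not additional patch code):

#include <linux/err.h>

/* Illustration only: how mdss_dp.c uses the dp_hdcp2p2 entry points above */
static int example_dp_hdcp2p2_lifecycle(struct hdcp_init_data *init_data)
{
	void *ctx;
	struct hdcp_ops *ops;

	ctx = dp_hdcp2p2_init(init_data);	/* at HDCP init time */
	if (IS_ERR_OR_NULL(ctx))
		return -EINVAL;

	ops = dp_hdcp2p2_start(ctx);		/* checks sink RxCaps for HDCP 2.2 */
	if (ops && ops->feature_supported && ops->feature_supported(ctx))
		ops->authenticate(ctx);		/* on MDSS_EVENT_PANEL_ON */

	/*
	 * ops->isr(ctx) is invoked from the attention path on CP_IRQ,
	 * ops->off(ctx) on MDSS_EVENT_BLANK, and dp_hdcp2p2_deinit(ctx)
	 * at driver remove time.
	 */
	dp_hdcp2p2_deinit(ctx);
	return 0;
}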
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index b1eb8e0c9579..2d24d8499105 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -27,10 +27,6 @@
#define DP_LS_FREQ_162 162000000
#define DP_LS_FREQ_270 270000000
#define DP_LS_FREQ_540 540000000
-#define AUDIO_FREQ_32 32000
-#define AUDIO_FREQ_44_1 44100
-#define AUDIO_FREQ_48 48000
-#define DP_AUDIO_FREQ_COUNT 3
enum mdss_dp_pin_assignment {
PIN_ASSIGNMENT_A,
@@ -55,68 +51,12 @@ static const char *mdss_dp_pin_name(u8 pin)
}
}
-static const uint32_t naud_value[DP_AUDIO_FREQ_COUNT][DP_AUDIO_FREQ_COUNT] = {
- { 10125, 16875, 33750 },
- { 5625, 9375, 18750 },
- { 3375, 5625, 11250 }
-};
-
-static const uint32_t maud_rate[DP_AUDIO_FREQ_COUNT] = { 1024, 784, 512 };
-
-static const uint32_t audio_timing_rbr[DP_AUDIO_FREQ_COUNT] = {
- MMSS_DP_AUDIO_TIMING_RBR_32,
- MMSS_DP_AUDIO_TIMING_RBR_44,
- MMSS_DP_AUDIO_TIMING_RBR_48
-};
-
-static const uint32_t std_audio_freq_list[DP_AUDIO_FREQ_COUNT] = {
- AUDIO_FREQ_32,
- AUDIO_FREQ_44_1,
- AUDIO_FREQ_48
-};
-
struct mdss_hw mdss_dp_hw = {
.hw_ndx = MDSS_HW_EDP,
.ptr = NULL,
.irq_handler = dp_isr,
};
-static int mdss_dp_get_rate_index(uint32_t rate)
-{
- int index = 0;
-
- switch (rate) {
- case DP_LS_FREQ_162:
- case AUDIO_FREQ_32:
- index = 0;
- break;
- case DP_LS_FREQ_270:
- case AUDIO_FREQ_44_1:
- index = 1;
- break;
- case DP_LS_FREQ_540:
- case AUDIO_FREQ_48:
- index = 2;
- break;
- default:
- index = 0;
- pr_err("unsupported rate\n");
- break;
- }
-
- return index;
-}
-
-static bool match_std_freq(uint32_t audio_freq, uint32_t std_freq)
-{
- int quotient = audio_freq / std_freq;
-
- if (quotient & (quotient - 1))
- return false;
- else
- return true;
-}
-
/* DP retrieve ctrl HW version */
u32 mdss_dp_get_ctrl_hw_version(struct dss_io_data *ctrl_io)
{
@@ -866,52 +806,6 @@ void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io)
mdss_dp_audio_setup_isrc_sdp(ctrl_io);
}
-void mdss_dp_audio_set_sample_rate(struct dss_io_data *ctrl_io,
- char dp_link_rate, uint32_t audio_freq)
-{
- uint32_t link_rate;
- uint32_t default_audio_freq = AUDIO_FREQ_32;
- int i, multiplier = 1;
- uint32_t maud_index, lrate_index, register_index, value;
-
- link_rate = (uint32_t)dp_link_rate * DP_LINK_RATE_MULTIPLIER;
-
- pr_debug("link_rate = %u, audio_freq = %u\n", link_rate, audio_freq);
-
- for (i = 0; i < DP_AUDIO_FREQ_COUNT; i++) {
- if (audio_freq % std_audio_freq_list[i])
- continue;
-
- if (match_std_freq(audio_freq, std_audio_freq_list[i])) {
- default_audio_freq = std_audio_freq_list[i];
- multiplier = audio_freq / default_audio_freq;
- break;
- }
- }
-
- pr_debug("default_audio_freq = %u, multiplier = %d\n",
- default_audio_freq, multiplier);
-
- lrate_index = mdss_dp_get_rate_index(link_rate);
- maud_index = mdss_dp_get_rate_index(default_audio_freq);
-
- pr_debug("lrate_index = %u, maud_index = %u, maud = %u, naud = %u\n",
- lrate_index, maud_index,
- maud_rate[maud_index] * multiplier,
- naud_value[maud_index][lrate_index]);
-
- register_index = mdss_dp_get_rate_index(default_audio_freq);
- value = ((maud_rate[maud_index] * multiplier) << 16) |
- naud_value[maud_index][lrate_index];
-
- pr_debug("reg index = %d, offset = 0x%x, value = 0x%x\n",
- (int)register_index, audio_timing_rbr[register_index],
- value);
-
- writel_relaxed(value, ctrl_io->base +
- audio_timing_rbr[register_index]);
-}
-
void mdss_dp_set_safe_to_exit_level(struct dss_io_data *ctrl_io,
uint32_t lane_cnt)
{
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index cf2286f9b58a..334c0071050d 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -37,6 +37,9 @@
#define DP_AUX_TRANS_CTRL (0x00000238)
#define DP_AUX_STATUS (0x00000244)
+#define DP_DPCD_CP_IRQ (0x201)
+#define DP_DPCD_RXSTATUS (0x69493)
+
#define DP_INTERRUPT_TRANS_NUM (0x000002A0)
#define DP_MAINLINK_CTRL (0x00000400)
@@ -299,5 +302,6 @@ void mdss_dp_audio_set_sample_rate(struct dss_io_data *ctrl_io,
char dp_link_rate, uint32_t audio_freq);
void mdss_dp_set_safe_to_exit_level(struct dss_io_data *ctrl_io,
uint32_t lane_cnt);
+int mdss_dp_aux_read_rx_status(struct mdss_dp_drv_pdata *dp, u8 *rx_status);
#endif /* __DP_UTIL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 66cd99720afa..cf7a398c13ce 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -995,6 +995,7 @@ static void mdss_dsi_debugfs_cleanup(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
struct mdss_dsi_debugfs_info *dfs = ctrl->debugfs_info;
if (dfs && dfs->root)
debugfs_remove_recursive(dfs->root);
+ kfree(dfs);
pdata = pdata->next;
} while (pdata);
pr_debug("%s: Cleaned up mdss_dsi_debugfs_info\n", __func__);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index 18bcdca31bf6..c9168242da60 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -1108,6 +1108,11 @@ static int mdss_dsi_read_status(struct mdss_dsi_ctrl_pdata *ctrl)
rc = 1;
lenp = ctrl->status_valid_params ?: ctrl->status_cmds_rlen;
+ if (!lenp || !ctrl->status_cmds_rlen) {
+ pr_err("invalid dsi read params!\n");
+ return 0;
+ }
+
for (i = 0; i < ctrl->status_cmds.cmd_cnt; ++i) {
memset(&cmdreq, 0, sizeof(cmdreq));
cmdreq.cmds = ctrl->status_cmds.cmds + i;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 01fc01425a3a..bd0c2ad32c05 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -25,6 +25,7 @@
#include "mdss_dsi.h"
#include "mdss_dba_utils.h"
+#include "mdss_debug.h"
#define DT_CMD_HDR 6
#define DEFAULT_MDP_TRANSFER_TIME 14000
@@ -1498,7 +1499,7 @@ static int mdss_dsi_parse_reset_seq(struct device_node *np,
static bool mdss_dsi_cmp_panel_reg_v2(struct mdss_dsi_ctrl_pdata *ctrl)
{
- int i, j;
+ int i, j = 0;
int len = 0, *lenp;
int group = 0;
@@ -1507,6 +1508,15 @@ static bool mdss_dsi_cmp_panel_reg_v2(struct mdss_dsi_ctrl_pdata *ctrl)
for (i = 0; i < ctrl->status_cmds.cmd_cnt; i++)
len += lenp[i];
+ for (i = 0; i < len; i++) {
+ pr_debug("[%i] return:0x%x status:0x%x\n",
+ i, (unsigned int)ctrl->return_buf[i],
+ (unsigned int)ctrl->status_value[j + i]);
+ MDSS_XLOG(ctrl->ndx, ctrl->return_buf[i],
+ ctrl->status_value[j + i]);
+ j += len;
+ }
+
for (j = 0; j < ctrl->groups; ++j) {
for (i = 0; i < len; ++i) {
if (ctrl->return_buf[i] !=
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index fc8d3898351e..bcd23d3c19f2 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -615,8 +615,8 @@ static ssize_t mdss_fb_force_panel_dead(struct device *dev,
return len;
}
- if (sscanf(buf, "%d", &pdata->panel_info.panel_force_dead) != 1)
- pr_err("sccanf buf error!\n");
+ if (kstrtouint(buf, 0, &pdata->panel_info.panel_force_dead))
+ pr_err("kstrtouint buf error!\n");
return len;
}
@@ -729,8 +729,8 @@ static ssize_t mdss_fb_change_dfps_mode(struct device *dev,
}
pinfo = &pdata->panel_info;
- if (sscanf(buf, "%d", &dfps_mode) != 1) {
- pr_err("sccanf buf error!\n");
+ if (kstrtouint(buf, 0, &dfps_mode)) {
+ pr_err("kstrtouint buf error!\n");
return len;
}
@@ -1280,6 +1280,7 @@ static int mdss_fb_remove(struct platform_device *pdev)
return -EINVAL;
mdss_fb_unregister_input_handler(mfd);
+ mdss_panel_debugfs_cleanup(mfd->panel_info);
if (mdss_fb_suspend_sub(mfd))
pr_err("msm_fb_remove: can't stop the device %d\n",
@@ -2702,7 +2703,9 @@ static int mdss_fb_release_all(struct fb_info *info, bool release_all)
* enabling ahead of unblank. for some special cases like
* adb shell stop/start.
*/
+ mutex_lock(&mfd->bl_lock);
mdss_fb_set_backlight(mfd, 0);
+ mutex_unlock(&mfd->bl_lock);
ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
mfd->op_enable);
diff --git a/drivers/video/fbdev/msm/mdss_hdcp_1x.h b/drivers/video/fbdev/msm/mdss_hdcp.h
index 426b13a340f4..d373d22384e8 100644
--- a/drivers/video/fbdev/msm/mdss_hdcp_1x.h
+++ b/drivers/video/fbdev/msm/mdss_hdcp.h
@@ -14,6 +14,7 @@
#define __MDSS_HDMI_HDCP_H__
#include "mdss_hdmi_util.h"
+#include "mdss_dp.h"
#include <video/msm_hdmi_modes.h>
#include <soc/qcom/scm.h>
@@ -42,6 +43,8 @@ struct hdcp_init_data {
void *cb_data;
void (*notify_status)(void *cb_data, enum hdcp_states status);
struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+ u8 sink_rx_status;
+ void *dp_data;
u32 phy_addr;
u32 hdmi_tx_ver;
struct msm_hdmi_mode_timing_info *timing;
@@ -59,12 +62,17 @@ struct hdcp_ops {
};
void *hdcp_1x_init(struct hdcp_init_data *init_data);
-void *hdmi_hdcp2p2_init(struct hdcp_init_data *init_data);
void hdcp_1x_deinit(void *input);
+
+void *hdmi_hdcp2p2_init(struct hdcp_init_data *init_data);
void hdmi_hdcp2p2_deinit(void *input);
+void *dp_hdcp2p2_init(struct hdcp_init_data *init_data);
+void dp_hdcp2p2_deinit(void *input);
+
struct hdcp_ops *hdcp_1x_start(void *input);
struct hdcp_ops *hdmi_hdcp2p2_start(void *input);
+struct hdcp_ops *dp_hdcp2p2_start(void *input);
const char *hdcp_state_name(enum hdcp_states hdcp_state);
diff --git a/drivers/video/fbdev/msm/mdss_hdcp_1x.c b/drivers/video/fbdev/msm/mdss_hdcp_1x.c
index 5b490ad67e65..1e502cf750a6 100644
--- a/drivers/video/fbdev/msm/mdss_hdcp_1x.c
+++ b/drivers/video/fbdev/msm/mdss_hdcp_1x.c
@@ -18,7 +18,7 @@
#include <linux/iopoll.h>
#include <soc/qcom/scm.h>
#include <linux/hdcp_qseecom.h>
-#include "mdss_hdcp_1x.h"
+#include "mdss_hdcp.h"
#include "mdss_fb.h"
#include "mdss_dp_util.h"
#include "video/msm_hdmi_hdcp_mgr.h"
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 2047a047b537..b90ac82049c6 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -831,14 +831,10 @@ static const u8 *hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
u32 offset = start_offset;
u32 dbc_offset = in_buf[2];
- if (dbc_offset >= EDID_BLOCK_SIZE - EDID_DTD_LEN)
- return NULL;
- *len = 0;
-
/*
* * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
* collection present.
- * * edid buffer 1, byte 2 being 0 menas no non-DTD/DATA block
+ * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
* collection present and no DTD data present.
*/
if ((dbc_offset == 0) || (dbc_offset == 4)) {
@@ -858,8 +854,6 @@ static const u8 *hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
}
offset += 1 + block_len;
}
- DEV_WARN("%s: EDID: type=%d block not found in EDID block\n",
- __func__, type);
return NULL;
} /* hdmi_edid_find_block */
@@ -1602,7 +1596,6 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
if (rc < 0)
rc = hdmi_set_resv_timing_info(&timing);
} else {
- DEV_ERR("%s: Invalid frame data\n", __func__);
rc = -EINVAL;
}
@@ -1611,7 +1604,6 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
DEV_DBG("%s: DTD mode found: %d\n", __func__, *disp_mode);
} else {
*disp_mode = HDMI_VFRMT_UNKNOWN;
- DEV_ERR("%s: error adding mode from DTD: %d\n", __func__, rc);
}
} /* hdmi_edid_detail_desc */
@@ -1987,7 +1979,6 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
u32 has60hz_mode = false;
u32 has50hz_mode = false;
u32 desc_offset = 0;
- bool read_block0_res = false;
struct hdmi_edid_sink_data *sink_data = NULL;
if (!edid_ctrl) {
@@ -2004,12 +1995,6 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET,
VIDEO_DATA_BLOCK, &len) : NULL;
- if (num_of_cea_blocks && (len == 0 || len > MAX_DATA_BLOCK_SIZE)) {
- DEV_DBG("%s: fall back to block 0 res\n", __func__);
- svd = NULL;
- read_block0_res = true;
- }
-
sink_data = &edid_ctrl->sink_data;
sink_data->disp_multi_3d_mode_list_cnt = 0;
@@ -2059,20 +2044,21 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
edid_blk0+0x36+desc_offset,
&video_format);
- DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
+ if (video_format != HDMI_VFRMT_UNKNOWN) {
+ DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+ __func__, __LINE__,
+ msm_hdmi_mode_2string(video_format));
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
+ /* Make a note of the preferred video format */
+ if (i == 0)
+ sink_data->preferred_video_format =
+ video_format;
}
desc_offset += 0x12;
++i;
@@ -2088,28 +2074,32 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
* * EDID_BLOCK_SIZE = 0x80 Each page size in the EDID ROM
*/
desc_offset = edid_blk1[0x02];
- i = 0;
- while (!edid_blk1[desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk1+desc_offset,
- &video_format);
-
- DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
-
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
-
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
+ if (desc_offset < (EDID_BLOCK_SIZE - EDID_DTD_LEN)) {
+ i = 0;
+ while (!edid_blk1[desc_offset]) {
+ hdmi_edid_detail_desc(edid_ctrl,
+ edid_blk1+desc_offset,
+ &video_format);
+
+ if (video_format != HDMI_VFRMT_UNKNOWN) {
+ DEV_DBG("%s Block-1 Adding vid fmt = [%s]\n",
+ __func__,
+ msm_hdmi_mode_2string(video_format));
+
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
+
+ /* Make a note of the preferred video format */
+ if (i == 0) {
+ sink_data->preferred_video_format =
+ video_format;
+ }
+ }
+ desc_offset += 0x12;
+ ++i;
}
- desc_offset += 0x12;
- ++i;
}
std_blk = 0;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index 1f4760d87172..481fc118c7ad 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -20,7 +20,7 @@
#include <linux/kthread.h>
#include <linux/hdcp_qseecom.h>
-#include "mdss_hdcp_1x.h"
+#include "mdss_hdcp.h"
#include "video/msm_hdmi_hdcp_mgr.h"
#include "mdss_hdmi_util.h"
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 94cc2f2dc370..9c90a72bce99 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -29,7 +29,7 @@
#include "mdss_fb.h"
#include "mdss_hdmi_cec.h"
#include "mdss_hdmi_edid.h"
-#include "mdss_hdcp_1x.h"
+#include "mdss_hdcp.h"
#include "mdss_hdmi_tx.h"
#include "mdss_hdmi_audio.h"
#include "mdss.h"
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index a0637109c7b3..04e8fa4ba576 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -2605,7 +2605,7 @@ static ssize_t mdss_mdp_store_max_limit_bw(struct device *dev,
struct mdss_data_type *mdata = dev_get_drvdata(dev);
u32 data = 0;
- if (1 != sscanf(buf, "%d", &data)) {
+ if (kstrtouint(buf, 0, &data)) {
pr_info("Not able scan to bw_mode_bitmap\n");
} else {
mdata->bw_mode_bitmap = data;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 8ac63aaaefce..93f5f9a51a63 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -109,6 +109,9 @@
* QSEED3 parameters needs to be updated.
* @DS_ENHANCER_UPDATE: Setting this bit indicates current Desitnation Scaler
* QSEED3 Detial enhancer parameters need to be updated.
+ * @DS_VALIDATE: Indicates the destination scaler parameters are validated
+ * and can be used to program the HW and perform a flush.
+ * @DS_DIRTY_UPDATE: Marks the block for a dirty update on power resume.
*/
#define DS_ENABLE BIT(0)
#define DS_DUAL_MODE BIT(1)
@@ -116,6 +119,8 @@
#define DS_RIGHT BIT(3)
#define DS_SCALE_UPDATE BIT(4)
#define DS_ENHANCER_UPDATE BIT(5)
+#define DS_VALIDATE BIT(6)
+#define DS_DIRTY_UPDATE BIT(7)
/**
* Destination Scaler DUAL mode overfetch pixel count
@@ -230,9 +235,13 @@ enum mdss_mdp_csc_type {
MDSS_MDP_CSC_YUV2RGB_601L,
MDSS_MDP_CSC_YUV2RGB_601FR,
MDSS_MDP_CSC_YUV2RGB_709L,
+ MDSS_MDP_CSC_YUV2RGB_2020L,
+ MDSS_MDP_CSC_YUV2RGB_2020FR,
MDSS_MDP_CSC_RGB2YUV_601L,
MDSS_MDP_CSC_RGB2YUV_601FR,
MDSS_MDP_CSC_RGB2YUV_709L,
+ MDSS_MDP_CSC_RGB2YUV_2020L,
+ MDSS_MDP_CSC_RGB2YUV_2020FR,
MDSS_MDP_CSC_YUV2YUV,
MDSS_MDP_CSC_RGB2RGB,
MDSS_MDP_MAX_CSC
@@ -370,6 +379,8 @@ struct mdss_mdp_destination_scaler {
char __iomem *lut_base;
u16 src_width;
u16 src_height;
+ u16 last_mixer_width;
+ u16 last_mixer_height;
u32 flags;
struct mdp_scale_data_v2 scaler;
};
@@ -404,7 +415,7 @@ struct mdss_mdp_ctl_intfs_ops {
/* to update lineptr, [1..yres] - enable, 0 - disable */
int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
- int (*avr_ctrl_fnc)(struct mdss_mdp_ctl *);
+ int (*avr_ctrl_fnc)(struct mdss_mdp_ctl *, bool enable);
};
struct mdss_mdp_cwb {
@@ -1401,6 +1412,10 @@ static inline uint8_t pp_vig_csc_pipe_val(struct mdss_mdp_pipe *pipe)
return MDSS_MDP_CSC_YUV2RGB_601L;
case MDP_CSC_ITU_R_601_FR:
return MDSS_MDP_CSC_YUV2RGB_601FR;
+ case MDP_CSC_ITU_R_2020:
+ return MDSS_MDP_CSC_YUV2RGB_2020L;
+ case MDP_CSC_ITU_R_2020_FR:
+ return MDSS_MDP_CSC_YUV2RGB_2020FR;
case MDP_CSC_ITU_R_709:
default:
return MDSS_MDP_CSC_YUV2RGB_709L;
@@ -1706,6 +1721,7 @@ void mdss_mdp_pp_term(struct device *dev);
int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd);
int mdss_mdp_pp_resume(struct msm_fb_data_type *mfd);
+void mdss_mdp_pp_dest_scaler_resume(struct mdss_mdp_ctl *ctl);
int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl);
int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index eb1e0b5c47a6..1d61653b76bb 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -3508,7 +3508,9 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
if (is_dest_scaling_enable(ctl->mixer_left)) {
width = get_ds_input_width(ctl->mixer_left);
height = get_ds_input_height(ctl->mixer_left);
- if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+ if (is_dual_lm_single_display(ctl->mfd) ||
+ (ctl->panel_data->next &&
+ is_pingpong_split(ctl->mfd)))
width *= 2;
} else {
width = get_panel_width(ctl);
@@ -3548,9 +3550,13 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
}
if (split_fb) {
- width = ctl->mfd->split_fb_left;
- width += (pinfo->lcdc.border_left +
- pinfo->lcdc.border_right);
+ if (is_dest_scaling_enable(ctl->mixer_left)) {
+ width = get_ds_input_width(ctl->mixer_left);
+ } else {
+ width = ctl->mfd->split_fb_left;
+ width += (pinfo->lcdc.border_left +
+ pinfo->lcdc.border_right);
+ }
} else if (width > max_mixer_width) {
width /= 2;
}
@@ -3576,8 +3582,12 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
return 0;
}
- if (split_fb)
- width = ctl->mfd->split_fb_right;
+ if (split_fb) {
+ if (is_dest_scaling_enable(ctl->mixer_left))
+ width = get_ds_input_width(ctl->mixer_left);
+ else
+ width = ctl->mfd->split_fb_right;
+ }
if (width < ctl->width) {
if (ctl->mixer_right == NULL) {
@@ -4038,6 +4048,7 @@ static void mdss_mdp_ctl_restore_sub(struct mdss_mdp_ctl *ctl)
if (ctl->mfd && ctl->panel_data) {
ctl->mfd->ipc_resume = true;
mdss_mdp_pp_resume(ctl->mfd);
+ mdss_mdp_pp_dest_scaler_resume(ctl);
if (is_dsc_compression(&ctl->panel_data->panel_info)) {
/*
@@ -5593,7 +5604,7 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
if (ctl->ops.avr_ctrl_fnc) {
- ret = ctl->ops.avr_ctrl_fnc(ctl);
+ ret = ctl->ops.avr_ctrl_fnc(ctl, true);
if (ret) {
pr_err("error configuring avr ctrl registers ctl=%d err=%d\n",
ctl->num, ret);
@@ -5603,7 +5614,7 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
}
if (sctl && sctl->ops.avr_ctrl_fnc) {
- ret = sctl->ops.avr_ctrl_fnc(sctl);
+ ret = sctl->ops.avr_ctrl_fnc(sctl, true);
if (ret) {
pr_err("error configuring avr ctrl registers sctl=%d err=%d\n",
sctl->num, ret);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index d316ab6d263a..048e5fce30c6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -108,6 +108,8 @@ static void mdss_mdp_fetch_end_config(struct mdss_mdp_video_ctx *ctx,
static void early_wakeup_dfps_update_work(struct work_struct *work);
+static int mdss_mdp_video_avr_ctrl(struct mdss_mdp_ctl *ctl, bool enable);
+
static inline void mdp_video_write(struct mdss_mdp_video_ctx *ctx,
u32 reg, u32 val)
{
@@ -459,13 +461,15 @@ static int mdss_mdp_video_avr_trigger_setup(struct mdss_mdp_ctl *ctl)
}
static void mdss_mdp_video_avr_ctrl_setup(struct mdss_mdp_video_ctx *ctx,
- struct mdss_mdp_avr_info *avr_info, bool is_master)
+ struct mdss_mdp_avr_info *avr_info, bool is_master, bool enable)
{
u32 avr_ctrl = 0;
u32 avr_mode = 0;
- avr_ctrl = avr_info->avr_enabled;
- avr_mode = avr_info->avr_mode;
+ if (enable) {
+ avr_ctrl = avr_info->avr_enabled;
+ avr_mode = avr_info->avr_mode;
+ }
/* Enable avr_vsync_clear_en bit to clear avr in next vsync */
if (avr_mode == MDSS_MDP_AVR_ONE_SHOT)
@@ -1429,6 +1433,20 @@ static int mdss_mdp_video_config_fps(struct mdss_mdp_ctl *ctl, int new_fps)
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+ /*
+ * Need to disable AVR during DFPS update period.
+ * Next commit will restore the AVR settings.
+ */
+ if (test_bit(MDSS_CAPS_AVR_SUPPORTED,
+ mdata->mdss_caps_map) &&
+ ctl->avr_info.avr_enabled) {
+ mdss_mdp_video_avr_ctrl(ctl, false);
+ rc = mdss_mdp_video_dfps_wait4vsync(ctl);
+ if (rc < 0)
+ pr_err("Error in dfps_wait: %d\n", rc);
+ }
+
spin_lock_irqsave(&ctx->dfps_lock, flags);
if (mdata->mdp_rev < MDSS_MDP_HW_REV_105) {
@@ -2112,6 +2130,7 @@ static void early_wakeup_dfps_update_work(struct work_struct *work)
struct mdss_panel_info *pinfo;
struct msm_fb_data_type *mfd;
struct mdss_mdp_ctl *ctl;
+ struct mdss_data_type *mdata;
struct dynamic_fps_data data = {0};
int ret = 0;
int dfps;
@@ -2123,7 +2142,8 @@ static void early_wakeup_dfps_update_work(struct work_struct *work)
ctl = ctx->ctl;
- if (!ctl || !ctl->panel_data || !ctl->mfd || !ctl->mfd->fbi) {
+ if (!ctl || !ctl->panel_data || !ctl->mfd || !ctl->mfd->fbi ||
+ !ctl->mdata) {
pr_err("%s: invalid ctl\n", __func__);
return;
}
@@ -2131,6 +2151,7 @@ static void early_wakeup_dfps_update_work(struct work_struct *work)
pdata = ctl->panel_data;
pinfo = &ctl->panel_data->panel_info;
mfd = ctl->mfd;
+ mdata = ctl->mdata;
if (!pinfo->dynamic_fps || !ctl->ops.config_fps_fnc ||
!pdata->panel_info.default_fps) {
@@ -2138,6 +2159,17 @@ static void early_wakeup_dfps_update_work(struct work_struct *work)
return;
}
+ /*
+ * Bypass DFPS update when AVR is enabled because
+ * AVR will take control of the programmable fetch
+ */
+ if (test_bit(MDSS_CAPS_AVR_SUPPORTED,
+ mdata->mdss_caps_map) &&
+ ctl->avr_info.avr_enabled) {
+ pr_debug("Bypass DFPS update when AVR is enabled\n");
+ return;
+ }
+
/* get the default fps that was cached before any dfps update */
dfps = pdata->panel_info.default_fps;
@@ -2213,7 +2245,7 @@ static int mdss_mdp_video_early_wake_up(struct mdss_mdp_ctl *ctl)
return 0;
}
-static int mdss_mdp_video_avr_ctrl(struct mdss_mdp_ctl *ctl)
+static int mdss_mdp_video_avr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
{
struct mdss_mdp_video_ctx *ctx = NULL, *sctx = NULL;
@@ -2222,7 +2254,8 @@ static int mdss_mdp_video_avr_ctrl(struct mdss_mdp_ctl *ctl)
pr_err("invalid master ctx\n");
return -EINVAL;
}
- mdss_mdp_video_avr_ctrl_setup(ctx, &ctl->avr_info, ctl->is_master);
+ mdss_mdp_video_avr_ctrl_setup(ctx, &ctl->avr_info, ctl->is_master,
+ enable);
if (is_pingpong_split(ctl->mfd)) {
sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
@@ -2230,7 +2263,8 @@ static int mdss_mdp_video_avr_ctrl(struct mdss_mdp_ctl *ctl)
pr_err("invalid slave ctx\n");
return -EINVAL;
}
- mdss_mdp_video_avr_ctrl_setup(sctx, &ctl->avr_info, false);
+ mdss_mdp_video_avr_ctrl_setup(sctx, &ctl->avr_info, false,
+ enable);
}
return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index e26b3843d7b0..353d07ad64ac 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -67,13 +67,89 @@ static inline void *u64_to_ptr(uint64_t address)
return (void *)(uintptr_t)address;
}
+static void mdss_mdp_disable_destination_scaler_setup(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_data_type *mdata = ctl->mdata;
+ struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+ if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map)) {
+ if (ctl->mixer_left && ctl->mixer_right &&
+ ctl->mixer_left->ds && ctl->mixer_right->ds &&
+ ctl->mixer_left->ds->scaler.enable &&
+ ctl->mixer_right->ds->scaler.enable) {
+ /*
+ * DUAL mode disable
+ */
+ ctl->mixer_left->width = get_panel_width(ctl);
+ ctl->mixer_left->height = get_panel_yres(pinfo);
+ ctl->mixer_left->width /= 2;
+ ctl->mixer_right->width = ctl->mixer_left->width;
+ ctl->mixer_right->height = ctl->mixer_left->height;
+ ctl->mixer_left->roi = (struct mdss_rect) { 0, 0,
+ ctl->mixer_left->width,
+ ctl->mixer_left->height };
+ ctl->mixer_right->roi = (struct mdss_rect) { 0, 0,
+ ctl->mixer_right->width,
+ ctl->mixer_right->height };
+
+ /*
+ * Disable the destination scaler by resetting the control
+ * flags; the scaler also needs to be disabled in the
+ * QSEED3 settings.
+ */
+ ctl->mixer_left->ds->flags = DS_SCALE_UPDATE |
+ DS_VALIDATE;
+ ctl->mixer_right->ds->flags = DS_SCALE_UPDATE |
+ DS_VALIDATE;
+ ctl->mixer_left->ds->scaler.enable = 0;
+ ctl->mixer_left->ds->scaler.detail_enhance.enable = 0;
+ ctl->mixer_right->ds->scaler.enable = 0;
+ ctl->mixer_right->ds->scaler.detail_enhance.enable = 0;
+
+ pr_debug("DS-Left+Right disable: left:%dx%d, right:%dx%d\n",
+ ctl->mixer_left->width,
+ ctl->mixer_left->height,
+ ctl->mixer_right->width,
+ ctl->mixer_right->height);
+ MDSS_XLOG(ctl->mixer_left->width,
+ ctl->mixer_left->height,
+ ctl->mixer_right->width,
+ ctl->mixer_right->height);
+ } else if (ctl->mixer_left && ctl->mixer_left->ds &&
+ ctl->mixer_left->ds->scaler.enable) {
+ /*
+ * Single DS disable
+ */
+ ctl->mixer_left->width = get_panel_width(ctl);
+ ctl->mixer_left->height = get_panel_yres(pinfo);
+ ctl->mixer_left->roi = (struct mdss_rect) { 0, 0,
+ ctl->mixer_left->width,
+ ctl->mixer_left->height };
+
+ ctl->mixer_left->ds->flags = DS_SCALE_UPDATE |
+ DS_VALIDATE;
+ ctl->mixer_left->ds->scaler.enable = 0;
+ ctl->mixer_left->ds->scaler.detail_enhance.enable = 0;
+
+ pr_debug("DS-left disable: wxh=%dx%d\n",
+ ctl->mixer_left->width,
+ ctl->mixer_left->height);
+ MDSS_XLOG(ctl->mixer_left->width,
+ ctl->mixer_left->height);
+ }
+ }
+}
+
static int __dest_scaler_data_setup(struct mdp_destination_scaler_data *ds_data,
struct mdss_mdp_destination_scaler *ds,
u32 max_input_width, u32 max_output_width)
{
struct mdp_scale_data_v2 *scale;
- ds->flags = (ds_data->flags & MDP_DESTSCALER_ENABLE) ? DS_ENABLE : 0;
+ if (ds_data->flags & MDP_DESTSCALER_ENABLE)
+ ds->flags |= DS_ENABLE;
+ else
+ ds->flags &= ~DS_ENABLE;
if (ds_data->flags & (MDP_DESTSCALER_SCALE_UPDATE |
MDP_DESTSCALER_ENHANCER_UPDATE)) {
@@ -101,8 +177,12 @@ static int __dest_scaler_data_setup(struct mdp_destination_scaler_data *ds_data,
ds->flags |= DS_SCALE_UPDATE;
if (ds_data->flags & MDP_DESTSCALER_ENHANCER_UPDATE)
ds->flags |= DS_ENHANCER_UPDATE;
- ds->src_width = scale->src_width[0];
- ds->src_height = scale->src_height[0];
+
+ /*
+ * Update with LM resolution
+ */
+ ds->src_width = ds_data->lm_width;
+ ds->src_height = ds_data->lm_height;
}
if (ds_data->flags == 0) {
@@ -110,6 +190,11 @@ static int __dest_scaler_data_setup(struct mdp_destination_scaler_data *ds_data,
ds_data->dest_scaler_ndx);
}
+ /*
+ * All checks passed validation; the flag is cleared in the CTL path
+ * after the flush is issued.
+ */
+ ds->flags |= DS_VALIDATE;
return 0;
}
@@ -118,7 +203,7 @@ static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
{
struct mdss_data_type *mdata;
struct mdss_panel_info *pinfo;
-
+ u16 mxleft_w = 0, mxleft_h = 0, mxright_w = 0, mxright_h = 0;
mdata = ctl->mdata;
/*
@@ -134,7 +219,7 @@ static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
* height.
*/
pinfo = &ctl->panel_data->panel_info;
- if ((ds_data->lm_width > get_panel_xres(pinfo)) ||
+ if ((ds_data->lm_width > get_panel_width(ctl)) ||
(ds_data->lm_height > get_panel_yres(pinfo)) ||
(ds_data->lm_width == 0) ||
(ds_data->lm_height == 0)) {
@@ -142,14 +227,8 @@ static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
ds_data->lm_width, ds_data->lm_height);
return -EINVAL;
}
-
- ctl->width = ds_data->lm_width;
- ctl->height = ds_data->lm_height;
-
- ctl->mixer_left->width = ds_data->lm_width;
- ctl->mixer_left->height = ds_data->lm_height;
- pr_debug("Update mixer-left width/height: %dx%d\n",
- ds_data->lm_width, ds_data->lm_width);
+ mxleft_w = ds_data->lm_width;
+ mxleft_h = ds_data->lm_height;
}
if (ctl->mixer_right && ctl->mixer_right->ds) {
@@ -174,25 +253,51 @@ static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
* Split display both left and right should have the
* same width and height
*/
- ctl->mixer_right->width = ds_data->lm_width;
- ctl->mixer_right->height = ds_data->lm_height;
- pr_debug("Update mixer-right width/height: %dx%d\n",
- ds_data->lm_width, ds_data->lm_height);
+ mxright_w = ds_data->lm_width;
+ mxright_h = ds_data->lm_height;
if (ctl->mixer_left &&
- ((ctl->mixer_right->width !=
- ctl->mixer_left->width) ||
- (ctl->mixer_right->height !=
- ctl->mixer_left->height))) {
+ ((mxright_w != mxleft_w) ||
+ (mxright_h != mxleft_h))) {
pr_err("Mismatch width/heigth in LM for split display\n");
return -EINVAL;
}
+ }
+
+ /*
+ * Update mixer and control dimension after successful
+ * pre-validation
+ */
+ if (mxleft_w && mxleft_h) {
+ ctl->mixer_left->ds->last_mixer_width =
+ ctl->mixer_left->width;
+ ctl->mixer_left->ds->last_mixer_height =
+ ctl->mixer_left->height;
+
+ ctl->width = mxleft_w;
+ ctl->height = mxleft_h;
+ ctl->mixer_left->width = mxleft_w;
+ ctl->mixer_left->height = mxleft_h;
+ pr_debug("Update mixer-left width/height: %dx%d\n",
+ mxleft_w, mxleft_h);
+ }
+
+ if (mxright_w && mxright_h) {
+ ctl->mixer_right->ds->last_mixer_width =
+ ctl->mixer_right->width;
+ ctl->mixer_right->ds->last_mixer_height =
+ ctl->mixer_right->height;
+
+ ctl->mixer_right->width = mxright_w;
+ ctl->mixer_right->height = mxright_h;
+ pr_debug("Update mixer-right width/height: %dx%d\n",
+ mxright_w, mxright_h);
/*
* For split display, CTL width should be equal to
* whole panel size
*/
- ctl->width += ds_data->lm_width;
+ ctl->width += mxright_w;
}
pr_debug("Updated CTL width:%d, height:%d\n",
@@ -236,19 +341,23 @@ static int mdss_mdp_validate_destination_scaler(struct msm_fb_data_type *mfd,
MDSS_MDP_DS_OVERFETCH_SIZE,
mdata->max_dest_scaler_output_width);
if (ret)
- return ret;
+ goto reset_mixer;
ret = __dest_scaler_data_setup(&ds_data[1], ds_right,
mdata->max_dest_scaler_input_width -
MDSS_MDP_DS_OVERFETCH_SIZE,
mdata->max_dest_scaler_output_width);
if (ret)
- return ret;
+ goto reset_mixer;
ds_left->flags &= ~(DS_LEFT|DS_RIGHT);
ds_left->flags |= DS_DUAL_MODE;
ds_right->flags &= ~(DS_LEFT|DS_RIGHT);
ds_right->flags |= DS_DUAL_MODE;
+ MDSS_XLOG(ds_left->num, ds_left->src_width,
+ ds_left->src_height, ds_left->flags,
+ ds_right->num, ds_right->src_width,
+ ds_right->src_height, ds_right->flags);
break;
case DS_LEFT:
@@ -262,6 +371,11 @@ static int mdss_mdp_validate_destination_scaler(struct msm_fb_data_type *mfd,
ret = __dest_scaler_data_setup(&ds_data[0], ds_left,
mdata->max_dest_scaler_input_width,
mdata->max_dest_scaler_output_width);
+ if (ret)
+ goto reset_mixer;
+
+ MDSS_XLOG(ds_left->num, ds_left->src_width,
+ ds_left->src_height, ds_left->flags);
break;
case DS_RIGHT:
@@ -276,6 +390,11 @@ static int mdss_mdp_validate_destination_scaler(struct msm_fb_data_type *mfd,
ret = __dest_scaler_data_setup(&ds_data[0], ds_right,
mdata->max_dest_scaler_input_width,
mdata->max_dest_scaler_output_width);
+ if (ret)
+ goto reset_mixer;
+
+ MDSS_XLOG(ds_right->num, ds_right->src_width,
+ ds_right->src_height, ds_right->flags);
break;
}
@@ -312,6 +431,40 @@ static int mdss_mdp_validate_destination_scaler(struct msm_fb_data_type *mfd,
pr_err("Invalid Dest-scaler output width/height: %d/%d\n",
scaler_width, scaler_height);
ret = -EINVAL;
+ goto reset_mixer;
+ }
+
+ return ret;
+
+reset_mixer:
+ /* reverting mixer and control dimension */
+ if (ctl->mixer_left && ctl->mixer_left->ds &&
+ ctl->mixer_left->ds->last_mixer_width) {
+ ctl->width = ctl->mixer_left->ds->last_mixer_width;
+ ctl->height = ctl->mixer_left->ds->last_mixer_height;
+ ctl->mixer_left->width =
+ ctl->mixer_left->ds->last_mixer_width;
+ ctl->mixer_left->height =
+ ctl->mixer_left->ds->last_mixer_height;
+ if (ds_left)
+ ds_left->flags &= ~DS_ENABLE;
+ MDSS_XLOG(ctl->width, ctl->height,
+ ctl->mixer_left->width,
+ ctl->mixer_left->height);
+ }
+
+ if (ctl->mixer_right && ctl->mixer_right->ds &&
+ ctl->mixer_right->ds->last_mixer_width) {
+ ctl->width += ctl->mixer_right->ds->last_mixer_width;
+ ctl->mixer_right->width =
+ ctl->mixer_right->ds->last_mixer_width;
+ ctl->mixer_right->height =
+ ctl->mixer_right->ds->last_mixer_height;
+ if (ds_right)
+ ds_right->flags &= ~DS_ENABLE;
+ MDSS_XLOG(ctl->width, ctl->height,
+ ctl->mixer_right->width,
+ ctl->mixer_right->height);
}
return ret;
@@ -1939,6 +2092,7 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
enum layer_pipe_q pipe_q_type;
enum layer_zorder_used zorder_used[MDSS_MDP_MAX_STAGE] = {0};
enum mdss_mdp_pipe_rect rect_num;
+ struct mdp_destination_scaler_data *ds_data;
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
@@ -2194,11 +2348,10 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
layer->z_order -= MDSS_MDP_STAGE_0;
}
+ ds_data = commit->dest_scaler;
if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map) &&
- commit->dest_scaler &&
+ ds_data && (ds_data->flags & MDP_DESTSCALER_ENABLE) &&
commit->dest_scaler_cnt) {
- struct mdp_destination_scaler_data *ds_data =
- commit->dest_scaler;
/*
* Find out which DS block to use based on DS commit info
@@ -2217,8 +2370,7 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
}
ret = mdss_mdp_validate_destination_scaler(mfd,
- commit->dest_scaler,
- ds_mode);
+ ds_data, ds_mode);
if (ret) {
pr_err("fail to validate destination scaler\n");
layer->error_code = ret;
@@ -2472,6 +2624,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
struct file *file, struct mdp_layer_commit_v1 *commit)
{
struct mdss_overlay_private *mdp5_data;
+ struct mdp_destination_scaler_data *ds_data;
int rc = 0;
if (!mfd || !commit) {
@@ -2505,15 +2658,17 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
}
- if (commit->dest_scaler && commit->dest_scaler_cnt) {
+ ds_data = commit->dest_scaler;
+ if (ds_data && commit->dest_scaler_cnt &&
+ (ds_data->flags & MDP_DESTSCALER_ENABLE)) {
rc = mdss_mdp_destination_scaler_pre_validate(mdp5_data->ctl,
- commit->dest_scaler,
- commit->dest_scaler_cnt);
+ ds_data, commit->dest_scaler_cnt);
if (IS_ERR_VALUE(rc)) {
pr_err("Destination scaler pre-validate failed\n");
return -EINVAL;
}
- }
+ } else
+ mdss_mdp_disable_destination_scaler_setup(mdp5_data->ctl);
rc = mdss_mdp_avr_validate(mfd, commit);
if (IS_ERR_VALUE(rc)) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 965d4a6cfb5e..9bdc66232dd5 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -2958,7 +2958,7 @@ static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
if (pdata->panel_info.dfps_update ==
DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP) {
- if (sscanf(buf, "%d %d %d %d %d",
+ if (sscanf(buf, "%u %u %u %u %u",
&data.hfp, &data.hbp, &data.hpw,
&data.clk_rate, &data.fps) != 5) {
pr_err("could not read input\n");
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index ee1cd8fd623e..f79212ea740d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -60,6 +60,30 @@ struct mdp_csc_cfg mdp_csc_8bit_convert[MDSS_MDP_MAX_CSC] = {
{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
},
+ [MDSS_MDP_CSC_YUV2RGB_2020L] = {
+ 0,
+ {
+ 0x0256, 0x0000, 0x035e,
+ 0x0256, 0xffa0, 0xfeb2,
+ 0x0256, 0x044c, 0x0000,
+ },
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_2020FR] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x02f3,
+ 0x0200, 0xffac, 0xfedb,
+ 0x0200, 0x03c3, 0x0000,
+ },
+ { 0x0000, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
[MDSS_MDP_CSC_RGB2YUV_601L] = {
0,
{
@@ -96,6 +120,30 @@ struct mdp_csc_cfg mdp_csc_8bit_convert[MDSS_MDP_MAX_CSC] = {
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
{ 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
},
+ [MDSS_MDP_CSC_RGB2YUV_2020L] = {
+ 0,
+ {
+ 0x0073, 0x0129, 0x001a,
+ 0xffc1, 0xff5e, 0x00e0,
+ 0x00e0, 0xff32, 0xffee
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0010, 0x0080, 0x0080,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_2020FR] = {
+ 0,
+ {
+ 0x0086, 0x015b, 0x001e,
+ 0xffb9, 0xff47, 0x0100,
+ 0x0100, 0xff15, 0xffeb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0080, 0x0080,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
[MDSS_MDP_CSC_YUV2YUV] = {
0,
{
@@ -159,6 +207,30 @@ struct mdp_csc_cfg mdp_csc_10bit_convert[MDSS_MDP_MAX_CSC] = {
{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
},
+ [MDSS_MDP_CSC_YUV2RGB_2020L] = {
+ 0,
+ {
+ 0x0256, 0x0000, 0x035e,
+ 0x0256, 0xffa0, 0xfeb2,
+ 0x0256, 0x044c, 0x0000,
+ },
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_2020FR] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x02f3,
+ 0x0200, 0xffac, 0xfedb,
+ 0x0200, 0x03c3, 0x0000,
+ },
+ { 0x0000, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
[MDSS_MDP_CSC_RGB2YUV_601L] = {
0,
{
@@ -195,6 +267,30 @@ struct mdp_csc_cfg mdp_csc_10bit_convert[MDSS_MDP_MAX_CSC] = {
{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
{ 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
},
+ [MDSS_MDP_CSC_RGB2YUV_2020L] = {
+ 0,
+ {
+ 0x0073, 0x0129, 0x001a,
+ 0xffc1, 0xff5e, 0x00e0,
+ 0x00e0, 0xff32, 0xffee
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_2020FR] = {
+ 0,
+ {
+ 0x0086, 0x015b, 0x001e,
+ 0xffb9, 0xff47, 0x0100,
+ 0x0100, 0xff15, 0xffeb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
[MDSS_MDP_CSC_YUV2YUV] = {
0,
{
@@ -2480,6 +2576,28 @@ static int pp_dest_scaler_setup(struct mdss_mdp_mixer *mixer)
if (!test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map) || !ds)
return 0;
+ /*
+ * Non-validated DS data usually comes from a PM event; resend the last
+ * setup so it matches the mixer and panel configuration.
+ */
+ if (!(ds->flags & DS_VALIDATE)) {
+ pr_debug("Apply old DS[%d] for non validate data\n", ds->num);
+ if (ds->flags & DS_ENABLE)
+ ds->flags |= (DS_SCALE_UPDATE | DS_ENHANCER_UPDATE);
+ ds->flags |= DS_VALIDATE;
+ }
+
+ /*
+ * If mark for dirty update, force update to scaler and detail
+ * enhancer.
+ */
+ if (ds->flags & DS_DIRTY_UPDATE) {
+ pr_debug("Scale dirty update requested\n");
+ ds->flags |= (DS_SCALE_UPDATE | DS_ENHANCER_UPDATE |
+ DS_VALIDATE);
+ ds->flags &= ~DS_DIRTY_UPDATE;
+ }
+
ds_offset = ds->ds_base;
op_mode = readl_relaxed(MDSS_MDP_REG_DEST_SCALER_OP_MODE +
ds_offset);
@@ -2519,12 +2637,37 @@ static int pp_dest_scaler_setup(struct mdss_mdp_mixer *mixer)
}
/* Destinations scaler shared the flush with DSPP in control */
- if (ds->flags & DS_ENABLE)
+ if (ds->flags & (DS_ENABLE | DS_VALIDATE)) {
+ pr_debug("FLUSH[%d]: flags:%X, op_mode:%x\n",
+ ds->num, ds->flags, op_mode);
ctl->flush_bits |= BIT(13 + ds->num);
+ }
+ ds->flags &= ~DS_VALIDATE;
return 0;
}
+void mdss_mdp_pp_dest_scaler_resume(struct mdss_mdp_ctl *ctl)
+{
+ if (!ctl || !ctl->mdata) {
+ pr_err("Invalid ctl\n");
+ return;
+ }
+
+ if (!test_bit(MDSS_CAPS_DEST_SCALER, ctl->mdata->mdss_caps_map))
+ return;
+
+ if (ctl->mixer_left && ctl->mixer_left->ds) {
+ ctl->mixer_left->ds->flags |= DS_DIRTY_UPDATE;
+ pr_debug("DS left mark dirty\n");
+ }
+
+ if (ctl->mixer_right && ctl->mixer_right->ds) {
+ ctl->mixer_right->ds->flags |= DS_DIRTY_UPDATE;
+ pr_debug("DS right mark dirty\n");
+ }
+}
+
int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl)
{
int ret = 0;
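
The BT.2020 entries added to mdp_csc_8bit_convert and mdp_csc_10bit_convert above follow the existing table layout: a 3x3 coefficient matrix, pre-bias, post-bias, and source/destination clamp vectors. The sketch below shows how such an entry would be applied to one pixel. It assumes the coefficients use a fixed-point scale with denominator 512 (0x0256 = 598, roughly the 255/219 limited-range luma gain) and that negative coefficients are stored as two's complement; both are inferences from the values, not statements from the patch.

#include <stdio.h>

/* Interpret a 16-bit coefficient as two's complement (0xffa0 == -96). */
static int sext(unsigned int c)
{
	return (c & 0x8000) ? (int)c - 0x10000 : (int)c;
}

static int clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* MDSS_MDP_CSC_YUV2RGB_2020L (8-bit) coefficients from the hunk above. */
	const unsigned int m[3][3] = {
		{ 0x0256, 0x0000, 0x035e },
		{ 0x0256, 0xffa0, 0xfeb2 },
		{ 0x0256, 0x044c, 0x0000 },
	};
	const int pre_bias[3] = { -16, -128, -128 };   /* 0xfff0, 0xff80, 0xff80 */
	const int yuv[3] = { 180, 128, 128 };          /* one Y'CbCr sample      */
	int rgb[3];

	/* Post-bias is all zero for this entry, so it is omitted here. */
	for (int i = 0; i < 3; i++) {
		long acc = 0;

		for (int j = 0; j < 3; j++)
			acc += (long)sext(m[i][j]) * (yuv[j] + pre_bias[j]);
		rgb[i] = clamp((int)(acc / 512), 0, 255);  /* assumed /512 scale */
	}
	printf("R=%d G=%d B=%d\n", rgb[0], rgb[1], rgb[2]);
	return 0;
}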
diff --git a/drivers/video/fbdev/msm/mdss_panel.c b/drivers/video/fbdev/msm/mdss_panel.c
index 16c2d4e6e92d..31cf74274131 100644
--- a/drivers/video/fbdev/msm/mdss_panel.c
+++ b/drivers/video/fbdev/msm/mdss_panel.c
@@ -455,10 +455,12 @@ int mdss_panel_debugfs_setup(struct mdss_panel_info *panel_info, struct dentry
return -ENOMEM;
}
+ debugfs_info->parent = parent;
debugfs_info->root = debugfs_create_dir(intf_str, parent);
if (IS_ERR_OR_NULL(debugfs_info->root)) {
pr_err("Debugfs create dir failed with error: %ld\n",
PTR_ERR(debugfs_info->root));
+ kfree(debugfs_info);
return -ENODEV;
}
@@ -503,6 +505,7 @@ int mdss_panel_debugfs_init(struct mdss_panel_info *panel_info,
intf_str);
if (rc) {
pr_err("error in initilizing panel debugfs\n");
+ mdss_panel_debugfs_cleanup(&pdata->panel_info);
return rc;
}
pdata = pdata->next;
@@ -516,13 +519,16 @@ void mdss_panel_debugfs_cleanup(struct mdss_panel_info *panel_info)
{
struct mdss_panel_data *pdata;
struct mdss_panel_debugfs_info *debugfs_info;
+ struct dentry *parent = NULL;
pdata = container_of(panel_info, struct mdss_panel_data, panel_info);
do {
debugfs_info = pdata->panel_info.debugfs_info;
- if (debugfs_info && debugfs_info->root)
- debugfs_remove_recursive(debugfs_info->root);
+ if (debugfs_info && !parent)
+ parent = debugfs_info->parent;
+ kfree(debugfs_info);
pdata = pdata->next;
} while (pdata);
+ debugfs_remove_recursive(parent);
pr_debug("Cleaned up mdss_panel_debugfs_info\n");
}
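
The cleanup rework above frees every per-panel debugfs_info node and removes the shared debugfs directory once via the stored parent dentry, instead of removing each node's root individually and leaving the structs allocated (the matching kfree() in mdss_dsi_debugfs_cleanup() appears earlier in this section). A small userspace sketch of the same "free each node, release the shared parent exactly once" pattern, with made-up types, follows.

#include <stdio.h>
#include <stdlib.h>

struct shared_dir { const char *name; };          /* stand-in for a dentry      */
struct panel_dbg {                                /* stand-in for debugfs_info  */
	struct shared_dir *parent;
	struct panel_dbg *next;
};

static void cleanup(struct panel_dbg *head)
{
	struct shared_dir *parent = NULL;

	while (head) {
		struct panel_dbg *next = head->next;

		if (!parent)
			parent = head->parent;   /* remember the shared parent */
		free(head);                      /* free each node             */
		head = next;
	}
	if (parent)
		printf("removing shared dir \"%s\" once\n", parent->name);
}

int main(void)
{
	struct shared_dir dir = { "mdss_panel" };
	struct panel_dbg *a = calloc(1, sizeof(*a));
	struct panel_dbg *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->parent = &dir;
	a->next = b;
	b->parent = &dir;
	cleanup(a);
	return 0;
}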
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index be0491195263..18a93f9d3c3e 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -879,6 +879,7 @@ struct mdss_panel_data {
struct mdss_panel_debugfs_info {
struct dentry *root;
+ struct dentry *parent;
struct mdss_panel_info panel_info;
u32 override_flag;
struct mdss_panel_debugfs_info *next;
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c
index 4899231787f2..f6c6548bdaa5 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/video/fbdev/msm/msm_ext_display.c
@@ -24,6 +24,7 @@
#include <linux/msm_ext_display.h>
#include "mdss_hdmi_util.h"
+#include "mdss_fb.h"
struct msm_ext_disp_list {
struct msm_ext_disp_init_data *data;
@@ -97,41 +98,55 @@ end:
return;
}
-static int msm_ext_disp_get_pdev(struct device *dev,
- struct platform_device *pdev) {
- int ret = 0;
+static void msm_ext_disp_get_pdev_by_name(struct device *dev,
+ const char *phandle, struct platform_device **pdev)
+{
struct device_node *pd_np;
- const char *phandle = "qcom,msm_ext_disp";
- if (!dev || !dev->of_node) {
- ret = -ENODEV;
- pr_err("Invalid params\n");
- goto end;
+ if (!dev) {
+ pr_err("Invalid device\n");
+ return;
+ }
+
+ if (!dev->of_node) {
+ pr_err("Invalid of_node\n");
+ return;
}
pd_np = of_parse_phandle(dev->of_node, phandle, 0);
if (!pd_np) {
pr_err("Cannot find %s dev\n", phandle);
- ret = -ENODEV;
- goto end;
+ return;
}
- pdev = of_find_device_by_node(pd_np);
- if (!pdev) {
- pr_err("Cannot find %s pdev\n", phandle);
- ret = -ENODEV;
+ *pdev = of_find_device_by_node(pd_np);
+}
+
+static void msm_ext_disp_get_fb_pdev(struct device *device,
+ struct platform_device **fb_pdev)
+{
+ struct msm_fb_data_type *mfd = NULL;
+ struct fb_info *fbi = dev_get_drvdata(device);
+
+ if (!fbi) {
+ pr_err("fb_info is null\n");
+ return;
}
-end:
- return ret;
-}
+ mfd = (struct msm_fb_data_type *)fbi->par;
+ *fb_pdev = mfd->pdev;
+}
static ssize_t msm_ext_disp_sysfs_wta_audio_cb(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int ack, ret = 0;
ssize_t size = strnlen(buf, PAGE_SIZE);
- struct platform_device *pdev = NULL;
+ const char *ext_phandle = "qcom,msm_ext_disp";
+ struct platform_device *ext_pdev = NULL;
+ const char *intf_phandle = "qcom,mdss-intf";
+ struct platform_device *intf_pdev = NULL;
+ struct platform_device *fb_pdev = NULL;
ret = kstrtoint(buf, 10, &ack);
if (ret) {
@@ -139,13 +154,25 @@ static ssize_t msm_ext_disp_sysfs_wta_audio_cb(struct device *dev,
goto end;
}
- ret = msm_ext_disp_get_pdev(dev, pdev);
- if (ret) {
- pr_err("Failed to get pdev. ret=%d\n", ret);
+ msm_ext_disp_get_fb_pdev(dev, &fb_pdev);
+ if (!fb_pdev) {
+ pr_err("failed to get fb pdev\n");
+ goto end;
+ }
+
+ msm_ext_disp_get_pdev_by_name(&fb_pdev->dev, intf_phandle, &intf_pdev);
+ if (!intf_pdev) {
+ pr_err("failed to get display intf pdev\n");
+ goto end;
+ }
+
+ msm_ext_disp_get_pdev_by_name(&intf_pdev->dev, ext_phandle, &ext_pdev);
+ if (!ext_pdev) {
+ pr_err("failed to get ext_pdev\n");
goto end;
}
- ret = msm_ext_disp_audio_ack(pdev, ack);
+ ret = msm_ext_disp_audio_ack(ext_pdev, ack);
if (ret)
pr_err("Failed to process ack. ret=%d\n", ret);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 12eab503efd1..364bc44610c1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -152,6 +152,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
+static void release_memory_resource(struct resource *resource);
+
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
@@ -268,6 +270,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL;
}
+#ifdef CONFIG_SPARSEMEM
+ {
+ unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
+ unsigned long pfn = res->start >> PAGE_SHIFT;
+
+ if (pfn > limit) {
+ pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+ pfn, limit);
+ release_memory_resource(res);
+ return NULL;
+ }
+ }
+#endif
+
return res;
}
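
The SPARSEMEM guard added above rejects a hotplug resource whose start pfn lies beyond what the memory model can address, computed as 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT). The constants below are illustrative stand-ins (46 physical address bits and 4 KiB pages are plausible x86-64 values, not taken from the patch).

#include <stdio.h>

/* Example values; the real ones come from the kernel config/arch headers. */
#define MAX_PHYSMEM_BITS 46
#define PAGE_SHIFT       12

int main(void)
{
	unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
	unsigned long long res_start = 0x800000000000ULL;  /* 128 TiB, hypothetical */
	unsigned long pfn = res_start >> PAGE_SHIFT;

	if (pfn > limit)
		printf("resource pfn %lu above addressable limit %lu\n", pfn, limit);
	else
		printf("resource pfn %lu fits under limit %lu\n", pfn, limit);
	return 0;
}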
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 38272ad24551..f4edd6df3df2 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
{
unsigned int new_size;
evtchn_port_t *new_ring, *old_ring;
- unsigned int p, c;
/*
* Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
/*
* Copy the old ring contents to the new ring.
*
- * If the ring contents crosses the end of the current ring,
- * it needs to be copied in two chunks.
+ * To take care of wrapping, a full ring, and the new index
+ * pointing into the second half, simply copy the old contents
+ * twice.
*
* +---------+ +------------------+
- * |34567 12| -> | 1234567 |
- * +-----p-c-+ +------------------+
+ * |34567 12| -> |34567 1234567 12|
+ * +-----p-c-+ +-------c------p---+
*/
- p = evtchn_ring_offset(u, u->ring_prod);
- c = evtchn_ring_offset(u, u->ring_cons);
- if (p < c) {
- memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
- memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
- } else
- memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+ memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
+ memcpy(new_ring + u->ring_size, old_ring,
+ u->ring_size * sizeof(*u->ring));
u->ring = new_ring;
u->ring_size = new_size;
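
The rewritten copy path depends on the new ring being exactly double the old size (as the copy-twice trick requires): copying the old contents twice means any free-running cons/prod index, masked with the new size, still lands on valid data, so there is no need to split the copy at the wrap point as the removed code did. A standalone sketch with toy sizes, not the evtchn structures:

#include <stdio.h>
#include <string.h>

#define OLD_SIZE 8
#define NEW_SIZE 16   /* new ring is exactly double the old size */

int main(void)
{
	/* Old ring holding 1..7 with the contents wrapped (cons=3, prod=10). */
	unsigned int old_ring[OLD_SIZE] = { 6, 7, 0, 1, 2, 3, 4, 5 };
	unsigned int new_ring[NEW_SIZE];
	unsigned int cons = 3, prod = 10;   /* free-running indices */

	/* Copy the old contents twice; no split at the wrap point needed. */
	memcpy(new_ring, old_ring, sizeof(old_ring));
	memcpy(new_ring + OLD_SIZE, old_ring, sizeof(old_ring));

	/* Consuming with the *new* mask still yields 1,2,...,7 in order. */
	while (cons != prod) {
		printf("%u ", new_ring[cons & (NEW_SIZE - 1)]);
		cons++;
	}
	printf("\n");
	return 0;
}

The trade-off is one extra memcpy of the old ring in exchange for much simpler index handling.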
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index b7fcc0de0b2f..0f5d05bf2131 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
if (unlikely(!inode))
return failed_creating(dentry);
- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
dentry->d_fsdata = (void *)f;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f287ddfd17f1..5d9d0be2770c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -934,6 +934,15 @@ struct ext4_inode_info {
* by other means, so we have i_data_sem.
*/
struct rw_semaphore i_data_sem;
+ /*
+ * i_mmap_sem is for serializing page faults with truncate / punch hole
+ * operations. We have to make sure that new page cannot be faulted in
+ * a section of the inode that is being punched. We cannot easily use
+ * i_data_sem for this since we need protection for the whole punch
+ * operation and i_data_sem ranks below transaction start so we have
+ * to occasionally drop it.
+ */
+ struct rw_semaphore i_mmap_sem;
struct inode vfs_inode;
struct jbd2_inode *jinode;
@@ -2524,6 +2533,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t lend);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
@@ -2888,6 +2898,9 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
return changed;
}
+int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
+ loff_t len);
+
struct ext4_group_info {
unsigned long bb_state;
struct rb_root bb_free_root;
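
The comment added with i_mmap_sem describes a reader/writer discipline: page-fault paths (see the ext4_filemap_fault() and DAX fault changes later in this diff) take the semaphore shared, while punch hole, zero range, collapse/insert range, and truncate take it exclusive around the page-cache truncation, so a fault cannot repopulate a range that is being punched. A userspace pthreads sketch of that discipline, with stand-in names only, follows.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace analogue of EXT4_I(inode)->i_mmap_sem. */
static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static int cached_pages = 100;

static void *fault_path(void *arg)
{
	(void)arg;
	/* Like ext4_filemap_fault(): faults only need shared access. */
	pthread_rwlock_rdlock(&mmap_sem);
	printf("fault sees %d cached pages\n", cached_pages);
	pthread_rwlock_unlock(&mmap_sem);
	return NULL;
}

static void punch_hole(void)
{
	/* Like ext4_punch_hole(): exclusive while the cache is truncated. */
	pthread_rwlock_wrlock(&mmap_sem);
	cached_pages = 0;              /* truncate_pagecache_range() stand-in */
	usleep(1000);
	pthread_rwlock_unlock(&mmap_sem);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fault_path, NULL);
	punch_hole();
	pthread_join(t, NULL);
	return 0;
}

Build with -pthread; either ordering of the two paths is safe, which is the point of the lock.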
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 551353b1b17a..3578b25fccfd 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4685,10 +4685,6 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
if (len <= EXT_UNWRITTEN_MAX_LEN)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
-
/*
* credits to insert 1 extent into extent tree
*/
@@ -4752,8 +4748,6 @@ retry:
goto retry;
}
- ext4_inode_resume_unlocked_dio(inode);
-
return ret > 0 ? ret2 : ret;
}
@@ -4770,7 +4764,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
int partial_begin, partial_end;
loff_t start, end;
ext4_lblk_t lblk;
- struct address_space *mapping = inode->i_mapping;
unsigned int blkbits = inode->i_blkbits;
trace_ext4_zero_range(inode, offset, len, mode);
@@ -4786,17 +4779,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
/*
- * Write out all dirty pages to avoid race conditions
- * Then release them.
- */
- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- ret = filemap_write_and_wait_range(mapping, offset,
- offset + len - 1);
- if (ret)
- return ret;
- }
-
- /*
* Round up offset. This is not fallocate, we neet to zero out
* blocks, so convert interior block aligned part of the range to
* unwritten and possibly manually zero out unaligned parts of the
@@ -4839,6 +4821,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+ /* Wait all existing dio workers, newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
/* Preallocate the range including the unaligned edges */
if (partial_begin || partial_end) {
ret = ext4_alloc_file_blocks(file,
@@ -4847,7 +4833,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
round_down(offset, 1 << blkbits)) >> blkbits,
new_size, flags, mode);
if (ret)
- goto out_mutex;
+ goto out_dio;
}
@@ -4856,16 +4842,23 @@ static long ext4_zero_range(struct file *file, loff_t offset,
flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE);
- /* Now release the pages and zero block aligned part of pages*/
+ /*
+ * Prevent page faults from reinstantiating pages we have
+ * released from page cache.
+ */
+ down_write(&EXT4_I(inode)->i_mmap_sem);
+ ret = ext4_update_disksize_before_punch(inode, offset, len);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+ goto out_dio;
+ }
+ /* Now release the pages and zero block aligned part of pages */
truncate_pagecache_range(inode, start, end - 1);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
-
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
+ up_write(&EXT4_I(inode)->i_mmap_sem);
if (ret)
goto out_dio;
}
@@ -4998,8 +4991,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
goto out;
}
+ /* Wait all existing dio workers, newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
+ ext4_inode_resume_unlocked_dio(inode);
if (ret)
goto out;
@@ -5494,21 +5492,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
- /*
- * Need to round down offset to be aligned with page size boundary
- * for page size > block size.
- */
- ioffset = round_down(offset, PAGE_SIZE);
-
- /* Write out all dirty pages */
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
- LLONG_MAX);
- if (ret)
- return ret;
-
- /* Take mutex lock */
mutex_lock(&inode->i_mutex);
-
/*
* There is no need to overlap collapse range with EOF, in which case
* it is effectively a truncate operation
@@ -5524,17 +5508,43 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
goto out_mutex;
}
- truncate_pagecache(inode, ioffset);
-
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+ * page cache.
+ */
+ down_write(&EXT4_I(inode)->i_mmap_sem);
+ /*
+ * Need to round down offset to be aligned with page size boundary
+ * for page size > block size.
+ */
+ ioffset = round_down(offset, PAGE_SIZE);
+ /*
+ * Write tail of the last page before removed range since it will get
+ * removed from the page cache below.
+ */
+ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
+ if (ret)
+ goto out_mmap;
+ /*
+ * Write data that will be shifted to preserve them when discarding
+ * page cache below. We are also protected from pages becoming dirty
+ * by i_mmap_sem.
+ */
+ ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
+ LLONG_MAX);
+ if (ret)
+ goto out_mmap;
+ truncate_pagecache(inode, ioffset);
+
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- goto out_dio;
+ goto out_mmap;
}
down_write(&EXT4_I(inode)->i_data_sem);
@@ -5573,7 +5583,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
-out_dio:
+out_mmap:
+ up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
@@ -5627,21 +5638,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
- /*
- * Need to round down to align start offset to page size boundary
- * for page size > block size.
- */
- ioffset = round_down(offset, PAGE_SIZE);
-
- /* Write out all dirty pages */
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
- LLONG_MAX);
- if (ret)
- return ret;
-
- /* Take mutex lock */
mutex_lock(&inode->i_mutex);
-
/* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ret = -EOPNOTSUPP;
@@ -5660,17 +5657,32 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
goto out_mutex;
}
- truncate_pagecache(inode, ioffset);
-
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+ * page cache.
+ */
+ down_write(&EXT4_I(inode)->i_mmap_sem);
+ /*
+ * Need to round down to align start offset to page size boundary
+ * for page size > block size.
+ */
+ ioffset = round_down(offset, PAGE_SIZE);
+ /* Write out all dirty pages */
+ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+ LLONG_MAX);
+ if (ret)
+ goto out_mmap;
+ truncate_pagecache(inode, ioffset);
+
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- goto out_dio;
+ goto out_mmap;
}
/* Expand file to avoid data loss if there is error while shifting */
@@ -5741,7 +5753,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
-out_dio:
+out_mmap:
+ up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 113837e7ba98..0d24ebcd7c9e 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -209,15 +209,18 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int result;
handle_t *handle = NULL;
- struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+ struct inode *inode = file_inode(vma->vm_file);
+ struct super_block *sb = inode->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE;
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
+ down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
EXT4_DATA_TRANS_BLOCKS(sb));
- }
+ } else
+ down_read(&EXT4_I(inode)->i_mmap_sem);
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
@@ -228,8 +231,10 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
+ up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(sb);
- }
+ } else
+ up_read(&EXT4_I(inode)->i_mmap_sem);
return result;
}
@@ -246,10 +251,12 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
+ down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
ext4_chunk_trans_blocks(inode,
PMD_SIZE / PAGE_SIZE));
- }
+ } else
+ down_read(&EXT4_I(inode)->i_mmap_sem);
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
@@ -260,30 +267,71 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
+ up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(sb);
- }
+ } else
+ up_read(&EXT4_I(inode)->i_mmap_sem);
return result;
}
static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- return dax_mkwrite(vma, vmf, ext4_get_block_dax,
- ext4_end_io_unwritten);
+ int err;
+ struct inode *inode = file_inode(vma->vm_file);
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vma->vm_file);
+ down_read(&EXT4_I(inode)->i_mmap_sem);
+ err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
+ ext4_end_io_unwritten);
+ up_read(&EXT4_I(inode)->i_mmap_sem);
+ sb_end_pagefault(inode->i_sb);
+
+ return err;
+}
+
+/*
+ * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
+ * handler we check for races against truncate. Note that since we cycle through
+ * i_mmap_sem, we are sure that also any hole punching that began before we
+ * were called is finished by now and so if it included part of the file we
+ * are working on, our pte will get unmapped and the check for pte_same() in
+ * wp_pfn_shared() fails. Thus the fault gets retried and things work out as
+ * desired.
+ */
+static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+ struct super_block *sb = inode->i_sb;
+ int ret = VM_FAULT_NOPAGE;
+ loff_t size;
+
+ sb_start_pagefault(sb);
+ file_update_time(vma->vm_file);
+ down_read(&EXT4_I(inode)->i_mmap_sem);
+ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (vmf->pgoff >= size)
+ ret = VM_FAULT_SIGBUS;
+ up_read(&EXT4_I(inode)->i_mmap_sem);
+ sb_end_pagefault(sb);
+
+ return ret;
}
static const struct vm_operations_struct ext4_dax_vm_ops = {
.fault = ext4_dax_fault,
.pmd_fault = ext4_dax_pmd_fault,
.page_mkwrite = ext4_dax_mkwrite,
- .pfn_mkwrite = dax_pfn_mkwrite,
+ .pfn_mkwrite = ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif
static const struct vm_operations_struct ext4_file_vm_ops = {
- .fault = filemap_fault,
+ .fault = ext4_filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = ext4_page_mkwrite,
};
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 933f1866b811..27d284042d1a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3590,6 +3590,35 @@ int ext4_can_truncate(struct inode *inode)
}
/*
+ * We have to make sure i_disksize gets properly updated before we truncate
+ * page cache due to hole punching or zero range. Otherwise i_disksize update
+ * can get lost as it may have been postponed to submission of writeback but
+ * that will never happen after we truncate page cache.
+ */
+int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
+ loff_t len)
+{
+ handle_t *handle;
+ loff_t size = i_size_read(inode);
+
+ WARN_ON(!mutex_is_locked(&inode->i_mutex));
+ if (offset > size || offset + len < size)
+ return 0;
+
+ if (EXT4_I(inode)->i_disksize >= size)
+ return 0;
+
+ handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ ext4_update_i_disksize(inode, size);
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+
+ return 0;
+}
+
+/*
* ext4_punch_hole: punches a hole in a file by releaseing the blocks
* associated with the given offset and length
*
@@ -3655,17 +3684,26 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
}
+ /* Wait all existing dio workers, newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+ * page cache.
+ */
+ down_write(&EXT4_I(inode)->i_mmap_sem);
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
/* Now release the pages and zero block aligned part of pages*/
- if (last_block_offset > first_block_offset)
+ if (last_block_offset > first_block_offset) {
+ ret = ext4_update_disksize_before_punch(inode, offset, length);
+ if (ret)
+ goto out_dio;
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
-
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
+ }
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
@@ -3712,16 +3750,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- /* Now release the pages again to reduce race window */
- if (last_block_offset > first_block_offset)
- truncate_pagecache_range(inode, first_block_offset,
- last_block_offset);
-
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
+ up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
@@ -4861,6 +4895,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
} else
ext4_wait_for_tail_page_commit(inode);
}
+ down_write(&EXT4_I(inode)->i_mmap_sem);
/*
* Truncate pagecache after we've waited for commit
* in data=journal mode to make pages freeable.
@@ -4868,6 +4903,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
truncate_pagecache(inode, inode->i_size);
if (shrink)
ext4_truncate(inode);
+ up_write(&EXT4_I(inode)->i_mmap_sem);
}
if (!rc) {
@@ -5119,6 +5155,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
might_sleep();
trace_ext4_mark_inode_dirty(inode, _RET_IP_);
err = ext4_reserve_inode_write(handle, inode, &iloc);
+ if (err)
+ return err;
if (ext4_handle_valid(handle) &&
EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
!ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
@@ -5149,9 +5187,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
}
}
}
- if (!err)
- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
- return err;
+ return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
/*
@@ -5316,6 +5352,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
+
+ down_read(&EXT4_I(inode)->i_mmap_sem);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
@@ -5385,6 +5423,19 @@ retry_alloc:
out_ret:
ret = block_page_mkwrite_return(ret);
out:
+ up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
}
+
+int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+ int err;
+
+ down_read(&EXT4_I(inode)->i_mmap_sem);
+ err = filemap_fault(vma, vmf);
+ up_read(&EXT4_I(inode)->i_mmap_sem);
+
+ return err;
+}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ba1cf0bf2f81..852c26806af2 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -958,6 +958,7 @@ static void init_once(void *foo)
INIT_LIST_HEAD(&ei->i_orphan);
init_rwsem(&ei->xattr_sem);
init_rwsem(&ei->i_data_sem);
+ init_rwsem(&ei->i_mmap_sem);
inode_init_once(&ei->vfs_inode);
}
diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
index 011ba6670d99..c70d06a383e2 100644
--- a/fs/ext4/truncate.h
+++ b/fs/ext4/truncate.h
@@ -10,8 +10,10 @@
*/
static inline void ext4_truncate_failed_write(struct inode *inode)
{
+ down_write(&EXT4_I(inode)->i_mmap_sem);
truncate_inode_pages(inode->i_mapping, inode->i_size);
ext4_truncate(inode);
+ up_write(&EXT4_I(inode)->i_mmap_sem);
}
/*
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 735d7522a3a9..204659a5f6db 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
int retnamlen = 0;
int truncate = 0;
int ret = 0;
+ char *p;
+ int len;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
@@ -267,12 +269,17 @@ repeat:
rr->u.NM.flags);
break;
}
- if ((strlen(retname) + rr->len - 5) >= 254) {
+ len = rr->len - 5;
+ if (retnamlen + len >= 254) {
truncate = 1;
break;
}
- strncat(retname, rr->u.NM.name, rr->len - 5);
- retnamlen += rr->len - 5;
+ p = memchr(rr->u.NM.name, '\0', len);
+ if (unlikely(p))
+ len = p - rr->u.NM.name;
+ memcpy(retname + retnamlen, rr->u.NM.name, len);
+ retnamlen += len;
+ retname[retnamlen] = '\0';
break;
case SIG('R', 'E'):
kfree(rs.buffer);
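
The rock.c hunk above replaces strncat() with a bounded copy that also stops at an embedded NUL inside the NM record. A minimal user-space sketch of that append logic (demo only, function and buffer names are made up) behaves like so:

#include <stdio.h>
#include <string.h>

/* Append up to len bytes of a possibly non-terminated name chunk, clamping
 * at the first embedded NUL and keeping the result NUL-terminated. */
static int append_nm(char *retname, int retnamlen, const char *name, int len)
{
	const char *p;

	if (retnamlen + len >= 254)	/* would overflow the 254-byte limit */
		return -1;
	p = memchr(name, '\0', len);
	if (p)
		len = p - name;
	memcpy(retname + retnamlen, name, len);
	retnamlen += len;
	retname[retnamlen] = '\0';
	return retnamlen;
}

int main(void)
{
	char buf[256] = "dir/";
	int n = append_nm(buf, 4, "file\0junk", 9);

	printf("%d %s\n", n, buf);	/* prints: 8 dir/file */
	return 0;
}
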
diff --git a/fs/namei.c b/fs/namei.c
index 558ea922a515..441033da002b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2912,22 +2912,10 @@ no_open:
dentry = lookup_real(dir, dentry, nd->flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
-
- if (create_error) {
- int open_flag = op->open_flag;
-
- error = create_error;
- if ((open_flag & O_EXCL)) {
- if (!dentry->d_inode)
- goto out;
- } else if (!dentry->d_inode) {
- goto out;
- } else if ((open_flag & O_TRUNC) &&
- d_is_reg(dentry)) {
- goto out;
- }
- /* will fail later, go on to get the right error */
- }
+ }
+ if (create_error && !dentry->d_inode) {
+ error = create_error;
+ goto out;
}
looked_up:
path->dentry = dentry;
@@ -4209,7 +4197,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
- if (source == target)
+ /*
+ * Check source == target.
+ * On overlayfs need to look at underlying inodes.
+ */
+ if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 0cdf497c91ef..2162434728c0 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
brelse(di_bh);
return acl;
}
+
+int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct posix_acl *acl;
+ int ret;
+
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
+ return 0;
+
+ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
+ if (IS_ERR(acl) || !acl)
+ return PTR_ERR(acl);
+ ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
+ if (ret)
+ return ret;
+ ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
+ acl, NULL, NULL);
+ posix_acl_release(acl);
+ return ret;
+}
+
+/*
+ * Initialize the ACLs of a new inode. If parent directory has default ACL,
+ * then clone to new inode. Called from ocfs2_mknod.
+ */
+int ocfs2_init_acl(handle_t *handle,
+ struct inode *inode,
+ struct inode *dir,
+ struct buffer_head *di_bh,
+ struct buffer_head *dir_bh,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_alloc_context *data_ac)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct posix_acl *acl = NULL;
+ int ret = 0, ret2;
+ umode_t mode;
+
+ if (!S_ISLNK(inode->i_mode)) {
+ if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+ acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
+ dir_bh);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ }
+ if (!acl) {
+ mode = inode->i_mode & ~current_umask();
+ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret) {
+ mlog_errno(ret);
+ goto cleanup;
+ }
+ }
+ }
+ if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
+ if (S_ISDIR(inode->i_mode)) {
+ ret = ocfs2_set_acl(handle, inode, di_bh,
+ ACL_TYPE_DEFAULT, acl,
+ meta_ac, data_ac);
+ if (ret)
+ goto cleanup;
+ }
+ mode = inode->i_mode;
+ ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
+ if (ret < 0)
+ return ret;
+
+ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret2) {
+ mlog_errno(ret2);
+ ret = ret2;
+ goto cleanup;
+ }
+ if (ret > 0) {
+ ret = ocfs2_set_acl(handle, inode,
+ di_bh, ACL_TYPE_ACCESS,
+ acl, meta_ac, data_ac);
+ }
+ }
+cleanup:
+ posix_acl_release(acl);
+ return ret;
+}
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
index 3fce68d08625..2783a75b3999 100644
--- a/fs/ocfs2/acl.h
+++ b/fs/ocfs2/acl.h
@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
struct posix_acl *acl,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac);
+extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
+extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
+ struct buffer_head *, struct buffer_head *,
+ struct ocfs2_alloc_context *,
+ struct ocfs2_alloc_context *);
#endif /* OCFS2_ACL_H */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 0e5b4515f92e..77d30cbd944d 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1268,20 +1268,20 @@ bail_unlock_rw:
if (size_change)
ocfs2_rw_unlock(inode, 1);
bail:
- brelse(bh);
/* Release quota pointers in case we acquired them */
for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
dqput(transfer_to[qtype]);
if (!status && attr->ia_valid & ATTR_MODE) {
- status = posix_acl_chmod(inode, inode->i_mode);
+ status = ocfs2_acl_chmod(inode, bh);
if (status < 0)
mlog_errno(status);
}
if (inode_locked)
ocfs2_inode_unlock(inode, 1);
+ brelse(bh);
return status;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 3123408da935..62af9554541d 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
int did_block_signals = 0;
- struct posix_acl *default_acl = NULL, *acl = NULL;
struct ocfs2_dentry_lock *dl = NULL;
trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
@@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
- status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
- if (status) {
- mlog_errno(status);
- goto leave;
- }
-
handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
S_ISDIR(mode),
xattr_credits));
@@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
inc_nlink(dir);
}
- if (default_acl) {
- status = ocfs2_set_acl(handle, inode, new_fe_bh,
- ACL_TYPE_DEFAULT, default_acl,
- meta_ac, data_ac);
- }
- if (!status && acl) {
- status = ocfs2_set_acl(handle, inode, new_fe_bh,
- ACL_TYPE_ACCESS, acl,
- meta_ac, data_ac);
- }
+ status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
+ meta_ac, data_ac);
if (status < 0) {
mlog_errno(status);
@@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
d_instantiate(dentry, inode);
status = 0;
leave:
- if (default_acl)
- posix_acl_release(default_acl);
- if (acl)
- posix_acl_release(acl);
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
if (handle)
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 252119860e6c..6a0c55d7dff0 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
struct inode *inode = d_inode(old_dentry);
struct buffer_head *old_bh = NULL;
struct inode *new_orphan_inode = NULL;
- struct posix_acl *default_acl, *acl;
- umode_t mode;
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
return -EOPNOTSUPP;
- mode = inode->i_mode;
- error = posix_acl_create(dir, &mode, &default_acl, &acl);
- if (error) {
- mlog_errno(error);
- return error;
- }
- error = ocfs2_create_inode_in_orphan(dir, mode,
+ error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
&new_orphan_inode);
if (error) {
mlog_errno(error);
@@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
/* If the security isn't preserved, we need to re-initialize them. */
if (!preserve) {
error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
- &new_dentry->d_name,
- default_acl, acl);
+ &new_dentry->d_name);
if (error)
mlog_errno(error);
}
out:
- if (default_acl)
- posix_acl_release(default_acl);
- if (acl)
- posix_acl_release(acl);
if (!error) {
error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
new_dentry);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index e9164f09841b..877830b05e12 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7197,12 +7197,10 @@ out:
*/
int ocfs2_init_security_and_acl(struct inode *dir,
struct inode *inode,
- const struct qstr *qstr,
- struct posix_acl *default_acl,
- struct posix_acl *acl)
+ const struct qstr *qstr)
{
- struct buffer_head *dir_bh = NULL;
int ret = 0;
+ struct buffer_head *dir_bh = NULL;
ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
if (ret) {
@@ -7215,11 +7213,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
mlog_errno(ret);
goto leave;
}
-
- if (!ret && default_acl)
- ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
- if (!ret && acl)
- ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
+ if (ret)
+ mlog_errno(ret);
ocfs2_inode_unlock(dir, 0);
brelse(dir_bh);
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index f10d5b93c366..1633cc15ea1f 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
bool preserve_security);
int ocfs2_init_security_and_acl(struct inode *dir,
struct inode *inode,
- const struct qstr *qstr,
- struct posix_acl *default_acl,
- struct posix_acl *acl);
+ const struct qstr *qstr);
#endif /* OCFS2_XATTR_H */
diff --git a/fs/open.c b/fs/open.c
index 6a24f988d253..157b9940dd73 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
int vfs_open(const struct path *path, struct file *file,
const struct cred *cred)
{
- struct dentry *dentry = path->dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
- file->f_path = *path;
- if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
- inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
- }
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ file->f_path = *path;
return do_dentry_open(file, inode, NULL, cred);
}
diff --git a/fs/pnode.c b/fs/pnode.c
index 6367e1e435c6..99899705b105 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -198,10 +198,15 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
-static struct mount *last_dest, *last_source, *dest_master;
+static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;
+static inline bool peers(struct mount *m1, struct mount *m2)
+{
+ return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
+}
+
static int propagate_one(struct mount *m)
{
struct mount *child;
@@ -212,24 +217,26 @@ static int propagate_one(struct mount *m)
/* skip if mountpoint isn't covered by it */
if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
return 0;
- if (m->mnt_group_id == last_dest->mnt_group_id) {
+ if (peers(m, last_dest)) {
type = CL_MAKE_SHARED;
} else {
struct mount *n, *p;
+ bool done;
for (n = m; ; n = p) {
p = n->mnt_master;
- if (p == dest_master || IS_MNT_MARKED(p)) {
- while (last_dest->mnt_master != p) {
- last_source = last_source->mnt_master;
- last_dest = last_source->mnt_parent;
- }
- if (n->mnt_group_id != last_dest->mnt_group_id) {
- last_source = last_source->mnt_master;
- last_dest = last_source->mnt_parent;
- }
+ if (p == dest_master || IS_MNT_MARKED(p))
break;
- }
}
+ do {
+ struct mount *parent = last_source->mnt_parent;
+ if (last_source == first_source)
+ break;
+ done = parent->mnt_master == p;
+ if (done && peers(n, parent))
+ break;
+ last_source = last_source->mnt_master;
+ } while (!done);
+
type = CL_SLAVE;
/* beginning of peer group among the slaves? */
if (IS_MNT_SHARED(m))
@@ -281,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
*/
user_ns = current->nsproxy->mnt_ns->user_ns;
last_dest = dest_mnt;
+ first_source = source_mnt;
last_source = source_mnt;
mp = dest_mp;
list = tree_list;
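
The propagation rewrite above hinges on the small peers() helper: two mounts belong to the same peer group only when they carry the same non-zero group id. A user-space sketch of just that predicate (demo types, not kernel code):

#include <assert.h>
#include <stdbool.h>

struct demo_mount { int mnt_group_id; };

static bool peers(const struct demo_mount *a, const struct demo_mount *b)
{
	return a->mnt_group_id == b->mnt_group_id && a->mnt_group_id;
}

int main(void)
{
	struct demo_mount x = { 7 }, y = { 7 }, z = { 0 }, w = { 0 };

	assert(peers(&x, &y));
	assert(!peers(&z, &w));	/* group id 0 means "not in any peer group" */
	return 0;
}
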
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 98b4b03dfe31..de2dcc1d1167 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -954,7 +954,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
int ret = 0;
struct mm_struct *mm = file->private_data;
- if (!mm)
+ /* Ensure the process spawned far enough to have an environment. */
+ if (!mm || !mm->env_end)
return 0;
page = (char *)__get_free_page(GFP_TEMPORARY);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ee79fadfc6e7..be3003ef2b4e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1731,6 +1731,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
return page;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct page *page;
+ int nid;
+
+ if (!pmd_present(pmd))
+ return NULL;
+
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ if (!page)
+ return NULL;
+
+ if (PageReserved(page))
+ return NULL;
+
+ nid = page_to_nid(page);
+ if (!node_isset(nid, node_states[N_MEMORY]))
+ return NULL;
+
+ return page;
+}
+#endif
+
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
@@ -1740,13 +1766,13 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
pte_t *orig_pte;
pte_t *pte;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
- pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
- page = can_gather_numa_stats(huge_pte, vma, addr);
+ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
if (page)
- gather_stats(page, md, pte_dirty(huge_pte),
+ gather_stats(page, md, pmd_dirty(*pmd),
HPAGE_PMD_SIZE/PAGE_SIZE);
spin_unlock(ptl);
return 0;
@@ -1754,6 +1780,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd))
return 0;
+#endif
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, vma, addr);
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 1cbb8338edf3..827e4d3bbc7a 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#endif
/* Return a pointer with offset calculated */
-#define __set_fixmap_offset(idx, phys, flags) \
-({ \
- unsigned long addr; \
- __set_fixmap(idx, phys, flags); \
- addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
- addr; \
+#define __set_fixmap_offset(idx, phys, flags) \
+({ \
+ unsigned long ________addr; \
+ __set_fixmap(idx, phys, flags); \
+ ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+ ________addr; \
})
#define set_fixmap_offset(idx, phys) \
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index e56272c919b5..bf2d34c9d804 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 val;
preempt_disable();
- if (unlikely(get_user(val, uaddr) != 0))
+ if (unlikely(get_user(val, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
*uval = val;
preempt_enable();
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 461a0558bca4..cebecff536a3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
{
#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+ return false;
#else
return true;
#endif
diff --git a/include/dt-bindings/clock/msm-clocks-cobalt.h b/include/dt-bindings/clock/msm-clocks-cobalt.h
index 251b7e314238..4bacef303967 100644
--- a/include/dt-bindings/clock/msm-clocks-cobalt.h
+++ b/include/dt-bindings/clock/msm-clocks-cobalt.h
@@ -198,9 +198,7 @@
#define clk_gcc_gp1_clk 0x057f7b69
#define clk_gcc_gp2_clk 0x9bf83ffd
#define clk_gcc_gp3_clk 0xec6539ee
-#define clk_gcc_gpu_snoc_dvm_gfx_clk 0xc9147451
#define clk_gcc_gpu_bimc_gfx_clk 0x3909459b
-#define clk_gcc_gpu_bimc_gfx_src_clk 0x377cb748
#define clk_gcc_gpu_cfg_ahb_clk 0x72f20a57
#define clk_gcc_gpu_iref_clk 0xfd82abad
#define clk_gcc_hmss_ahb_clk 0x62818713
diff --git a/include/dt-bindings/clock/msm-clocks-hwio-cobalt.h b/include/dt-bindings/clock/msm-clocks-hwio-cobalt.h
index 81d75bc9a8d6..f10afffc74b2 100644
--- a/include/dt-bindings/clock/msm-clocks-hwio-cobalt.h
+++ b/include/dt-bindings/clock/msm-clocks-hwio-cobalt.h
@@ -167,9 +167,7 @@
#define GCC_GP2_CBCR 0x65000
#define GCC_GP3_CBCR 0x66000
#define GCC_GPU_BIMC_GFX_CBCR 0x71010
-#define GCC_GPU_BIMC_GFX_SRC_CBCR 0x7100C
#define GCC_GPU_CFG_AHB_CBCR 0x71004
-#define GCC_GPU_SNOC_DVM_GFX_CBCR 0x71018
#define GCC_GPU_IREF_EN 0x88010
#define GCC_HMSS_AHB_CBCR 0x48000
#define GCC_HMSS_DVM_BUS_CBCR 0x4808C
diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h
index 7be94d298b88..a822ba8c07d1 100644
--- a/include/linux/bluetooth-power.h
+++ b/include/linux/bluetooth-power.h
@@ -85,4 +85,5 @@ struct bluetooth_power_platform_data {
int bt_register_slimdev(struct device *dev);
#define BT_CMD_SLIM_TEST 0xbfac
+#define BT_CMD_PWR_CTRL 0xbfad
#endif /* __LINUX_BLUETOOTH_POWER_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 83d1926c61e4..67bc2da5d233 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -165,12 +165,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index e63d3a513e67..788c7c49a673 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -435,6 +435,7 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
+ void (*post_attach)(void);
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0de2d0c780d7..9a4a80ae5eaf 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -387,6 +387,7 @@ struct clk_divider {
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
+extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
diff --git a/include/linux/clk/msm-clk.h b/include/linux/clk/msm-clk.h
index 22587e8852e2..964909d25021 100644
--- a/include/linux/clk/msm-clk.h
+++ b/include/linux/clk/msm-clk.h
@@ -14,6 +14,16 @@
#include <linux/notifier.h>
+#if defined(CONFIG_COMMON_CLK_QCOM)
+enum branch_mem_flags {
+ CLKFLAG_RETAIN_PERIPH,
+ CLKFLAG_NORETAIN_PERIPH,
+ CLKFLAG_RETAIN_MEM,
+ CLKFLAG_NORETAIN_MEM,
+ CLKFLAG_PERIPH_OFF_SET,
+ CLKFLAG_PERIPH_OFF_CLEAR,
+};
+#elif defined(CONFIG_COMMON_CLK_MSM)
#define CLKFLAG_INVERT 0x00000001
#define CLKFLAG_NOINVERT 0x00000002
#define CLKFLAG_NONEST 0x00000004
@@ -32,6 +42,7 @@
#define CLKFLAG_EPROBE_DEFER 0x00010000
#define CLKFLAG_PERIPH_OFF_SET 0x00020000
#define CLKFLAG_PERIPH_OFF_CLEAR 0x00040000
+#endif
struct clk_lookup;
struct clk;
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c156f5082758..e221494fb7a0 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -31,6 +31,11 @@
typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
unsigned long voltage, u32 *power);
+struct cpu_cooling_ops {
+ int (*ceil_limit)(int, u32);
+ int (*get_cur_state)(int, unsigned long *);
+};
+
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -43,6 +48,10 @@ struct thermal_cooling_device *
cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
u32 capacitance, get_static_t plat_static_func);
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+ struct cpu_cooling_ops *ops);
+
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
@@ -112,6 +121,13 @@ of_cpufreq_power_cooling_register(struct device_node *np,
return NULL;
}
+static inline struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+ struct cpu_cooling_ops *ops)
+{
+ return NULL;
+}
+
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
-extern void cpuset_post_attach_flush(void);
-
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index e4221f7c5b53..d184e283cf81 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -593,4 +593,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
return dentry;
}
+static inline struct inode *vfs_select_inode(struct dentry *dentry,
+ unsigned open_flags)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
+ inode = dentry->d_op->d_select_inode(dentry, open_flags);
+
+ return inode;
+}
+
+
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 47be3ad7d3e5..333d0ca6940f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -299,7 +299,7 @@ typedef struct {
void *open_protocol_information;
void *protocols_per_handle;
void *locate_handle_buffer;
- void *locate_protocol;
+ efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
void *install_multiple_protocol_interfaces;
void *uninstall_multiple_protocol_interfaces;
void *calculate_crc32;
@@ -599,6 +599,10 @@ void efi_native_runtime_setup(void);
#define EFI_PROPERTIES_TABLE_GUID \
EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
+#define EFI_RNG_PROTOCOL_GUID \
+ EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
+ 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+
typedef struct {
efi_guid_t guid;
u64 table;
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c399392..a75b1009d3f7 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -33,12 +33,28 @@
#error Wordsize not 32 or 64
#endif
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
- hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_64;
#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
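
The comment block above argues for a multiplicative hash keyed on the golden ratio. A standalone user-space sketch of the 64-bit case (demo only; the constant mirrors the GOLDEN_RATIO_64 value added above) keeps the top "bits" bits of the product:

#include <stdint.h>
#include <stdio.h>

#define DEMO_GOLDEN_RATIO_64 0x61C8864680B583EBull

static uint64_t demo_hash_64(uint64_t val, unsigned int bits)
{
	/* The high bits of the product are the best-mixed ones. */
	return (val * DEMO_GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
	/* Neighbouring keys land in well-spread buckets of a 1024-entry table. */
	printf("%llu %llu\n",
	       (unsigned long long)demo_hash_64(1, 10),
	       (unsigned long long)demo_hash_64(2, 10));
	return 0;
}
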
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 685c262e0be8..b0eb06423d5e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -96,9 +96,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct address_space *mapping,
pgoff_t idx, unsigned long address);
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
-#endif
extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h
index 0fe0e36c551f..de1163348c05 100644
--- a/include/linux/ipa_usb.h
+++ b/include/linux/ipa_usb.h
@@ -253,6 +253,7 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
* @dl_clnt_hdl: client handle previously obtained from
* ipa_usb_xdci_connect() for IN channel
* @teth_prot: tethering protocol
+ * @with_remote_wakeup: Does host support remote wakeup?
*
* Note: Should not be called from atomic context
* Note: for DPL, the ul will be ignored as irrelevant
@@ -260,7 +261,8 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot);
+ enum ipa_usb_teth_prot teth_prot,
+ bool with_remote_wakeup);
/**
* ipa_usb_xdci_resume - Peripheral should call this function to resume
@@ -313,7 +315,8 @@ static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
}
static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot)
+ enum ipa_usb_teth_prot teth_prot,
+ bool with_remote_wakeup)
{
return -EPERM;
}
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b288965e8101..2c14eeca46f0 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -173,10 +173,12 @@ enum s2mps11_regulators {
#define S2MPS11_LDO_VSEL_MASK 0x3F
#define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_BUCK9_VSEL_MASK 0x1F
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
#define S2MPS11_ENABLE_SHIFT 0x06
#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b473cbfa7ef..a91b67b18a73 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -334,6 +334,17 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
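
The arithmetic in the comment above is worth spelling out: a 512-byte read WQE, minus a 16-byte control segment and a 16-byte RDMA segment, leaves 480 bytes, or 480/16 = 30 scatter entries. A trivial standalone check (demo only):

#include <assert.h>

int main(void)
{
	/* 512-byte WQE - ctrl (16) - rdma (16), 16 bytes per scatter element */
	assert((512 - 16 - 16) / 16 == 30);
	return 0;
}
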
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index af3efd9157f0..412aa988c6ad 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -792,9 +792,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cbe9b794c714..57a44fa9ab89 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1097,6 +1097,8 @@ struct zap_details {
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
diff --git a/include/linux/net.h b/include/linux/net.h
index 0b4ac7da583a..25ef630f1bd6 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -245,7 +245,15 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ net_ratelimit()) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index 7c12823894df..652d68ac63bf 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -177,6 +177,10 @@
/* PMICOBALT */
#define PMICOBALT_SUBTYPE 0x15
+/* PMFALCON */
+#define PM2FALCON_SUBTYPE 0x1A
+#define PMFALCON_SUBTYPE 0x1B
+
#define PMICOBALT_V1P0_REV1 0x00
#define PMICOBALT_V1P0_REV2 0x00
#define PMICOBALT_V1P0_REV3 0x00
diff --git a/kernel/sched/core_ctl.h b/include/linux/sched/core_ctl.h
index 3b0c12acb9c0..98d7cb3e899b 100644
--- a/kernel/sched/core_ctl.h
+++ b/include/linux/sched/core_ctl.h
@@ -16,9 +16,12 @@
#ifdef CONFIG_SCHED_CORE_CTL
void core_ctl_check(u64 wallclock);
-void core_ctl_set_boost(bool boost);
+int core_ctl_set_boost(bool boost);
#else
static inline void core_ctl_check(u64 wallclock) {}
-static inline void core_ctl_set_boost(bool boost) {}
+static inline int core_ctl_set_boost(bool boost)
+{
+ return 0;
+}
#endif
#endif
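
Since core_ctl_set_boost() now returns an error code (and the !CONFIG_SCHED_CORE_CTL stub returns 0), callers are expected to check it. A hedged caller sketch (the function name is hypothetical, not from the patch):

static void demo_input_boost(bool enable)
{
	int ret = core_ctl_set_boost(enable);

	if (ret)
		pr_err("core_ctl: %s boost failed: %d\n",
		       enable ? "set" : "clear", ret);
}
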
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 4984d372b04b..7d250f14d032 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -363,6 +363,7 @@ struct vb2_ops {
};
struct vb2_buf_ops {
+ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
struct vb2_plane *planes);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 801315d1d405..35c527c7d2e4 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -695,6 +695,18 @@ struct cfg80211_acl_data {
struct mac_address mac_addrs[];
};
+/*
+ * cfg80211_bitrate_mask - masks for bitrate control
+ */
+struct cfg80211_bitrate_mask {
+ struct {
+ u32 legacy;
+ u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ enum nl80211_txrate_gi gi;
+ } control[IEEE80211_NUM_BANDS];
+};
+
/**
* struct cfg80211_ap_settings - AP configuration
*
@@ -719,6 +731,7 @@ struct cfg80211_acl_data {
* MAC address based access control
* @pbss: If set, start as a PCP instead of AP. Relevant for DMG
* networks.
+ * @beacon_rate: masks for setting user configured beacon tx rate.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -738,6 +751,7 @@ struct cfg80211_ap_settings {
bool p2p_opp_ps;
const struct cfg80211_acl_data *acl;
bool pbss;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
@@ -770,6 +784,26 @@ struct cfg80211_csa_settings {
};
/**
+ * struct iface_combination_params - input parameters for interface combinations
+ *
+ * Used to pass interface combination parameters
+ *
+ * @num_different_channels: the number of different channels we want
+ * to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ * width where radar detection is needed, as in the definition of
+ * &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the number of interfaces of each interface
+ * type. The index is the interface type as specified in &enum
+ * nl80211_iftype.
+ */
+struct iface_combination_params {
+ int num_different_channels;
+ u8 radar_detect;
+ int iftype_num[NUM_NL80211_IFTYPES];
+};
+
+/**
* enum station_parameters_apply_mask - station parameter values to apply
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
@@ -1945,17 +1979,6 @@ enum wiphy_params_flags {
WIPHY_PARAM_DYN_ACK = 1 << 5,
};
-/*
- * cfg80211_bitrate_mask - masks for bitrate control
- */
-struct cfg80211_bitrate_mask {
- struct {
- u32 legacy;
- u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
- u16 vht_mcs[NL80211_VHT_NSS_MAX];
- enum nl80211_txrate_gi gi;
- } control[IEEE80211_NUM_BANDS];
-};
/**
* struct cfg80211_pmksa - PMK Security Association
*
@@ -5330,36 +5353,20 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
* cfg80211_check_combinations - check interface combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
- *
+ * @params: the interface combination parameters
+ *
* This function can be called by the driver to check whether a
* combination of interfaces and their types are allowed according to
* the interface combinations.
*/
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES]);
+ struct iface_combination_params *params);
/**
* cfg80211_iter_combinations - iterate over matching combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
+ * @params: the interface combination parameters
* @iter: function to call for each matching combination
* @data: pointer to pass to iter function
*
@@ -5368,9 +5375,7 @@ int cfg80211_check_combinations(struct wiphy *wiphy,
* purposes.
*/
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
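
With the two prototypes above switched over, drivers build a struct iface_combination_params instead of passing three loose arguments. A hedged sketch of a caller (hypothetical driver code assuming net/cfg80211.h context; the interface counts are chosen purely for illustration):

static int demo_check_sta_plus_ap(struct wiphy *wiphy)
{
	struct iface_combination_params params = {
		.num_different_channels = 1,
	};

	params.iftype_num[NL80211_IFTYPE_STATION] = 1;
	params.iftype_num[NL80211_IFTYPE_AP] = 1;

	/* 0 means the STA+AP combination on a single channel is allowed. */
	return cfg80211_check_combinations(wiphy, &params);
}
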
diff --git a/include/net/codel.h b/include/net/codel.h
index 267e70210061..d168aca115cc 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -162,12 +162,14 @@ struct codel_vars {
* struct codel_stats - contains codel shared variables and stats
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
+ * @drop_len: bytes of dropped packets in dequeue()
 * @ecn_mark: number of packets we ECN marked instead of dropping
 * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
+ u32 drop_len;
u32 ecn_mark;
u32 ce_mark;
};
@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
goto end;
}
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
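
The new drop_len counter exists so that codel-based qdiscs can report byte totals as well as packet counts when trimming the parent queue; see the qdisc_tree_reduce_backlog() rename in the sch_generic.h hunk further down. A hedged sketch of how a dequeue path might flush the counters (helper name is an assumption, not from the patch):

static void demo_flush_codel_drops(struct Qdisc *sch, struct codel_stats *stats)
{
	/* Report both packet and byte totals to the parent qdiscs, then
	 * reset the temporary counters for the next dequeue pass. */
	if (stats->drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, stats->drop_count,
					  stats->drop_len);
		stats->drop_count = 0;
		stats->drop_len = 0;
	}
}
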
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 0816c872b689..a6cc576fd467 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
}
#endif /* CONFIG_IP_VS_NFCT */
+/* Really using conntrack? */
+static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_VS_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ if (!(cp->flags & IP_VS_CONN_F_NFCT))
+ return false;
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct && !nf_ct_is_untracked(ct))
+ return true;
+#endif
+ return false;
+}
+
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index b2a8e6338576..86df0835f6b5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+ unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -698,6 +699,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
sch->qstats.backlog = 0;
}
+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ struct Qdisc **pold)
+{
+ struct Qdisc *old;
+
+ sch_tree_lock(sch);
+ old = *pold;
+ *pold = new;
+ if (old != NULL) {
+ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+ qdisc_reset(old);
+ }
+ sch_tree_unlock(sch);
+
+ return old;
+}
+
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
struct sk_buff_head *list)
{
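
qdisc_replace() above folds the lock / swap / reduce-backlog / reset sequence into one helper. A hedged sketch of a classful qdisc's ->graft() callback using it (the qdisc and its "child" field are hypothetical):

static int demo_graft(struct Qdisc *sch, unsigned long arg,
		      struct Qdisc *new, struct Qdisc **old)
{
	struct demo_sched_data *q = qdisc_priv(sch);

	if (!new)
		new = &noop_qdisc;

	/* Swaps in the new child and deflates the parent's qlen/backlog
	 * by whatever the old child still held. */
	*old = qdisc_replace(sch, new, &q->child);
	return 0;
}
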
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
#define _RDMA_IB_H
#include <linux/types.h>
+#include <linux/sched.h>
struct ib_addr {
union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
__u64 sib_scope_id;
};
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
#endif /* _RDMA_IB_H */
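
The helper above is meant to be called at the top of the legacy write() handlers. A hedged sketch of such a handler (hypothetical driver function; the real call sites are the ucm/ucma/uverbs write paths):

static ssize_t demo_ib_write(struct file *filp, const char __user *buf,
			     size_t len, loff_t *pos)
{
	/* Refuse writes that arrive with someone else's credentials or a
	 * kernel-space segment, which is how the suid/splice tricks reach
	 * this path. */
	if (!ib_safe_file_access(filp))
		return -EACCES;

	/* ... parse and dispatch the command as before ... */
	return len;
}
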
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cd15ae7b8b0c..7778ff3947de 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1323,6 +1323,21 @@ TRACE_EVENT(core_ctl_set_busy,
__entry->is_busy)
);
+TRACE_EVENT(core_ctl_set_boost,
+
+ TP_PROTO(u32 refcount, s32 ret),
+ TP_ARGS(refcount, ret),
+ TP_STRUCT__entry(
+ __field(u32, refcount)
+ __field(s32, ret)
+ ),
+ TP_fast_assign(
+ __entry->refcount = refcount;
+ __entry->ret = ret;
+ ),
+ TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret)
+);
+
/**
* sched_isolate - called when cores are isolated/unisolated
*
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 0bac6947a1cb..d6ff882ad6a7 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -381,6 +381,7 @@ header-y += prctl.h
header-y += psci.h
header-y += ptp_clock.h
header-y += ptrace.h
+header-y += qbt1000.h
header-y += qcedev.h
header-y += qcota.h
header-y += qnx4_fs.h
diff --git a/include/uapi/linux/hbtp_input.h b/include/uapi/linux/hbtp_input.h
index 67692ed8e3b8..9173c2ab72ed 100644
--- a/include/uapi/linux/hbtp_input.h
+++ b/include/uapi/linux/hbtp_input.h
@@ -43,6 +43,17 @@ struct hbtp_input_key {
__s32 value;
};
+enum hbtp_afe_signal {
+ HBTP_AFE_SIGNAL_ON_RESUME,
+ HBTP_AFE_SIGNAL_ON_SUSPEND,
+};
+
+enum hbtp_afe_power_ctrl {
+ HBTP_AFE_POWER_ENABLE_SYNC,
+ HBTP_AFE_POWER_ENABLE_SYNC_SIGNAL,
+};
+
+
/* ioctl */
#define HBTP_INPUT_IOCTL_BASE 'T'
#define HBTP_SET_ABSPARAM _IOW(HBTP_INPUT_IOCTL_BASE, 201, \
@@ -53,6 +64,10 @@ struct hbtp_input_key {
enum hbtp_afe_power_cmd)
#define HBTP_SET_KEYDATA _IOW(HBTP_INPUT_IOCTL_BASE, 204, \
struct hbtp_input_key)
+#define HBTP_SET_SYNCSIGNAL _IOW(HBTP_INPUT_IOCTL_BASE, 205, \
+ enum hbtp_afe_signal)
+#define HBTP_SET_POWER_CTRL _IOW(HBTP_INPUT_IOCTL_BASE, 206, \
+ enum hbtp_afe_power_ctrl)
#endif /* _UAPI_HBTP_INPUT_H */
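
From user space the two new ioctls are issued on the hbtp_input character device. A hedged sketch (the /dev node name and the by-pointer argument convention are assumptions, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hbtp_input.h>

int main(void)
{
	enum hbtp_afe_power_ctrl ctrl = HBTP_AFE_POWER_ENABLE_SYNC;
	int fd = open("/dev/hbtp_input", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, HBTP_SET_POWER_CTRL, &ctrl))
		perror("HBTP_SET_POWER_CTRL");
	close(fd);
	return 0;
}
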
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index 9cf2394f0bcf..752f5dc040a5 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -19,14 +19,20 @@
#ifndef _LINUX_IF_H
#define _LINUX_IF_H
+#include <linux/libc-compat.h> /* for compatibility with glibc */
#include <linux/types.h> /* for "__kernel_caddr_t" et al */
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/compiler.h> /* for "__user" et al */
+#if __UAPI_DEF_IF_IFNAMSIZ
#define IFNAMSIZ 16
+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
#define IFALIASZ 256
#include <linux/hdlc/ioctl.h>
+/* For glibc compatibility. An empty enum does not compile. */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+ __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
/**
* enum net_device_flags - &struct net_device flags
*
@@ -68,6 +74,8 @@
* @IFF_ECHO: echo sent packets. Volatile.
*/
enum net_device_flags {
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
IFF_UP = 1<<0, /* sysfs */
IFF_BROADCAST = 1<<1, /* volatile */
IFF_DEBUG = 1<<2, /* sysfs */
@@ -84,11 +92,17 @@ enum net_device_flags {
IFF_PORTSEL = 1<<13, /* sysfs */
IFF_AUTOMEDIA = 1<<14, /* sysfs */
IFF_DYNAMIC = 1<<15, /* sysfs */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
IFF_LOWER_UP = 1<<16, /* volatile */
IFF_DORMANT = 1<<17, /* volatile */
IFF_ECHO = 1<<18, /* volatile */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
};
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
#define IFF_UP IFF_UP
#define IFF_BROADCAST IFF_BROADCAST
#define IFF_DEBUG IFF_DEBUG
@@ -105,9 +119,13 @@ enum net_device_flags {
#define IFF_PORTSEL IFF_PORTSEL
#define IFF_AUTOMEDIA IFF_AUTOMEDIA
#define IFF_DYNAMIC IFF_DYNAMIC
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define IFF_LOWER_UP IFF_LOWER_UP
#define IFF_DORMANT IFF_DORMANT
#define IFF_ECHO IFF_ECHO
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -166,6 +184,8 @@ enum {
* being very small might be worth keeping for clean configuration.
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFMAP
struct ifmap {
unsigned long mem_start;
unsigned long mem_end;
@@ -175,6 +195,7 @@ struct ifmap {
unsigned char port;
/* 3 bytes spare */
};
+#endif /* __UAPI_DEF_IF_IFMAP */
struct if_settings {
unsigned int type; /* Type of physical device or protocol */
@@ -200,6 +221,8 @@ struct if_settings {
* remainder may be interface specific.
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFREQ
struct ifreq {
#define IFHWADDRLEN 6
union
@@ -223,6 +246,7 @@ struct ifreq {
struct if_settings ifru_settings;
} ifr_ifru;
};
+#endif /* __UAPI_DEF_IF_IFREQ */
#define ifr_name ifr_ifrn.ifrn_name /* interface name */
#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
@@ -249,6 +273,8 @@ struct ifreq {
* must know all networks accessible).
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFCONF
struct ifconf {
int ifc_len; /* size of buffer */
union {
@@ -256,6 +282,8 @@ struct ifconf {
struct ifreq __user *ifcu_req;
} ifc_ifcu;
};
+#endif /* __UAPI_DEF_IF_IFCONF */
+
#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 7d024ceb075d..d5e38c73377c 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -51,6 +51,40 @@
/* We have included glibc headers... */
#if defined(__GLIBC__)
+/* Coordinate with glibc net/if.h header. */
+#if defined(_NET_IF_H)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+
+#define __UAPI_DEF_IF_IFCONF 0
+#define __UAPI_DEF_IF_IFMAP 0
+#define __UAPI_DEF_IF_IFNAMSIZ 0
+#define __UAPI_DEF_IF_IFREQ 0
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+#else /* _NET_IF_H */
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
+#endif /* _NET_IF_H */
+
/* Coordinate with glibc netinet/in.h header. */
#if defined(_NETINET_IN_H)
@@ -117,6 +151,16 @@
* that we need. */
#else /* !defined(__GLIBC__) */
+/* Definitions for if.h */
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
/* Definitions for in.h */
#define __UAPI_DEF_IN_ADDR 1
#define __UAPI_DEF_IN_IPPROTO 1
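
The net effect of the libc-compat guards is that user space can include glibc's net/if.h and the kernel's linux/if.h together, in either order, without duplicate definitions. A quick user-space check (demo only; it assumes a glibc and kernel-headers combination that both ship these guards):

#include <net/if.h>
#include <linux/if.h>
#include <stdio.h>

int main(void)
{
	struct ifreq req;

	snprintf(req.ifr_name, IFNAMSIZ, "eth0");
	printf("%s up-flag=0x%x\n", req.ifr_name, IFF_UP);
	return 0;
}
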
diff --git a/include/uapi/linux/msm_mdp.h b/include/uapi/linux/msm_mdp.h
index 1df65c7f90b3..f0ac02e9c7a8 100644
--- a/include/uapi/linux/msm_mdp.h
+++ b/include/uapi/linux/msm_mdp.h
@@ -1406,12 +1406,21 @@ enum {
MDP_WRITEBACK_MIRROR_RESUME,
};
+/*
+ * The enum values are continued below as preprocessor macro definitions
+ */
enum mdp_color_space {
MDP_CSC_ITU_R_601,
MDP_CSC_ITU_R_601_FR,
MDP_CSC_ITU_R_709,
};
+/*
+ * These definitions are a continuation of the mdp_color_space enum above
+ */
+#define MDP_CSC_ITU_R_2020 (MDP_CSC_ITU_R_709 + 1)
+#define MDP_CSC_ITU_R_2020_FR (MDP_CSC_ITU_R_2020 + 1)
+
enum {
mdp_igc_v1_7 = 1,
mdp_igc_vmax,
diff --git a/include/uapi/linux/qbt1000.h b/include/uapi/linux/qbt1000.h
new file mode 100644
index 000000000000..8a3be2c634d3
--- /dev/null
+++ b/include/uapi/linux/qbt1000.h
@@ -0,0 +1,99 @@
+#ifndef _UAPI_QBT1000_H_
+#define _UAPI_QBT1000_H_
+
+#define MAX_NAME_SIZE 32
+
+/*
+* enum qbt1000_commands -
+* enumeration of command options
+* @QBT1000_LOAD_APP - cmd loads TZ app
+* @QBT1000_UNLOAD_APP - cmd unloads TZ app
+* @QBT1000_SEND_TZCMD - sends cmd to TZ app
+* @QBT1000_SET_FINGER_DETECT_KEY - sets the input key to send on finger detect
+* @QBT1000_CONFIGURE_POWER_KEY - enables/disables sending the power key on
+* finger down events
+*/
+enum qbt1000_commands {
+ QBT1000_LOAD_APP = 100,
+ QBT1000_UNLOAD_APP = 101,
+ QBT1000_SEND_TZCMD = 102,
+ QBT1000_SET_FINGER_DETECT_KEY = 103,
+ QBT1000_CONFIGURE_POWER_KEY = 104
+};
+
+/*
+* enum qbt1000_fw_event -
+* enumeration of firmware events
+* @FW_EVENT_FINGER_DOWN - finger down detected
+* @FW_EVENT_FINGER_UP - finger up detected
+* @FW_EVENT_CBGE_REQUIRED - an indication IPC from the firmware is pending
+*/
+enum qbt1000_fw_event {
+ FW_EVENT_FINGER_DOWN = 1,
+ FW_EVENT_FINGER_UP = 2,
+ FW_EVENT_CBGE_REQUIRED = 3,
+};
+
+/*
+* struct qbt1000_app -
+* used to load and unload apps in TZ
+* @app_handle - qseecom handle for clients
+* @name - Name of secure app to load
+* @size - Size of requested buffer of secure app
+* @high_band_width - 1 - for high bandwidth usage
+* 0 - for normal bandwidth usage
+*/
+struct qbt1000_app {
+ struct qseecom_handle **app_handle;
+ char name[MAX_NAME_SIZE];
+ uint32_t size;
+ uint8_t high_band_width;
+};
+
+/*
+* struct qbt1000_send_tz_cmd -
+* used to send cmds to the TZ App
+* @app_handle - qseecom handle for clients
+* @req_buf - Buffer containing request for secure app
+* @req_buf_len - Length of request buffer
+* @rsp_buf - Buffer containing response from secure app
+* @rsp_buf_len - Length of response buffer
+*/
+struct qbt1000_send_tz_cmd {
+ struct qseecom_handle *app_handle;
+ uint8_t *req_buf;
+ uint32_t req_buf_len;
+ uint8_t *rsp_buf;
+ uint32_t rsp_buf_len;
+};
+
+/*
+* struct qbt1000_erie_event -
+* used to receive events from Erie
+* @buf - Buffer containing event from Erie
+* @buf_len - Length of buffer
+*/
+struct qbt1000_erie_event {
+ uint8_t *buf;
+ uint32_t buf_len;
+};
+
+/*
+* struct qbt1000_set_finger_detect_key -
+* used to configure the input key which is sent on finger down/up events
+* @key_code - Key code to send on finger down/up. 0 disables sending key events
+*/
+struct qbt1000_set_finger_detect_key {
+ unsigned int key_code;
+};
+
+/*
+* struct qbt1000_configure_power_key -
+* used to configure whether the power key is sent on finger down
+* @enable - if non-zero, power key is sent on finger down
+*/
+struct qbt1000_configure_power_key {
+ unsigned int enable;
+};
+
+#endif /* _UAPI_QBT1000_H_ */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 4a3b35f10257..7be4c28cc1ca 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -2161,6 +2161,7 @@ struct v4l2_streamparm {
#define V4L2_EVENT_BITDEPTH_FLAG 0x1
#define V4L2_EVENT_PICSTRUCT_FLAG 0x2
+#define V4L2_EVENT_COLOUR_SPACE_FLAG 0x4
#define V4L2_EVENT_MSM_VIDC_START (V4L2_EVENT_PRIVATE_START + 0x00001000)
#define V4L2_EVENT_MSM_VIDC_FLUSH_DONE (V4L2_EVENT_MSM_VIDC_START + 1)
diff --git a/include/xen/page.h b/include/xen/page.h
index 96294ac93755..9dc46cb8a0fd 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -15,9 +15,9 @@
*/
#define xen_pfn_to_page(xen_pfn) \
- ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
+ (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
#define page_to_xen_pfn(page) \
- (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
+ ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
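
The rewritten macros shift by the difference of the two page shifts instead of shifting a pfn all the way up by one shift and back down by the other, which avoids overflowing the intermediate value (relevant on 32-bit). A minimal userspace sketch of the same arithmetic; the shift values below are hypothetical and not taken from the patch:

#include <stdio.h>

#define PAGE_SHIFT	16	/* 64 KiB kernel pages, hypothetical */
#define XEN_PAGE_SHIFT	12	/* 4 KiB Xen pages */

/* One kernel page covers several Xen pages; shift by the difference only. */
static unsigned long xen_pfn_to_pfn(unsigned long xen_pfn)
{
	return xen_pfn >> (PAGE_SHIFT - XEN_PAGE_SHIFT);
}

static unsigned long pfn_to_xen_pfn(unsigned long pfn)
{
	return pfn << (PAGE_SHIFT - XEN_PAGE_SHIFT);
}

int main(void)
{
	unsigned long xen_pfn = 0x12345;
	unsigned long pfn = xen_pfn_to_pfn(xen_pfn);

	/* The round trip lands on the first Xen pfn inside that kernel page. */
	printf("xen_pfn 0x%lx -> pfn 0x%lx -> xen_pfn 0x%lx\n",
	       xen_pfn, pfn, pfn_to_xen_pfn(pfn));
	return 0;
}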
diff --git a/init/do_mounts.h b/init/do_mounts.h
index f5b978a9bb92..375835f1d258 100644
--- a/init/do_mounts.h
+++ b/init/do_mounts.h
@@ -13,6 +13,9 @@ void mount_block_root(char *name, int flags);
void mount_root(void);
extern int root_mountflags;
+struct dm_table;
+static inline void dm_table_put(struct dm_table *t) { }
+
static inline int create_dev(char *name, dev_t dev)
{
sys_unlink(name);
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 5a8a797d50b7..d1a7646f79c5 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
{
switch (type) {
case BPF_TYPE_PROG:
- atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+ raw = bpf_prog_inc(raw);
break;
case BPF_TYPE_MAP:
- bpf_map_inc(raw, true);
+ raw = bpf_map_inc(raw, true);
break;
default:
WARN_ON_ONCE(1);
@@ -277,7 +277,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
goto out;
raw = bpf_any_get(inode->i_private, *type);
- touch_atime(&path);
+ if (!IS_ERR(raw))
+ touch_atime(&path);
path_put(&path);
return raw;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3b39550d8485..4e32cc94edd9 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -181,11 +181,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
return f.file->private_data;
}
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
- atomic_inc(&map->refcnt);
+ if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+ atomic_dec(&map->refcnt);
+ return ERR_PTR(-EBUSY);
+ }
if (uref)
atomic_inc(&map->usercnt);
+ return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -197,7 +204,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
if (IS_ERR(map))
return map;
- bpf_map_inc(map, true);
+ map = bpf_map_inc(map, true);
fdput(f);
return map;
@@ -580,6 +587,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
return f.file->private_data;
}
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+ if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+ atomic_dec(&prog->aux->refcnt);
+ return ERR_PTR(-EBUSY);
+ }
+ return prog;
+}
+
/* called by sockets/tracing/seccomp before attaching program to an event
* pairs with bpf_prog_put()
*/
@@ -592,7 +608,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
if (IS_ERR(prog))
return prog;
- atomic_inc(&prog->aux->refcnt);
+ prog = bpf_prog_inc(prog);
fdput(f);
return prog;
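
Both bpf_map_inc() and the new bpf_prog_inc() follow the same capped-refcount pattern: take the reference, and if the count went past BPF_MAX_REFCNT, drop it again and return -EBUSY so the counter cannot be overflowed from user space. A rough userspace sketch of that pattern, with hypothetical names rather than the kernel API:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_REFCNT 32768

struct object {
	atomic_int refcnt;
};

/* Take a reference; returns 0 on success, -EBUSY if the cap is exceeded. */
static int object_get(struct object *obj)
{
	if (atomic_fetch_add(&obj->refcnt, 1) + 1 > MAX_REFCNT) {
		atomic_fetch_sub(&obj->refcnt, 1);
		return -EBUSY;
	}
	return 0;
}

static void object_put(struct object *obj)
{
	atomic_fetch_sub(&obj->refcnt, 1);
}

int main(void)
{
	struct object obj = { .refcnt = 1 };	/* creator holds one reference */

	if (object_get(&obj) == 0) {
		printf("refcnt is now %d\n", atomic_load(&obj.refcnt));
		object_put(&obj);
	}
	return 0;
}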
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e7f7ab739e4..2cbfba78d3db 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
[CONST_IMM] = "imm",
};
-static const struct {
- int map_type;
- int func_id;
-} func_limit[] = {
- {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-};
-
static void print_verifier_state(struct verifier_env *env)
{
enum bpf_reg_type t;
@@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
- bool bool_map, bool_func;
- int i;
-
if (!map)
return 0;
- for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
- bool_map = (map->map_type == func_limit[i].map_type);
- bool_func = (func_id == func_limit[i].func_id);
- /* only when map & func pair match it can continue.
- * don't allow any other map type to be passed into
- * the special func;
- */
- if (bool_func && bool_map != bool_func)
- return -EINVAL;
+ /* We need a two way check, first is from map perspective ... */
+ switch (map->map_type) {
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ if (func_id != BPF_FUNC_tail_call)
+ goto error;
+ break;
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ if (func_id != BPF_FUNC_perf_event_read &&
+ func_id != BPF_FUNC_perf_event_output)
+ goto error;
+ break;
+ default:
+ break;
+ }
+
+ /* ... and second from the function itself. */
+ switch (func_id) {
+ case BPF_FUNC_tail_call:
+ if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+ goto error;
+ break;
+ case BPF_FUNC_perf_event_read:
+ case BPF_FUNC_perf_event_output:
+ if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+ goto error;
+ break;
+ default:
+ break;
}
return 0;
+error:
+ verbose("cannot pass map_type %d into func %d\n",
+ map->map_type, func_id);
+ return -EINVAL;
}
static int check_call(struct verifier_env *env, int func_id)
@@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+ BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose("BPF_LD_ABS uses reserved fields\n");
return -EINVAL;
@@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
- fdput(f);
return PTR_ERR(map);
}
@@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
return -E2BIG;
}
- /* remember this map */
- env->used_maps[env->used_map_cnt++] = map;
-
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
- bpf_map_inc(map, false);
+ map = bpf_map_inc(map, false);
+ if (IS_ERR(map)) {
+ fdput(f);
+ return PTR_ERR(map);
+ }
+ env->used_maps[env->used_map_cnt++] = map;
+
fdput(f);
next_insn:
insn++;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index cc6c7d0a6758..e2e784ad9e0f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2776,9 +2776,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off, bool threadgroup)
{
struct task_struct *tsk;
+ struct cgroup_subsys *ss;
struct cgroup *cgrp;
pid_t pid;
- int ret;
+ int ssid, ret;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
@@ -2826,8 +2827,10 @@ out_unlock_rcu:
rcu_read_unlock();
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
+ for_each_subsys(ss, ssid)
+ if (ss->post_attach)
+ ss->post_attach();
cgroup_kn_unlock(of->kn);
- cpuset_post_attach_flush();
return ret ?: nbytes;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a7ec545308a6..e3c0f38acbe6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -57,7 +57,6 @@
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
@@ -1029,7 +1028,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
}
}
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
{
flush_workqueue(cpuset_migrate_mm_wq);
}
@@ -2122,6 +2121,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.allow_attach = cpuset_allow_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
+ .post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.legacy_cftypes = files,
.early_init = 1,
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index adfdc0536117..014b69528194 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
bool truncated)
{
struct ring_buffer *rb = handle->rb;
+ bool wakeup = truncated;
unsigned long aux_head;
u64 flags = 0;
@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
- perf_output_wakeup(handle);
+ wakeup = true;
local_add(rb->aux_watermark, &rb->aux_wakeup);
}
+
+ if (wakeup) {
+ if (truncated)
+ handle->event->pending_disable = 1;
+ perf_output_wakeup(handle);
+ }
+
handle->event = NULL;
local_set(&rb->aux_nest, 0);
diff --git a/kernel/futex.c b/kernel/futex.c
index 461c72b2dac2..9d8163afd87c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1244,10 +1244,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
if (unlikely(should_fail_futex(true)))
ret = -EFAULT;
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
ret = -EFAULT;
- else if (curval != uval)
- ret = -EINVAL;
+ } else if (curval != uval) {
+ /*
+ * If an unconditional UNLOCK_PI operation (user space did not
+ * try the TID->0 transition) raced with a waiter setting the
+ * FUTEX_WAITERS flag between get_user() and locking the hash
+ * bucket lock, retry the operation.
+ */
+ if ((FUTEX_TID_MASK & curval) == uval)
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+ }
if (ret) {
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
@@ -1474,8 +1484,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
hb_waiters_dec(hb1);
- plist_add(&q->list, &hb2->chain);
hb_waiters_inc(hb2);
+ plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
@@ -2538,6 +2548,15 @@ retry:
if (ret == -EFAULT)
goto pi_faulted;
/*
+ * An unconditional UNLOCK_PI op raced against a waiter
+ * setting the FUTEX_WAITERS bit. Try again.
+ */
+ if (ret == -EAGAIN) {
+ spin_unlock(&hb->lock);
+ put_futex_key(&key);
+ goto retry;
+ }
+ /*
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 5b9102a47ea5..c835270f0c2f 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
node->locked = 0;
node->next = NULL;
- prev = xchg_acquire(lock, node);
+ /*
+ * We rely on the full barrier with global transitivity implied by the
+ * below xchg() to order the initialization stores above against any
+ * observation of @node. And to provide the ACQUIRE ordering associated
+ * with a LOCK primitive.
+ */
+ prev = xchg(lock, node);
if (likely(prev == NULL)) {
/*
* Lock acquired, don't need to set node->locked to 1. Threads
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7e7e19ed53c6..c07d844c576e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -75,6 +75,7 @@
#include <linux/context_tracking.h>
#include <linux/compiler.h>
#include <linux/irq.h>
+#include <linux/sched/core_ctl.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -85,7 +86,6 @@
#endif
#include "sched.h"
-#include "core_ctl.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"
@@ -2273,6 +2273,14 @@ void sched_exit(struct task_struct *p)
kfree(p->ravg.curr_window_cpu);
kfree(p->ravg.prev_window_cpu);
+ /*
+ * update_task_ravg() can be called for exiting tasks. While the
+ * function itself ensures correct behavior, the corresponding
+ * trace event requires that these pointers be NULL.
+ */
+ p->ravg.curr_window_cpu = NULL;
+ p->ravg.prev_window_cpu = NULL;
+
enqueue_task(rq, p, 0);
clear_ed_task(p, rq);
task_rq_unlock(rq, p, &flags);
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index d81886da7ca2..0db85a4fa9c8 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -45,7 +45,7 @@ struct cluster_data {
bool nrrun_changed;
struct task_struct *core_ctl_thread;
unsigned int first_cpu;
- bool boost;
+ unsigned int boost;
struct kobject kobj;
};
@@ -652,17 +652,40 @@ static bool do_check(u64 wallclock)
return do_check;
}
-void core_ctl_set_boost(bool boost)
+int core_ctl_set_boost(bool boost)
{
unsigned int index = 0;
struct cluster_data *cluster;
+ unsigned long flags;
+ int ret = 0;
+ bool boost_state_changed = false;
+ spin_lock_irqsave(&state_lock, flags);
for_each_cluster(cluster, index) {
- if (cluster->is_big_cluster && cluster->boost != boost) {
- cluster->boost = boost;
- apply_need(cluster);
+ if (cluster->is_big_cluster) {
+ if (boost) {
+ boost_state_changed = !cluster->boost;
+ ++cluster->boost;
+ } else {
+ if (!cluster->boost) {
+ pr_err("Error turning off boost. Boost already turned off\n");
+ ret = -EINVAL;
+ } else {
+ --cluster->boost;
+ boost_state_changed = !cluster->boost;
+ }
+ }
+ break;
}
}
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ if (boost_state_changed)
+ apply_need(cluster);
+
+ trace_core_ctl_set_boost(cluster->boost, ret);
+
+ return ret;
}
void core_ctl_check(u64 wallclock)
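
cluster->boost changes from a bool to a nesting count: each "on" call increments it, each "off" call decrements it, unbalanced "off" calls fail, and apply_need() only runs when the count crosses zero in either direction. A small userspace sketch of that counting scheme (names are illustrative only):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int boost_count;

/* Returns 0 on success; *changed tells the caller to re-apply settings. */
static int set_boost(bool on, bool *changed)
{
	*changed = false;

	if (on) {
		*changed = (boost_count == 0);
		boost_count++;
		return 0;
	}
	if (boost_count == 0)
		return -EINVAL;		/* boost already turned off */
	boost_count--;
	*changed = (boost_count == 0);
	return 0;
}

int main(void)
{
	bool changed;

	set_boost(true, &changed);	/* changed == true  (0 -> 1) */
	set_boost(true, &changed);	/* changed == false (nested) */
	set_boost(false, &changed);	/* changed == false (still boosted) */
	set_boost(false, &changed);	/* changed == true  (1 -> 0) */
	printf("final count: %u\n", boost_count);
	return 0;
}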
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 98ae45174a40..e0f212743c77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3056,7 +3056,8 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
static inline bool
wake_to_waker_cluster(struct cpu_select_env *env)
{
- return !env->need_idle && !env->reason && env->sync &&
+ return env->boost_type == SCHED_BOOST_NONE &&
+ !env->need_idle && !env->reason && env->sync &&
task_load(current) > sched_big_waker_task_load &&
task_load(env->p) < sched_small_wakee_task_load;
}
@@ -3524,7 +3525,7 @@ static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
BUG_ON(stats->nr_big_tasks < 0 ||
(s64)stats->cumulative_runnable_avg < 0);
- verify_pred_demands_sum(stats);
+ BUG_ON((s64)stats->pred_demands_sum < 0);
}
#else /* CONFIG_CFS_BANDWIDTH */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 7039eb8f1e63..1d55e226196f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -18,9 +18,9 @@
#include <linux/list_sort.h>
#include <linux/syscore_ops.h>
#include <linux/of.h>
+#include <linux/sched/core_ctl.h>
#include "sched.h"
-#include "core_ctl.h"
#include <trace/events/sched.h>
@@ -1387,7 +1387,7 @@ void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
-static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
{
stats->nr_big_tasks = 0;
if (reset_cra) {
@@ -3106,9 +3106,9 @@ static void reset_all_task_stats(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- raw_spin_lock(&p->pi_lock);
+ raw_spin_lock_irq(&p->pi_lock);
reset_task_stats(p);
- raw_spin_unlock(&p->pi_lock);
+ raw_spin_unlock_irq(&p->pi_lock);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 471dc9faab35..4289bf6cd642 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1407,6 +1407,7 @@ extern void inc_rq_hmp_stats(struct rq *rq,
struct task_struct *p, int change_cra);
extern void dec_rq_hmp_stats(struct rq *rq,
struct task_struct *p, int change_cra);
+extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
extern int is_big_task(struct task_struct *p);
extern int upmigrate_discouraged(struct task_struct *p);
extern struct sched_cluster *rq_cluster(struct rq *rq);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index fda3b6e1b3a0..26960e49bb8c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2108,8 +2108,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
trace_create_file("filter", 0644, file->dir, file,
&ftrace_event_filter_fops);
- trace_create_file("trigger", 0644, file->dir, file,
- &event_trigger_fops);
+ /*
+ * Only event directories that can be enabled should have
+ * triggers.
+ */
+ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ trace_create_file("trigger", 0644, file->dir, file,
+ &event_trigger_fops);
trace_create_file("format", 0444, file->dir, call,
&ftrace_event_format_fops);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ef84d9874d03..316b316c7528 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -652,6 +652,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+ /*
+ * The following mb guarantees that previous clear of a PENDING bit
+ * will not be reordered with any speculative LOADS or STORES from
+ * work->current_func, which is executed afterwards. This possible
+ * reordering can lead to a missed execution on attempt to qeueue
+ * the same @work. E.g. consider this case:
+ *
+ * CPU#0 CPU#1
+ * ---------------------------- --------------------------------
+ *
+ * 1 STORE event_indicated
+ * 2 queue_work_on() {
+ * 3 test_and_set_bit(PENDING)
+ * 4 } set_..._and_clear_pending() {
+ * 5 set_work_data() # clear bit
+ * 6 smp_mb()
+ * 7 work->current_func() {
+ * 8 LOAD event_indicated
+ * }
+ *
+ * Without an explicit full barrier, the speculative LOAD on line 8 can
+ * be executed before CPU#0 does the STORE on line 1. If that happens,
+ * CPU#0 observes that the PENDING bit is still set and does not queue
+ * a new execution of the @work, in the hope that CPU#1 will eventually
+ * finish the queued @work. Meanwhile CPU#1 does not see that
+ * event_indicated is set, because the speculative LOAD was executed
+ * before the actual STORE.
+ */
+ smp_mb();
}
static void clear_work_data(struct work_struct *work)
@@ -4447,6 +4476,17 @@ static void rebind_workers(struct worker_pool *pool)
pool->attrs->cpumask) < 0);
spin_lock_irq(&pool->lock);
+
+ /*
+ * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+ * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
+ * being reworked and this can go away in time.
+ */
+ if (!(pool->flags & POOL_DISASSOCIATED)) {
+ spin_unlock_irq(&pool->lock);
+ return;
+ }
+
pool->flags &= ~POOL_DISASSOCIATED;
for_each_pool_worker(worker, pool) {
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 03dd576e6773..59fd7c0b119c 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
free_slot = i;
continue;
}
- if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+ if (assoc_array_ptr_is_leaf(ptr) &&
+ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+ index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
diff --git a/lib/extable.c b/lib/extable.c
index 4cac81ec225e..0be02ad561e9 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -14,7 +14,37 @@
#include <linux/sort.h>
#include <asm/uaccess.h>
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define ex_to_insn(x) ((x)->insn)
+#else
+static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->insn + x->insn;
+}
+#endif
+
#ifndef ARCH_HAS_SORT_EXTABLE
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define swap_ex NULL
+#else
+static void swap_ex(void *a, void *b, int size)
+{
+ struct exception_table_entry *x = a, *y = b, tmp;
+ int delta = b - a;
+
+ tmp = *x;
+ x->insn = y->insn + delta;
+ y->insn = tmp.insn - delta;
+
+#ifdef swap_ex_entry_fixup
+ swap_ex_entry_fixup(x, y, tmp, delta);
+#else
+ x->fixup = y->fixup + delta;
+ y->fixup = tmp.fixup - delta;
+#endif
+}
+#endif /* ARCH_HAS_RELATIVE_EXTABLE */
+
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
@@ -26,9 +56,9 @@ static int cmp_ex(const void *a, const void *b)
const struct exception_table_entry *x = a, *y = b;
/* avoid overflow */
- if (x->insn > y->insn)
+ if (ex_to_insn(x) > ex_to_insn(y))
return 1;
- if (x->insn < y->insn)
+ if (ex_to_insn(x) < ex_to_insn(y))
return -1;
return 0;
}
@@ -37,7 +67,7 @@ void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
- cmp_ex, NULL);
+ cmp_ex, swap_ex);
}
#ifdef CONFIG_MODULES
@@ -48,13 +78,15 @@ void sort_extable(struct exception_table_entry *start,
void trim_init_extable(struct module *m)
{
/*trim the beginning*/
- while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
+ while (m->num_exentries &&
+ within_module_init(ex_to_insn(&m->extable[0]), m)) {
m->extable++;
m->num_exentries--;
}
/*trim the end*/
while (m->num_exentries &&
- within_module_init(m->extable[m->num_exentries-1].insn, m))
+ within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]),
+ m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
@@ -81,13 +113,13 @@ search_extable(const struct exception_table_entry *first,
* careful, the distance between value and insn
* can be larger than MAX_LONG:
*/
- if (mid->insn < value)
+ if (ex_to_insn(mid) < value)
first = mid + 1;
- else if (mid->insn > value)
+ else if (ex_to_insn(mid) > value)
last = mid - 1;
else
return mid;
- }
- return NULL;
+ }
+ return NULL;
}
#endif
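
With ARCH_HAS_RELATIVE_EXTABLE, an entry stores a signed offset from the address of its own insn field instead of an absolute address, which is why the compare, search and module-trim paths now go through ex_to_insn() and why sort() needs the swap_ex() callback to re-bias the offsets when entries move. A standalone sketch of the self-relative encoding itself; the struct and helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct rel_entry {
	int32_t insn;	/* signed offset of the target from &entry->insn */
};

static int some_code_location;	/* stand-in for a faulting instruction */
static struct rel_entry entry;	/* keep both in static storage so the
				 * 32-bit offset is guaranteed to fit */

static void rel_set(struct rel_entry *e, const void *target)
{
	e->insn = (int32_t)((intptr_t)target - (intptr_t)&e->insn);
}

static const void *rel_get(const struct rel_entry *e)
{
	/* mirrors ex_to_insn(): address of the field plus the stored offset */
	return (const void *)((intptr_t)&e->insn + e->insn);
}

int main(void)
{
	rel_set(&entry, &some_code_location);
	printf("target  %p\ndecoded %p\n",
	       (void *)&some_code_location, rel_get(&entry));
	return 0;
}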
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index abcecdc2d0f2..0710a62ad2f6 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -11,8 +11,7 @@
/*
* Detects 64 bits mode
*/
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
- || defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+ (d = s - A16(p))
+
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
do { \
A16(p) = v; \
@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT8(s, d) \
put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- put_unaligned(v, (u16 *)(p)); \
- p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+ (d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+ do { \
+ put_unaligned_le16(v, (u16 *)(p)); \
+ p += 2; \
} while (0)
#endif
@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
#endif
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - get_unaligned_le16(p))
-
#define LZ4_WILDCOPY(s, d, e) \
do { \
LZ4_COPYPACKET(s, d); \
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 3db76b8c1115..e00ff00e861c 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -128,6 +128,23 @@ leave:
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
+static int count_lzeros(MPI a)
+{
+ mpi_limb_t alimb;
+ int i, lzeros = 0;
+
+ for (i = a->nlimbs - 1; i >= 0; i--) {
+ alimb = a->d[i];
+ if (alimb == 0) {
+ lzeros += sizeof(mpi_limb_t);
+ } else {
+ lzeros += count_leading_zeros(alimb) / 8;
+ break;
+ }
+ }
+ return lzeros;
+}
+
/**
 * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
*
@@ -146,7 +163,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
uint8_t *p;
mpi_limb_t alimb;
unsigned int n = mpi_get_size(a);
- int i, lzeros = 0;
+ int i, lzeros;
if (buf_len < n || !buf || !nbytes)
return -EINVAL;
@@ -154,14 +171,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
if (sign)
*sign = a->sign;
- p = (void *)&a->d[a->nlimbs] - 1;
-
- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
- if (!*p)
- lzeros++;
- else
- break;
- }
+ lzeros = count_lzeros(a);
p = buf;
*nbytes = n - lzeros;
@@ -343,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
u8 *p, *p2;
mpi_limb_t alimb, alimb2;
unsigned int n = mpi_get_size(a);
- int i, x, y = 0, lzeros = 0, buf_len;
+ int i, x, y = 0, lzeros, buf_len;
if (!nbytes || *nbytes < n)
return -EINVAL;
@@ -351,14 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
if (sign)
*sign = a->sign;
- p = (void *)&a->d[a->nlimbs] - 1;
-
- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
- if (!*p)
- lzeros++;
- else
- break;
- }
+ lzeros = count_lzeros(a);
*nbytes = n - lzeros;
buf_len = sgl->length;
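
count_lzeros() replaces the former byte-by-byte scan: it walks the limbs from the most significant end and applies count_leading_zeros() to the first non-zero limb, which also removes the dependence on host byte order. Roughly the same logic in plain userspace C, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t limb_t;

/* d[0] is the least significant limb, d[nlimbs - 1] the most significant. */
static int count_lzero_bytes(const limb_t *d, int nlimbs)
{
	int i, lzeros = 0;

	for (i = nlimbs - 1; i >= 0; i--) {
		if (d[i] == 0) {
			lzeros += sizeof(limb_t);
		} else {
			lzeros += __builtin_clzll(d[i]) / 8;
			break;
		}
	}
	return lzeros;
}

int main(void)
{
	/* Top limb is all zero, next limb has 5 leading zero bytes. */
	limb_t d[3] = { 0x1122334455667788ULL, 0x0000000000ABCDEFULL, 0 };

	/* 8 bytes for the zero limb + 5 in 0x0000000000ABCDEF = 13 */
	printf("leading zero bytes: %d\n", count_lzero_bytes(d, 3));
	return 0;
}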
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 98866a770770..25b5cbfb7615 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -327,36 +327,67 @@ out:
}
#define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, units, exp_result) \
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
do { \
- BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf); \
- __test_string_get_size((size), (blk_size), (units), \
- (exp_result)); \
+ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
+ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
+ __test_string_get_size((size), (blk_size), (exp_result10), \
+ (exp_result2)); \
} while (0)
-static __init void __test_string_get_size(const u64 size, const u64 blk_size,
- const enum string_size_units units,
- const char *exp_result)
+static __init void test_string_get_size_check(const char *units,
+ const char *exp,
+ char *res,
+ const u64 size,
+ const u64 blk_size)
{
- char buf[string_get_size_maxbuf];
-
- string_get_size(size, blk_size, units, buf, sizeof(buf));
- if (!memcmp(buf, exp_result, strlen(exp_result) + 1))
+ if (!memcmp(res, exp, strlen(exp) + 1))
return;
- buf[sizeof(buf) - 1] = '\0';
- pr_warn("Test 'test_string_get_size_one' failed!\n");
- pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n",
+ res[string_get_size_maxbuf - 1] = '\0';
+
+ pr_warn("Test 'test_string_get_size' failed!\n");
+ pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
size, blk_size, units);
- pr_warn("expected: '%s', got '%s'\n", exp_result, buf);
+ pr_warn("expected: '%s', got '%s'\n", exp, res);
+}
+
+static __init void __test_string_get_size(const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2)
+{
+ char buf10[string_get_size_maxbuf];
+ char buf2[string_get_size_maxbuf];
+
+ string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
+ string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+
+ test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
+ size, blk_size);
+
+ test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
+ size, blk_size);
}
static __init void test_string_get_size(void)
{
- test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB");
- test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB");
- test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B");
+ /* small values */
+ test_string_get_size_one(0, 512, "0 B", "0 B");
+ test_string_get_size_one(1, 512, "512 B", "512 B");
+ test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
+
+ /* normal values */
+ test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
+ test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
+ test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
+
+ /* weird block sizes */
+ test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
+
+ /* huge values */
+ test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
+ test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
}
static int __init test_string_helpers_init(void)
diff --git a/mm/compaction.c b/mm/compaction.c
index e0d4a58bcee4..7f9e60489d67 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -905,16 +905,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
ISOLATE_UNEVICTABLE);
- /*
- * In case of fatal failure, release everything that might
- * have been isolated in the previous iteration, and signal
- * the failure back to caller.
- */
- if (!pfn) {
- putback_movable_pages(&cc->migratepages);
- cc->nr_migratepages = 0;
+ if (!pfn)
break;
- }
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
break;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 62fe06bb7d04..530e6427f823 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2134,10 +2134,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
* page fault if needed.
*/
return 0;
- if (vma->vm_ops)
+ if (vma->vm_ops || (vm_flags & VM_NO_THP))
/* khugepaged not yet working on file or special mappings */
return 0;
- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
@@ -2498,8 +2497,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
return false;
if (is_vma_temporary_stack(vma))
return false;
- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
- return true;
+ return !(vma->vm_flags & VM_NO_THP);
}
static void collapse_huge_page(struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7535ef32a75b..d1f6dc5a715d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -196,6 +196,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
spinlock_t lock; /* for from, to */
+ struct mm_struct *mm;
struct mem_cgroup *from;
struct mem_cgroup *to;
unsigned long flags;
@@ -4800,6 +4801,8 @@ static void __mem_cgroup_clear_mc(void)
static void mem_cgroup_clear_mc(void)
{
+ struct mm_struct *mm = mc.mm;
+
/*
* we must clear moving_task before waking up waiters at the end of
* task migration.
@@ -4809,7 +4812,10 @@ static void mem_cgroup_clear_mc(void)
spin_lock(&mc.lock);
mc.from = NULL;
mc.to = NULL;
+ mc.mm = NULL;
spin_unlock(&mc.lock);
+
+ mmput(mm);
}
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4866,6 +4872,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
VM_BUG_ON(mc.moved_swap);
spin_lock(&mc.lock);
+ mc.mm = mm;
mc.from = from;
mc.to = memcg;
mc.flags = move_flags;
@@ -4875,8 +4882,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
ret = mem_cgroup_precharge_mc(mm);
if (ret)
mem_cgroup_clear_mc();
+ } else {
+ mmput(mm);
}
- mmput(mm);
return ret;
}
@@ -4990,11 +4998,11 @@ put: /* get_mctgt_type() gets the page */
return ret;
}
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
{
struct mm_walk mem_cgroup_move_charge_walk = {
.pmd_entry = mem_cgroup_move_charge_pte_range,
- .mm = mm,
+ .mm = mc.mm,
};
lru_add_drain_all();
@@ -5006,7 +5014,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
atomic_inc(&mc.from->moving_account);
synchronize_rcu();
retry:
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
/*
* Someone who are holding the mmap_sem might be waiting in
* waitq. So we cancel all extra charges, wake up all waiters,
@@ -5023,23 +5031,16 @@ retry:
* additional charge, the page walk just aborts.
*/
walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
- up_read(&mm->mmap_sem);
+ up_read(&mc.mm->mmap_sem);
atomic_dec(&mc.from->moving_account);
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
- struct cgroup_subsys_state *css;
- struct task_struct *p = cgroup_taskset_first(tset, &css);
- struct mm_struct *mm = get_task_mm(p);
-
- if (mm) {
- if (mc.to)
- mem_cgroup_move_charge(mm);
- mmput(mm);
- }
- if (mc.to)
+ if (mc.to) {
+ mem_cgroup_move_charge();
mem_cgroup_clear_mc();
+ }
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -5053,7 +5054,7 @@ static int mem_cgroup_allow_attach(struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
}
#endif
@@ -5269,6 +5270,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
.cancel_attach = mem_cgroup_cancel_attach,
.attach = mem_cgroup_move_task,
.allow_attach = mem_cgroup_allow_attach,
+ .post_attach = mem_cgroup_move_task,
.bind = mem_cgroup_bind,
.dfl_cftypes = memory_files,
.legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory.c b/mm/memory.c
index 6098837a4e5e..b536e3d60fc7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -797,6 +797,46 @@ out:
return pfn_to_page(pfn);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd)
+{
+ unsigned long pfn = pmd_pfn(pmd);
+
+ /*
+ * There is no pmd_special() but there may be special pmds, e.g.
+ * in a direct-access (dax) mapping, so let's just replicate the
+ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+ */
+ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ if (!pfn_valid(pfn))
+ return NULL;
+ goto out;
+ } else {
+ unsigned long off;
+ off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+ }
+
+ if (is_zero_pfn(pfn))
+ return NULL;
+ if (unlikely(pfn > highest_memmap_pfn))
+ return NULL;
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+ */
+out:
+ return pfn_to_page(pfn);
+}
+#endif
+
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
diff --git a/mm/migrate.c b/mm/migrate.c
index cd1e63062459..3db1b0277eb4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -963,7 +963,13 @@ out:
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
/* Soft-offlined page shouldn't go through lru cache list */
- if (reason == MR_MEMORY_FAILURE) {
+ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+ /*
+ * With this release, we free the successfully migrated
+ * page and set PG_HWPoison on the just-freed page
+ * intentionally. Although it's rather weird, that is how
+ * the HWPoison flag works at the moment.
+ */
put_page(page);
if (!test_set_page_hwpoison(page))
num_poisoned_pages_inc();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 112c0bebfff3..8bf8e06a56a6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1899,7 +1899,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (gdtc->dirty > gdtc->bg_thresh)
return true;
- if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+ if (wb_stat(wb, WB_RECLAIMABLE) >
+ wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
return true;
if (mdtc) {
@@ -1913,7 +1914,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (mdtc->dirty > mdtc->bg_thresh)
return true;
- if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+ if (wb_stat(wb, WB_RECLAIMABLE) >
+ wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
return true;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ffcb2b56f6c1..c8a31783c2d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6294,7 +6294,7 @@ int __meminit init_per_zone_wmark_min(void)
setup_per_zone_inactive_ratio();
return 0;
}
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
/*
* min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/mm/slub.c b/mm/slub.c
index 2d5bbea0f0e8..fdc0721ebc31 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2832,6 +2832,7 @@ struct detached_freelist {
void *tail;
void *freelist;
int cnt;
+ struct kmem_cache *s;
};
/*
@@ -2846,8 +2847,9 @@ struct detached_freelist {
* synchronization primitive. Look ahead in the array is limited due
* to performance reasons.
*/
-static int build_detached_freelist(struct kmem_cache *s, size_t size,
- void **p, struct detached_freelist *df)
+static inline
+int build_detached_freelist(struct kmem_cache *s, size_t size,
+ void **p, struct detached_freelist *df)
{
size_t first_skipped_index = 0;
int lookahead = 3;
@@ -2863,8 +2865,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
if (!object)
return 0;
+ /* Support for memcg, compiler can optimize this out */
+ df->s = cache_from_obj(s, object);
+
/* Start new detached freelist */
- set_freepointer(s, object, NULL);
+ set_freepointer(df->s, object, NULL);
df->page = virt_to_head_page(object);
df->tail = object;
df->freelist = object;
@@ -2879,7 +2884,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
/* df->page is always set at this point */
if (df->page == virt_to_head_page(object)) {
/* Opportunity build freelist */
- set_freepointer(s, object, df->freelist);
+ set_freepointer(df->s, object, df->freelist);
df->freelist = object;
df->cnt++;
p[size] = NULL; /* mark object processed */
@@ -2898,25 +2903,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
return first_skipped_index;
}
-
/* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
if (WARN_ON(!size))
return;
do {
struct detached_freelist df;
- struct kmem_cache *s;
-
- /* Support for memcg */
- s = cache_from_obj(orig_s, p[size - 1]);
size = build_detached_freelist(s, size, p, &df);
if (unlikely(!df.page))
continue;
- slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+ slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
} while (likely(size));
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 73f5cec91063..d5c3ef60a71e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2683,7 +2683,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
sc->gfp_mask |= __GFP_HIGHMEM;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- requested_highidx, sc->nodemask) {
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
enum zone_type classzone_idx;
if (!populated_zone(zone))
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index fc083996e40a..c1ea19478119 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1732,10 +1732,13 @@ static struct page *isolate_source_page(struct size_class *class)
static unsigned long zs_can_compact(struct size_class *class)
{
unsigned long obj_wasted;
+ unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+ unsigned long obj_used = zs_stat_get(class, OBJ_USED);
- obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
- zs_stat_get(class, OBJ_USED);
+ if (obj_allocated <= obj_used)
+ return 0;
+ obj_wasted = obj_allocated - obj_used;
obj_wasted /= get_maxobj_per_zspage(class->size,
class->pages_per_zspage);
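
The new check makes zs_can_compact() return 0 whenever obj_allocated is not greater than obj_used; otherwise the unsigned subtraction would wrap and report an enormous amount of "wasted" space. The failure mode in a few lines of userspace C (the numbers are made up):

#include <stdio.h>

int main(void)
{
	unsigned long allocated = 10, used = 12, wasted;

	/* Unguarded: 10 - 12 wraps around to ULONG_MAX - 1. */
	printf("unguarded: %lu\n", allocated - used);

	/* Guarded, as in zs_can_compact(): treat it as nothing to compact. */
	wasted = (allocated <= used) ? 0 : allocated - used;
	printf("guarded:   %lu\n", wasted);
	return 0;
}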
diff --git a/mm/zswap.c b/mm/zswap.c
index bf14508afd64..340261946fda 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
/* used by param callback function */
static bool zswap_init_started;
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
struct zswap_pool *pool;
+ char name[38]; /* 'zswap' + 32 char (max) num + \0 */
gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
return NULL;
}
- pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+ /* unique name for each pool specifically required by zsmalloc */
+ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+ pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
if (!pool->zpool) {
pr_err("%s zpool not available\n", type);
goto error;
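
zswap now builds each zpool name from a global atomic counter because, per the comment above, zsmalloc requires pool names to be unique. A userspace sketch of the naming scheme; the buffer size and names here are only illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint pool_count;

/* 'zswap' + 8 hex digits + NUL fits comfortably in 16 bytes here. */
static void next_pool_name(char *buf, size_t len)
{
	snprintf(buf, len, "zswap%x", atomic_fetch_add(&pool_count, 1) + 1);
}

int main(void)
{
	char a[16], b[16];

	next_pool_name(a, sizeof(a));
	next_pool_name(b, sizeof(b));
	printf("%s %s\n", a, b);	/* prints: zswap1 zswap2 */
	return 0;
}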
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index a49c705fb86b..5f19133c5530 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -553,6 +553,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* be sent to
* @bat_priv: the bat priv with all the soft interface information
* @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
*
* An originator O is selected if and only if its DHT_ID value is one of three
* closest values (from the LEFT, with wrap around if needed) then the hash
@@ -561,7 +562,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
*/
static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+ unsigned short vid)
{
int select;
batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -577,7 +579,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
return NULL;
dat.ip = ip_dst;
- dat.vid = 0;
+ dat.vid = vid;
ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
BATADV_DAT_ADDR_MAX);
@@ -597,6 +599,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @ip: the DHT key
+ * @vid: VLAN identifier
* @packet_subtype: unicast4addr packet subtype to use
*
* This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -607,7 +610,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
*/
static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
struct sk_buff *skb, __be32 ip,
- int packet_subtype)
+ unsigned short vid, int packet_subtype)
{
int i;
bool ret = false;
@@ -616,7 +619,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
struct sk_buff *tmp_skb;
struct batadv_dat_candidate *cand;
- cand = batadv_dat_select_candidates(bat_priv, ip);
+ cand = batadv_dat_select_candidates(bat_priv, ip, vid);
if (!cand)
goto out;
@@ -1004,7 +1007,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
ret = true;
} else {
/* Send the request to the DHT */
- ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+ ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
BATADV_P_DAT_DHT_GET);
}
out:
@@ -1132,8 +1135,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
/* Send the ARP reply to the candidates for both the IP addresses that
* the node obtained from the ARP reply
*/
- batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
- batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
}
/**
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 3207667e69de..d8a2f33e60e5 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -104,6 +104,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
neigh_node = NULL;
spin_lock_bh(&orig_node->neigh_list_lock);
+ /* curr_router used earlier may not be the current orig_ifinfo->router
+ * anymore because it was dereferenced outside of the neigh_list_lock
+ * protected region. After the new best neighbor has replace the current
+ * best neighbor the reference counter needs to decrease. Consequently,
+ * the code needs to ensure the curr_router variable contains a pointer
+ * to the replaced best neighbor.
+ */
+ curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
rcu_assign_pointer(orig_ifinfo->router, neigh_node);
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_orig_ifinfo_free_ref(orig_ifinfo);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f664324805eb..0e0c3b8ed927 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -630,6 +630,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
+ if (!forw_packet->own)
+ atomic_inc(&bat_priv->bcast_queue_left);
+
batadv_forw_packet_free(forw_packet);
}
}
@@ -657,6 +660,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
+ if (!forw_packet->own)
+ atomic_inc(&bat_priv->batman_queue_left);
+
batadv_forw_packet_free(forw_packet);
}
}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index ac4d08de5df4..720f1a5b81ac 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -407,11 +407,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
*/
nf_reset(skb);
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ goto dropped;
+
vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
+ if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+ goto dropped;
+
vhdr = (struct vlan_ethhdr *)skb->data;
if (vhdr->h_vlan_encapsulated_proto != ethertype)
@@ -423,8 +429,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
}
/* skb->dev & skb->pkt_type are set here */
- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
- goto dropped;
skb->protocol = eth_type_trans(skb, soft_iface);
/* should not be necessary anymore as we use skb_pull_rcsum()
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 263b4de4de57..60a3dbfca8a1 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,18 +21,19 @@
#include <asm/uaccess.h>
#include "br_private.h"
-/* called with RTNL */
static int get_bridge_ifindices(struct net *net, int *indices, int num)
{
struct net_device *dev;
int i = 0;
- for_each_netdev(net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
if (i >= num)
break;
if (dev->priv_flags & IFF_EBRIDGE)
indices[i++] = dev->ifindex;
}
+ rcu_read_unlock();
return i;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 03661d97463c..ea9893743a0f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1270,6 +1270,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
+ unsigned int offset = skb_transport_offset(skb);
__be32 group;
int err = 0;
@@ -1280,14 +1281,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
group = ih->group;
- if (skb->len == sizeof(*ih)) {
+ if (skb->len == offset + sizeof(*ih)) {
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
if (!max_delay) {
max_delay = 10 * HZ;
group = 0;
}
- } else if (skb->len >= sizeof(*ih3)) {
+ } else if (skb->len >= offset + sizeof(*ih3)) {
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs)
goto out;
@@ -1348,6 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
+ unsigned int offset = skb_transport_offset(skb);
const struct in6_addr *group = NULL;
bool is_general_query;
int err = 0;
@@ -1357,8 +1359,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- if (skb->len == sizeof(*mld)) {
- if (!pskb_may_pull(skb, sizeof(*mld))) {
+ if (skb->len == offset + sizeof(*mld)) {
+ if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
err = -EINVAL;
goto out;
}
@@ -1367,7 +1369,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (max_delay)
group = &mld->mld_mca;
} else {
- if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+ if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
err = -EINVAL;
goto out;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 732be5afa6ce..38467f386b14 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4444,15 +4444,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
__skb_push(skb, offset);
err = __vlan_insert_tag(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
- if (err)
+ if (err) {
+ __skb_pull(skb, offset);
return err;
+ }
+
skb->protocol = skb->vlan_proto;
skb->mac_len += VLAN_HLEN;
- __skb_pull(skb, offset);
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(skb->csum, csum_partial(skb->data
- + (2 * ETH_ALEN), VLAN_HLEN, 0));
+ skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+ __skb_pull(skb, offset);
}
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f20d88..b1dc096d22f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ source_ok:
if (!fld.daddr) {
fld.daddr = fld.saddr;
- err = -EADDRNOTAVAIL;
if (dev_out)
dev_put(dev_out);
+ err = -EINVAL;
dev_out = init_net.loopback_dev;
+ if (!dev_out->dn_ptr)
+ goto out;
+ err = -EADDRNOTAVAIL;
dev_hold(dev_out);
if (!fld.daddr) {
fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
if (dev_out == NULL)
goto out;
dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+ if (!dn_db)
+ goto e_inval;
/* Possible improvement - check all devices for local addr */
if (dn_dev_islocal(dev_out, fld.daddr)) {
dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
dev_put(dev_out);
dev_out = init_net.loopback_dev;
dev_hold(dev_out);
+ if (!dev_out->dn_ptr)
+ goto e_inval;
fld.flowidn_oif = dev_out->ifindex;
if (res.fi)
dn_fib_info_put(res.fi);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f97ae9d93ee9..98c754e61024 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -905,7 +905,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
if (!prim) {
- pr_warn("%s: bug: prim == NULL\n", __func__);
+ /* if the device has been deleted, we don't perform
+ * address promotion
+ */
+ if (!in_dev->dead)
+ pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
if (iprim && iprim != prim) {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d97268e8ff10..2b68418c7198 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
val = 65535 - 40;
if (type == RTAX_MTU && val > 65535 - 15)
val = 65535 - 15;
+ if (type == RTAX_HOPLIMIT && val > 255)
+ val = 255;
if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
return -EINVAL;
fi->fib_metrics[type - 1] = val;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 614521437e30..7dc962b89fa1 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -180,6 +180,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
return flags;
}
+/* Fills in tpi and returns header length to be pulled. */
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err)
{
@@ -239,7 +240,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
return -EINVAL;
}
}
- return iptunnel_pull_header(skb, hdr_len, tpi->proto);
+ return hdr_len;
}
static void ipgre_err(struct sk_buff *skb, u32 info,
@@ -342,7 +343,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
struct tnl_ptk_info tpi;
bool csum_err = false;
- if (parse_gre_header(skb, &tpi, &csum_err)) {
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
if (!csum_err) /* ignore csum errors. */
return;
}
@@ -420,6 +421,7 @@ static int gre_rcv(struct sk_buff *skb)
{
struct tnl_ptk_info tpi;
bool csum_err = false;
+ int hdr_len;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -429,7 +431,10 @@ static int gre_rcv(struct sk_buff *skb)
}
#endif
- if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+ hdr_len = parse_gre_header(skb, &tpi, &csum_err);
+ if (hdr_len < 0)
+ goto drop;
+ if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
goto drop;
if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 79a957ea6545..fb54659320d8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2047,6 +2047,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
*/
if (fi && res->prefixlen < 4)
fi = NULL;
+ } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+ (orig_oif != dev_out->ifindex)) {
+ /* For local routes that require a particular output interface
+ * we do not want to cache the result. Caching the result
+ * causes incorrect behaviour when there are multiple source
+ * addresses on the interface, the end result being that if the
+ * intended recipient is waiting on that interface for the
+ * packet, it won't be received because it will be delivered on
+ * the loopback interface and the IP_PKTINFO ipi_ifindex will
+ * be set to the loopback interface as well.
+ */
+ fi = NULL;
}
fnhe = NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9f069bd9de46..0dd207cd1f38 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2625,8 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
*/
if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
skb_headroom(skb) >= 0xFFFF)) {
- struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
- GFP_ATOMIC);
+ struct sk_buff *nskb;
+
+ skb_mstamp_get(&skb->skb_mstamp);
+ nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-ENOBUFS;
} else {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 45f5ae51de65..a234552a7e3d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -496,10 +496,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
/* Yes, and fold redundant checksum back. 8) */
- if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_partial(skb_network_header(head),
- skb_network_header_len(head),
- head->csum);
+ skb_postpush_rcsum(head, skb_network_header(head),
+ skb_network_header_len(head));
rcu_read_lock();
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 01d7ee57d937..161cdc072547 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1721,6 +1721,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
} else {
val = nla_get_u32(nla);
}
+ if (type == RTAX_HOPLIMIT && val > 255)
+ val = 255;
if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
goto err;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8dab4e569571..bb8edb9ef506 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
struct llc_pktinfo info;
+ memset(&info, 0, sizeof(info));
info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
llc_pdu_decode_dsap(skb, &info.lpi_sap);
llc_pdu_decode_da(skb, info.lpi_mac);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7a2b7915093b..bcb0a1b64556 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1750,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0) {
- free_netdev(ndev);
+ ieee80211_if_free(ndev);
return ret;
}
@@ -1836,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = register_netdevice(ndev);
if (ret) {
- free_netdev(ndev);
+ ieee80211_if_free(ndev);
return ret;
}
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 33344f5a66a8..9ea2cc098ad1 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3198,10 +3198,11 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
enum nl80211_iftype iftype = sdata->wdev.iftype;
- int num[NUM_NL80211_IFTYPES];
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
int total = 1;
+ struct iface_combination_params params = {
+ .radar_detect = radar_detect,
+ };
lockdep_assert_held(&local->chanctx_mtx);
@@ -3212,9 +3213,6 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
!chandef->chan))
return -EINVAL;
- if (chandef)
- num_different_channels = 1;
-
if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
return -EINVAL;
@@ -3225,24 +3223,26 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
return 0;
}
- memset(num, 0, sizeof(num));
+ if (chandef)
+ params.num_different_channels = 1;
if (iftype != NL80211_IFTYPE_UNSPECIFIED)
- num[iftype] = 1;
+ params.iftype_num[iftype] = 1;
list_for_each_entry(ctx, &local->chanctx_list, list) {
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
- num_different_channels++;
+ params.num_different_channels++;
continue;
}
if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
cfg80211_chandef_compatible(chandef,
&ctx->conf.def))
continue;
- num_different_channels++;
+ params.num_different_channels++;
}
list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
@@ -3255,16 +3255,14 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
continue;
- num[wdev_iter->iftype]++;
+ params.iftype_num[wdev_iter->iftype]++;
total++;
}
- if (total == 1 && !radar_detect)
+ if (total == 1 && !params.radar_detect)
return 0;
- return cfg80211_check_combinations(local->hw.wiphy,
- num_different_channels,
- radar_detect, num);
+ return cfg80211_check_combinations(local->hw.wiphy, &params);
}
static void
@@ -3280,12 +3278,10 @@ ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
int ieee80211_max_num_channels(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- int num[NUM_NL80211_IFTYPES] = {};
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
- u8 radar_detect = 0;
u32 max_num_different_channels = 1;
int err;
+ struct iface_combination_params params = {0};
lockdep_assert_held(&local->chanctx_mtx);
@@ -3293,17 +3289,17 @@ int ieee80211_max_num_channels(struct ieee80211_local *local)
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- num_different_channels++;
+ params.num_different_channels++;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
}
list_for_each_entry_rcu(sdata, &local->interfaces, list)
- num[sdata->wdev.iftype]++;
+ params.iftype_num[sdata->wdev.iftype]++;
- err = cfg80211_iter_combinations(local->hw.wiphy,
- num_different_channels, radar_detect,
- num, ieee80211_iter_max_chans,
+ err = cfg80211_iter_combinations(local->hw.wiphy, &params,
+ ieee80211_iter_max_chans,
&max_num_different_channels);
if (err < 0)
return err;
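
Note: the mac80211 and cfg80211 hunks in this series replace the separate num_different_channels / radar_detect / iftype_num arguments with a single struct iface_combination_params. A minimal sketch of the new calling convention, using only the fields and the cfg80211_check_combinations() signature visible in this diff; the wrapper function itself is hypothetical and assumes the declarations added by this series:

/* Hypothetical caller illustrating the struct-based interface; only
 * fields visible in the hunks above are used.
 */
static int example_check_ap_plus_station(struct wiphy *wiphy)
{
	struct iface_combination_params params = {
		.num_different_channels = 1,	/* both on one channel */
		.radar_detect = 0,		/* no DFS widths in use */
	};

	params.iftype_num[NL80211_IFTYPE_AP] = 1;
	params.iftype_num[NL80211_IFTYPE_STATION] = 1;

	/* 0 means some advertised combination allows this mix. */
	return cfg80211_check_combinations(wiphy, &params);
}

The mac80211 callers above build the same structure incrementally while walking the interface and channel-context lists.
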
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index f57b4dcdb233..4da560005b0e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1757,15 +1757,34 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
cp = pp->conn_in_get(ipvs, af, skb, &iph);
conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
- if (conn_reuse_mode && !iph.fragoffs &&
- is_new_conn(skb, &iph) && cp &&
- ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
- unlikely(!atomic_read(&cp->dest->weight))) ||
- unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
- if (!atomic_read(&cp->n_control))
- ip_vs_conn_expire_now(cp);
- __ip_vs_conn_put(cp);
- cp = NULL;
+ if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ bool uses_ct = false, resched = false;
+
+ if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+ unlikely(!atomic_read(&cp->dest->weight))) {
+ resched = true;
+ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+ } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
+ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+ if (!atomic_read(&cp->n_control)) {
+ resched = true;
+ } else {
+ /* Do not reschedule controlling connection
+ * that uses conntrack while it is still
+ * referenced by controlled connection(s).
+ */
+ resched = !uses_ct;
+ }
+ }
+
+ if (resched) {
+ if (!atomic_read(&cp->n_control))
+ ip_vs_conn_expire_now(cp);
+ __ip_vs_conn_put(cp);
+ if (uses_ct)
+ return NF_DROP;
+ cp = NULL;
+ }
}
if (unlikely(!cp)) {
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 1b8d594e493a..0a6eb5c0d9e9 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
const char *dptr;
int retc;
- ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+ retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
/* Only useful with UDP */
- if (iph.protocol != IPPROTO_UDP)
+ if (!retc || iph.protocol != IPPROTO_UDP)
return -EINVAL;
/* todo: IPv6 fragments:
* I think this only should be done for the first fragment. /HS
@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
- if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
+ if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
return -EINVAL;
/* N.B: pe_data is only set on success,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3cb3cb831591..86a3c6f0c871 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1757,6 +1757,7 @@ void nf_conntrack_init_end(void)
int nf_conntrack_init_net(struct net *net)
{
+ static atomic64_t unique_id;
int ret = -ENOMEM;
int cpu;
@@ -1779,7 +1780,8 @@ int nf_conntrack_init_net(struct net *net)
if (!net->ct.stat)
goto err_pcpu_lists;
- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
+ (u64)atomic64_inc_return(&unique_id));
if (!net->ct.slabname)
goto err_slabname;
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index dca5cacc51f0..ececa65868ef 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -2536,8 +2536,7 @@ static int pp_stats_line(struct seq_file *m, struct tag_stat *ts_entry,
uid_t stat_uid = get_uid_from_tag(tag);
struct proc_print_info *ppi = m->private;
/* Detailed tags are not available to everybody */
- if (get_atag_from_tag(tag) && !can_read_other_uid_stats(
- make_kuid(&init_user_ns,stat_uid))) {
+ if (!can_read_other_uid_stats(make_kuid(&init_user_ns,stat_uid))) {
CT_DEBUG("qtaguid: stats line: "
"%s 0x%llx %u: insufficient priv "
"from pid=%u tgid=%u uid=%u stats.gid=%u\n",
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 59651af8cc27..992b35fb8615 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
skb_queue_purge(&sk->sk_write_queue);
- if (nlk->portid) {
+ if (nlk->portid && nlk->bound) {
struct netlink_notify n = {
.net = sock_net(sk),
.protocol = sk->sk_protocol,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c88d0f2d3e01..7cb8184ac165 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -158,9 +158,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
new_mpls_lse = (__be32 *)skb_mpls_header(skb);
*new_mpls_lse = mpls->mpls_lse;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
- MPLS_HLEN, 0));
+ skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
hdr = eth_hdr(skb);
hdr->h_proto = mpls->mpls_ethertype;
@@ -280,7 +278,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
mask->eth_dst);
- ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+ skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
@@ -463,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
- set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+ set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
true);
memcpy(&flow_key->ipv6.addr.src, masked,
sizeof(flow_key->ipv6.addr.src));
@@ -485,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
NULL, &flags)
!= NEXTHDR_ROUTING);
- set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+ set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
recalc_csum);
memcpy(&flow_key->ipv6.addr.dst, masked,
sizeof(flow_key->ipv6.addr.dst));
@@ -639,7 +637,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
/* Reconstruct the MAC header. */
skb_push(skb, data->l2_len);
memcpy(skb->data, &data->l2_data, data->l2_len);
- ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
+ skb_postpush_rcsum(skb, skb->data, data->l2_len);
skb_reset_mac_header(skb);
ovs_vport_send(vport, skb);
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 6b0190b987ec..76fcaf1fd2a9 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
return;
skb_push(skb, ETH_HLEN);
- ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
+ skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
return;
error:
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 8ea3a96980ac..6e2b62f9d595 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -184,13 +184,6 @@ static inline struct vport *vport_from_priv(void *priv)
int ovs_vport_receive(struct vport *, struct sk_buff *,
const struct ip_tunnel_info *);
-static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
- const void *start, unsigned int len)
-{
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
-}
-
static inline const char *ovs_vport_name(struct vport *vport)
{
return vport->dev->name;
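
The vport.h hunk above drops the OVS-local checksum helper in favour of the generic skb_postpush_rcsum(); its removed body documents the pattern: after pushing bytes in front of the packet data, fold their checksum back into skb->csum when the device reported CHECKSUM_COMPLETE. A sketch of the equivalent open-coded form, reproduced from the removed helper (the generic helper's exact implementation may differ):

/* Equivalent of the removed ovs_skb_postpush_rcsum(): after pushing
 * 'len' bytes at 'start' in front of the packet data, keep a
 * CHECKSUM_COMPLETE value consistent by adding the checksum of the
 * pushed region back into skb->csum.
 */
static inline void example_postpush_rcsum(struct sk_buff *skb,
					  const void *start,
					  unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum,
				     csum_partial(start, len, 0));
}

The push_mpls(), set_eth_addr(), ovs_vport_output() and netdev_port_receive() hunks above now all go through the generic helper.
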
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index da1ae0e13cb5..9cc7b512b472 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3436,6 +3436,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
i->ifindex = mreq->mr_ifindex;
i->alen = mreq->mr_alen;
memcpy(i->addr, mreq->mr_address, i->alen);
+ memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
i->count = 1;
i->next = po->mclist;
po->mclist = i;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 34967c19da85..b855352167b1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
return 0;
}
-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
+ unsigned int len)
{
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
int drops;
- if (n == 0)
+ if (n == 0 && len == 0)
return;
drops = max_t(int, n, 0);
rcu_read_lock();
@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
cops->put(sch, cl);
}
sch->q.qlen -= n;
+ sch->qstats.backlog -= len;
__qdisc_qstats_drop(sch, drops);
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
struct nlmsghdr *n, u32 clid,
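
qdisc_tree_reduce_backlog() now propagates a byte count alongside the packet count, so the callers in the per-qdisc hunks below follow one pattern: snapshot q.qlen and qstats.backlog before dropping or resetting the child, then report both deltas. A minimal sketch of that pattern (the purge function name is illustrative; the helpers it calls are the ones shown in this diff):

/* Illustrative purge helper following the pattern used by the
 * cbq/drr/hfsc/qfq hunks below: capture both counters before the
 * reset, then tell the parent qdiscs how many packets and bytes
 * disappeared from the child.
 */
static void example_purge_child(struct Qdisc *child)
{
	unsigned int qlen = child->q.qlen;
	unsigned int backlog = child->qstats.backlog;

	qdisc_reset(child);
	qdisc_tree_reduce_backlog(child, qlen, backlog);
}

drr_purge_queue(), hfsc_purge_queue() and qfq_purge_queue() below are exactly this shape.
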
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c538d9e4a8f6..baafddf229ce 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new->reshape_fail = cbq_reshape_fail;
#endif
}
- sch_tree_lock(sch);
- *old = cl->q;
- cl->q = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->q);
return 0;
}
@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
- unsigned int qlen;
+ unsigned int qlen, backlog;
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
qlen = cl->q->q.qlen;
+ backlog = cl->q->qstats.backlog;
qdisc_reset(cl->q);
- qdisc_tree_decrease_qlen(cl->q, qlen);
+ qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
if (cl->next_alive)
cbq_deactivate_class(cl);
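
Several graft operations in this series collapse the open-coded child swap into qdisc_replace(). Judging from the sequences it replaces in these hunks, the helper presumably behaves like the sketch below; its actual definition lives in a header that is not part of this diff:

/* Sketch of what qdisc_replace() is expected to do, reconstructed
 * from the open-coded sequences removed in the graft hunks: swap in
 * the new child under the tree lock, flush the old child's
 * accounting up the tree, and return the old child to the caller.
 */
static struct Qdisc *example_qdisc_replace(struct Qdisc *sch,
					   struct Qdisc *new,
					   struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old) {
		qdisc_tree_reduce_backlog(old, old->q.qlen,
					  old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}
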
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 5ffb8b8337c7..0a08c860eee4 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
choke_zap_tail_holes(q);
qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch);
- qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen;
}
@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
old = q->tab;
if (old) {
unsigned int oqlen = sch->q.qlen, tail = 0;
+ unsigned dropped = 0;
while (q->head != q->tail) {
struct sk_buff *skb = q->tab[q->head];
@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ntab[tail++] = skb;
continue;
}
+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen;
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
q->head = 0;
q->tail = tail;
}
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 535007d5f0b5..9b7e2980ee5c 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->stats.drop_count && sch->q.qlen) {
- qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
q->stats.drop_count = 0;
+ q->stats.drop_len = 0;
}
if (skb)
qdisc_bstats_update(sch, skb);
@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{
struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
int err;
if (!opt)
@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index f26bdea875c1..d6e3ad43cecb 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
static void drr_purge_queue(struct drr_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- drr_purge_queue(cl);
- *old = cl->qdisc;
- cl->qdisc = new;
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->qdisc);
return 0;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index f357f34d02d2..d0dff0cd8186 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- *old = p->q;
- p->q = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &p->q);
return 0;
}
@@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
return NULL;
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
@@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
qdisc_reset(p->q);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 109b2322778f..3c6a47d66a04 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1];
int err, drop_count = 0;
+ unsigned drop_len = 0;
u32 fq_log;
if (!opt)
@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (!skb)
break;
+ drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
drop_count++;
}
- qdisc_tree_decrease_qlen(sch, drop_count);
+ qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
sch_tree_unlock(sch);
return err;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 4c834e93dafb..d3fc8f9dd3d4 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
- unsigned int idx;
+ unsigned int idx, prev_backlog;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
+ prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet
* from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}
@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
struct fq_codel_flow *flow;
struct list_head *head;
u32 prev_drop_count, prev_ecn_mark;
+ unsigned int prev_backlog;
begin:
head = &q->new_flows;
@@ -259,6 +261,7 @@ begin:
prev_drop_count = q->cstats.drop_count;
prev_ecn_mark = q->cstats.ecn_mark;
+ prev_backlog = sch->qstats.backlog;
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
dequeue);
@@ -276,12 +279,14 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ q->cstats.drop_len);
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
}
return skb;
}
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_codel_dequeue(sch);
+ q->cstats.drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
q->cstats.drop_count++;
}
- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 16bc83b2842a..aa4725038f94 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
if (validate)
skb = validate_xmit_skb_list(skb, dev);
- if (skb) {
+ if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
HARD_TX_UNLOCK(dev, txq);
+ } else {
+ spin_lock(root_lock);
+ return qdisc_qlen(q);
}
spin_lock(root_lock);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b7ebe2c87586..d783d7cc3348 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -895,9 +895,10 @@ static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static void
@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- hfsc_purge_queue(sch, cl);
- *old = cl->qdisc;
- cl->qdisc = new;
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->qdisc);
return 0;
}
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 86b04e31e60b..13d6f83ec491 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct hhf_sched_data *q = qdisc_priv(sch);
enum wdrr_bucket_idx idx;
struct wdrr_bucket *bucket;
+ unsigned int prev_backlog;
idx = hhf_classify(skb, sch);
@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
+ prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet from this
* bucket.
@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this. */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}
@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, prev_backlog;
int err;
u64 non_hh_quantum;
u32 new_quantum = q->quantum;
@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
}
qlen = sch->q.qlen;
+ prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = hhf_dequeue(sch);
kfree_skb(skb);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+ prev_backlog - sch->qstats.backlog);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 15ccd7f8fb2a..87b02ed3d5f2 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
htb_activate(q, cl);
}
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
ok:
qdisc_bstats_update(sch, skb);
qdisc_unthrottled(sch);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
unsigned int len;
if (cl->un.leaf.q->ops->drop &&
(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
-
}
}
qdisc_watchdog_cancel(&q->watchdog);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
+ sch->qstats.backlog = 0;
memset(q->hlevel, 0, sizeof(q->hlevel));
memset(q->row_mask, 0, sizeof(q->row_mask));
for (i = 0; i < TC_HTB_NUMPRIO; i++)
@@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
cl->common.classid)) == NULL)
return -ENOBUFS;
- sch_tree_lock(sch);
- *old = cl->un.leaf.q;
- cl->un.leaf.q = new;
- if (*old != NULL) {
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- }
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->un.leaf.q);
return 0;
}
@@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
- unsigned int qlen;
struct Qdisc *new_q = NULL;
int last_child = 0;
@@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
if (!cl->level) {
- qlen = cl->un.leaf.q->q.qlen;
+ unsigned int qlen = cl->un.leaf.q->q.qlen;
+ unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+
qdisc_reset(cl->un.leaf.q);
- qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+ qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
}
/* delete from hash and active; remainder in destroy_class */
@@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
sch_tree_lock(sch);
if (parent && !parent->level) {
unsigned int qlen = parent->un.leaf.q->q.qlen;
+ unsigned int backlog = parent->un.leaf.q->qstats.backlog;
/* turn parent into inner node */
qdisc_reset(parent->un.leaf.q);
- qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
+ qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
qdisc_destroy(parent->un.leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 4e904ca0af9d..bcdd54bb101c 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
if (q->queues[i] != &noop_qdisc) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
- qdisc_tree_decrease_qlen(child, child->q.qlen);
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
qdisc_destroy(child);
}
}
@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child;
if (old != &noop_qdisc) {
- qdisc_tree_decrease_qlen(old,
- old->q.qlen);
+ qdisc_tree_reduce_backlog(old,
+ old->q.qlen,
+ old->qstats.backlog);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->queues[band];
- q->queues[band] = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->queues[band]);
return 0;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5abd1d9de989..4befe97a9034 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
sch->q.qlen++;
}
+/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
+ * when we statistically choose to corrupt one, we instead segment it, returning
+ * the first packet to be corrupted, and re-enqueue the remaining frames
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct sk_buff *segs;
+ netdev_features_t features = netif_skb_features(skb);
+
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+ if (IS_ERR_OR_NULL(segs)) {
+ qdisc_reshape_fail(skb, sch);
+ return NULL;
+ }
+ consume_skb(skb);
+ return segs;
+}
+
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
struct sk_buff *skb2;
+ struct sk_buff *segs = NULL;
+ unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+ int nb = 0;
int count = 1;
+ int rc = NET_XMIT_SUCCESS;
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* do it now in software before we mangle it.
*/
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+ if (skb_is_gso(skb)) {
+ segs = netem_segment(skb, sch);
+ if (!segs)
+ return NET_XMIT_DROP;
+ } else {
+ segs = skb;
+ }
+
+ skb = segs;
+ segs = segs->next;
+
if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(skb)))
- return qdisc_drop(skb, sch);
+ skb_checksum_help(skb))) {
+ rc = qdisc_drop(skb, sch);
+ goto finish_segs;
+ }
skb->data[prandom_u32() % skb_headlen(skb)] ^=
1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->qstats.requeues++;
}
+finish_segs:
+ if (segs) {
+ while (segs) {
+ skb2 = segs->next;
+ segs->next = NULL;
+ qdisc_skb_cb(segs)->pkt_len = segs->len;
+ last_len = segs->len;
+ rc = qdisc_enqueue(segs, sch);
+ if (rc != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(rc))
+ qdisc_qstats_drop(sch);
+ } else {
+ nb++;
+ len += last_len;
+ }
+ segs = skb2;
+ }
+ sch->q.qlen += nb;
+ if (nb > 1)
+ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+ }
return NET_XMIT_SUCCESS;
}
@@ -598,7 +655,8 @@ deliver:
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
qdisc_qstats_drop(sch);
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1,
+ qdisc_pkt_len(skb));
}
}
goto tfifo_dequeue;
@@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct netem_sched_data *q = qdisc_priv(sch);
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- if (*old) {
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- }
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
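
netem_segment() above uses the usual software-GSO pattern that tbf_segment() further down shares: split the oversized skb with skb_gso_segment(), masking out the GSO feature bits so the split really happens, then walk the ->next chain and detach each segment before handing it on. A minimal sketch of that walk; handle_one() is a hypothetical placeholder for the per-segment work (corrupt, enqueue, ...):

/* Sketch of the software-GSO walk shared by netem_segment() above and
 * tbf_segment() further down: split the oversized skb into linear
 * segments, then detach and hand each one on individually.
 */
static int example_segment_walk(struct sk_buff *skb,
				int (*handle_one)(struct sk_buff *))
{
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs, *next;
	int ret = 0;

	/* Mask out the GSO feature bits so the split really happens. */
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return -ENOMEM;

	consume_skb(skb);	/* the original GSO skb is no longer needed */

	while (segs) {
		int err;

		next = segs->next;
		segs->next = NULL;	/* detach before handing it on */
		err = handle_one(segs);
		if (err && !ret)
			ret = err;
		segs = next;
	}
	return ret;
}
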
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index b783a446d884..71ae3b9629f9 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
int err;
if (!opt)
@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 59ef2daf652c..0d4630b155fe 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -205,7 +205,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
- qdisc_tree_decrease_qlen(child, child->q.qlen);
+ qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
qdisc_destroy(child);
}
}
@@ -224,8 +224,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child;
if (old != &noop_qdisc) {
- qdisc_tree_decrease_qlen(old,
- old->q.qlen);
+ qdisc_tree_reduce_backlog(old,
+ old->q.qlen,
+ old->qstats.backlog);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
@@ -290,13 +291,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->queues[band];
- q->queues[band] = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->queues[band]);
return 0;
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 3dc3a6e56052..8d2d8d953432 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
static void qfq_purge_queue(struct qfq_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
@@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- qfq_purge_queue(cl);
- *old = cl->qdisc;
- cl->qdisc = new;
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->qdisc);
return 0;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 6c0534cc7758..8c0508c0e287 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
q->flags = ctl->flags;
q->limit = ctl->limit;
if (child) {
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}
@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 5bbb6332ec57..c69611640fa5 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
@@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3abab534eb5c..498f0a2cb47f 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -346,7 +346,7 @@ static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- unsigned int hash;
+ unsigned int hash, dropped;
sfq_index x, qlen;
struct sfq_slot *slot;
int uninitialized_var(ret);
@@ -461,7 +461,7 @@ enqueue:
return NET_XMIT_SUCCESS;
qlen = slot->qlen;
- sfq_drop(sch);
+ dropped = sfq_drop(sch);
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
@@ -469,7 +469,7 @@ enqueue:
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, dropped);
return NET_XMIT_SUCCESS;
}
@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
struct sfq_slot *slot;
struct sk_buff_head list;
int dropped = 0;
+ unsigned int drop_len = 0;
__skb_queue_head_init(&list);
@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
if (x >= SFQ_MAX_FLOWS) {
drop:
qdisc_qstats_backlog_dec(sch, skb);
+ drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
dropped++;
continue;
@@ -594,7 +596,7 @@ drop:
}
}
sch->q.qlen -= dropped;
- qdisc_tree_decrease_qlen(sch, dropped);
+ qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}
static void sfq_perturbation(unsigned long arg)
@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
struct red_parms *p = NULL;
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
- sfq_drop(sch);
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ dropped += sfq_drop(sch);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
del_timer(&q->perturb_timer);
if (q->perturb_period) {
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index a4afde14e865..c2fbde742f37 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
+ unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
nskb = segs->next;
segs->next = NULL;
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
}
sch->q.qlen += nb;
if (nb > 1)
- qdisc_tree_decrease_qlen(sch, 1 - nb);
+ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
consume_skb(skb);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
if (child) {
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}
@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 21e20353178e..63fb5ee212cf 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
}
crq->q.reader = 0;
- crq->item = cache_get(h);
crq->buf = buf;
crq->len = 0;
crq->readers = 0;
spin_lock(&queue_lock);
- if (test_bit(CACHE_PENDING, &h->flags))
+ if (test_bit(CACHE_PENDING, &h->flags)) {
+ crq->item = cache_get(h);
list_add_tail(&crq->q.list, &detail->queue);
- else
+ } else
/* Lost a race, no longer PENDING, so don't enqueue */
ret = -EAGAIN;
spin_unlock(&queue_lock);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7fd1220fbfa0..9b5bd6d142dc 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1794,27 +1794,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
else if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
- if (copied > 0) {
- /* We only do these additional bookkeeping/notification steps
- * if we actually copied something out of the queue pair
- * instead of just peeking ahead.
- */
-
- if (!(flags & MSG_PEEK)) {
- /* If the other side has shutdown for sending and there
- * is nothing more to read, then modify the socket
- * state.
- */
- if (vsk->peer_shutdown & SEND_SHUTDOWN) {
- if (vsock_stream_has_data(vsk) <= 0) {
- sk->sk_state = SS_UNCONNECTED;
- sock_set_flag(sk, SOCK_DONE);
- sk->sk_state_change(sk);
- }
- }
- }
+ if (copied > 0)
err = copied;
- }
out_wait:
finish_wait(sk_sleep(sk), &wait);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 8fae48e01a43..4d7281df26b6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3245,6 +3245,279 @@ static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len)
+{
+ u8 i;
+ u32 mask = 0;
+
+ for (i = 0; i < rates_len; i++) {
+ int rate = (rates[i] & 0x7f) * 5;
+ int ridx;
+
+ for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+ struct ieee80211_rate *srate =
+ &sband->bitrates[ridx];
+ if (rate == srate->bitrate) {
+ mask |= 1 << ridx;
+ break;
+ }
+ }
+ if (ridx == sband->n_bitrates)
+ return 0; /* rate not found */
+ }
+
+ return mask;
+}
+
+static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len,
+ u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
+{
+ u8 i;
+
+ memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
+
+ for (i = 0; i < rates_len; i++) {
+ int ridx, rbit;
+
+ ridx = rates[i] / 8;
+ rbit = BIT(rates[i] % 8);
+
+ /* check validity */
+ if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
+ return false;
+
+ /* check availability */
+ if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
+ mcs[ridx] |= rbit;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
+{
+ u16 mcs_mask = 0;
+
+ switch (vht_mcs_map) {
+ case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ mcs_mask = 0x00FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ mcs_mask = 0x01FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ mcs_mask = 0x03FF;
+ break;
+ default:
+ break;
+ }
+
+ return mcs_mask;
+}
+
+static void vht_build_mcs_mask(u16 vht_mcs_map,
+ u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ u8 nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
+ vht_mcs_map >>= 2;
+ }
+}
+
+static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
+ struct nl80211_txrate_vht *txrate,
+ u16 mcs[NL80211_VHT_NSS_MAX])
+{
+ u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
+ u8 i;
+
+ if (!sband->vht_cap.vht_supported)
+ return false;
+
+ memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+ /* Build vht_mcs_mask from VHT capabilities */
+ vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
+ mcs[i] = txrate->mcs[i];
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
+ [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_RATES },
+ [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_HT_RATES },
+ [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+ [NL80211_TXRATE_GI] = { .type = NLA_U8 },
+};
+
+static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
+ struct cfg80211_bitrate_mask *mask)
+{
+ struct nlattr *tb[NL80211_TXRATE_MAX + 1];
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ int rem, i;
+ struct nlattr *tx_rates;
+ struct ieee80211_supported_band *sband;
+ u16 vht_tx_mcs_map;
+
+ memset(mask, 0, sizeof(*mask));
+ /* Default to all rates enabled */
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ sband = rdev->wiphy.bands[i];
+
+ if (!sband)
+ continue;
+
+ mask->control[i].legacy = (1 << sband->n_bitrates) - 1;
+ memcpy(mask->control[i].ht_mcs,
+ sband->ht_cap.mcs.rx_mask,
+ sizeof(mask->control[i].ht_mcs));
+
+ if (!sband->vht_cap.vht_supported)
+ continue;
+
+ vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+ }
+
+ /* if no rates are given set it back to the defaults */
+ if (!info->attrs[NL80211_ATTR_TX_RATES])
+ goto out;
+
+ /* The nested attribute uses enum nl80211_band as the index. This maps
+ * directly to the enum nl80211_band values used in cfg80211.
+ */
+ BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
+ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
+ enum ieee80211_band band = nla_type(tx_rates);
+ int err;
+
+ if (band < 0 || band >= IEEE80211_NUM_BANDS)
+ return -EINVAL;
+ sband = rdev->wiphy.bands[band];
+ if (sband == NULL)
+ return -EINVAL;
+ err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+ nla_len(tx_rates), nl80211_txattr_policy);
+ if (err)
+ return err;
+ if (tb[NL80211_TXRATE_LEGACY]) {
+ mask->control[band].legacy = rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_LEGACY]),
+ nla_len(tb[NL80211_TXRATE_LEGACY]));
+ if ((mask->control[band].legacy == 0) &&
+ nla_len(tb[NL80211_TXRATE_LEGACY]))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_HT]) {
+ if (!ht_rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_HT]),
+ nla_len(tb[NL80211_TXRATE_HT]),
+ mask->control[band].ht_mcs))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_VHT]) {
+ if (!vht_set_mcs_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_VHT]),
+ mask->control[band].vht_mcs))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_GI]) {
+ mask->control[band].gi =
+ nla_get_u8(tb[NL80211_TXRATE_GI]);
+ if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+ }
+
+ if (mask->control[band].legacy == 0) {
+ /* don't allow empty legacy rates if HT or VHT
+ * are not even supported.
+ */
+ if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
+ rdev->wiphy.bands[band]->vht_cap.vht_supported))
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ if (mask->control[band].ht_mcs[i])
+ goto out;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ if (mask->control[band].vht_mcs[i])
+ goto out;
+
+ /* legacy and mcs rates may not be both empty */
+ return -EINVAL;
+ }
+ }
+
+out:
+ return 0;
+}
+
+static int validate_beacon_tx_rate(struct cfg80211_ap_settings *params)
+{
+ u32 rate, count_ht, count_vht, i;
+ enum nl80211_band band;
+
+ band = params->chandef.chan->band;
+ rate = params->beacon_rate.control[band].legacy;
+
+ /* Allow only one rate */
+ if (hweight32(rate) > 1)
+ return -EINVAL;
+
+ count_ht = 0;
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ if (hweight8(params->beacon_rate.control[band].ht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (params->beacon_rate.control[band].ht_mcs[i]) {
+ count_ht++;
+ if (count_ht > 1)
+ return -EINVAL;
+ }
+ if (count_ht && rate)
+ return -EINVAL;
+ }
+
+ count_vht = 0;
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if (hweight16(params->beacon_rate.control[band].vht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (params->beacon_rate.control[band].vht_mcs[i]) {
+ count_vht++;
+ if (count_vht > 1)
+ return -EINVAL;
+ }
+ if (count_vht && rate)
+ return -EINVAL;
+ }
+
+ if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht))
+ return -EINVAL;
+
+ return 0;
+}
+
static int nl80211_parse_beacon(struct nlattr *attrs[],
struct cfg80211_beacon_data *bcn)
{
@@ -3474,6 +3747,16 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
wdev->iftype))
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_TX_RATES]) {
+ err = nl80211_parse_tx_bitrate_mask(info, &params.beacon_rate);
+ if (err)
+ return err;
+
+ err = validate_beacon_tx_rate(&params);
+ if (err)
+ return err;
+ }
+
if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
params.smps_mode =
nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
@@ -8292,237 +8575,21 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
}
-static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
- u8 *rates, u8 rates_len)
-{
- u8 i;
- u32 mask = 0;
-
- for (i = 0; i < rates_len; i++) {
- int rate = (rates[i] & 0x7f) * 5;
- int ridx;
- for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
- struct ieee80211_rate *srate =
- &sband->bitrates[ridx];
- if (rate == srate->bitrate) {
- mask |= 1 << ridx;
- break;
- }
- }
- if (ridx == sband->n_bitrates)
- return 0; /* rate not found */
- }
-
- return mask;
-}
-
-static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
- u8 *rates, u8 rates_len,
- u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
-{
- u8 i;
-
- memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
-
- for (i = 0; i < rates_len; i++) {
- int ridx, rbit;
-
- ridx = rates[i] / 8;
- rbit = BIT(rates[i] % 8);
-
- /* check validity */
- if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
- return false;
-
- /* check availability */
- if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
- mcs[ridx] |= rbit;
- else
- return false;
- }
-
- return true;
-}
-
-static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
-{
- u16 mcs_mask = 0;
-
- switch (vht_mcs_map) {
- case IEEE80211_VHT_MCS_NOT_SUPPORTED:
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_7:
- mcs_mask = 0x00FF;
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_8:
- mcs_mask = 0x01FF;
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_9:
- mcs_mask = 0x03FF;
- break;
- default:
- break;
- }
-
- return mcs_mask;
-}
-
-static void vht_build_mcs_mask(u16 vht_mcs_map,
- u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
-{
- u8 nss;
-
- for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
- vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
- vht_mcs_map >>= 2;
- }
-}
-
-static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
- struct nl80211_txrate_vht *txrate,
- u16 mcs[NL80211_VHT_NSS_MAX])
-{
- u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
- u8 i;
-
- if (!sband->vht_cap.vht_supported)
- return false;
-
- memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
-
- /* Build vht_mcs_mask from VHT capabilities */
- vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
-
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
- if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
- mcs[i] = txrate->mcs[i];
- else
- return false;
- }
-
- return true;
-}
-
-static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
- [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
- .len = NL80211_MAX_SUPP_RATES },
- [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
- .len = NL80211_MAX_SUPP_HT_RATES },
- [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
- [NL80211_TXRATE_GI] = { .type = NLA_U8 },
-};
-
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
struct genl_info *info)
{
- struct nlattr *tb[NL80211_TXRATE_MAX + 1];
- struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct cfg80211_bitrate_mask mask;
- int rem, i;
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- struct nlattr *tx_rates;
- struct ieee80211_supported_band *sband;
- u16 vht_tx_mcs_map;
+ int err;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
- memset(&mask, 0, sizeof(mask));
- /* Default to all rates enabled */
- for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
- sband = rdev->wiphy.bands[i];
-
- if (!sband)
- continue;
-
- mask.control[i].legacy = (1 << sband->n_bitrates) - 1;
- memcpy(mask.control[i].ht_mcs,
- sband->ht_cap.mcs.rx_mask,
- sizeof(mask.control[i].ht_mcs));
-
- if (!sband->vht_cap.vht_supported)
- continue;
-
- vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- vht_build_mcs_mask(vht_tx_mcs_map, mask.control[i].vht_mcs);
- }
-
- /* if no rates are given set it back to the defaults */
- if (!info->attrs[NL80211_ATTR_TX_RATES])
- goto out;
-
- /*
- * The nested attribute uses enum nl80211_band as the index. This maps
- * directly to the enum ieee80211_band values used in cfg80211.
- */
- BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
- nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
- enum ieee80211_band band = nla_type(tx_rates);
- int err;
-
- if (band < 0 || band >= IEEE80211_NUM_BANDS)
- return -EINVAL;
- sband = rdev->wiphy.bands[band];
- if (sband == NULL)
- return -EINVAL;
- err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
- nla_len(tx_rates), nl80211_txattr_policy);
- if (err)
- return err;
- if (tb[NL80211_TXRATE_LEGACY]) {
- mask.control[band].legacy = rateset_to_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_LEGACY]),
- nla_len(tb[NL80211_TXRATE_LEGACY]));
- if ((mask.control[band].legacy == 0) &&
- nla_len(tb[NL80211_TXRATE_LEGACY]))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_HT]) {
- if (!ht_rateset_to_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_HT]),
- nla_len(tb[NL80211_TXRATE_HT]),
- mask.control[band].ht_mcs))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_VHT]) {
- if (!vht_set_mcs_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_VHT]),
- mask.control[band].vht_mcs))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_GI]) {
- mask.control[band].gi =
- nla_get_u8(tb[NL80211_TXRATE_GI]);
- if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
- }
-
- if (mask.control[band].legacy == 0) {
- /* don't allow empty legacy rates if HT or VHT
- * are not even supported.
- */
- if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
- rdev->wiphy.bands[band]->vht_cap.vht_supported))
- return -EINVAL;
-
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- if (mask.control[band].ht_mcs[i])
- goto out;
-
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- if (mask.control[band].vht_mcs[i])
- goto out;
-
- /* legacy and mcs rates may not be both empty */
- return -EINVAL;
- }
- }
+ err = nl80211_parse_tx_bitrate_mask(info, &mask);
+ if (err)
+ return err;
-out:
return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
}
@@ -13252,7 +13319,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
struct wireless_dev *wdev;
struct cfg80211_beacon_registration *reg, *tmp;
- if (state != NETLINK_URELEASE)
+ if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
return NOTIFY_DONE;
rcu_read_lock();
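
A quick worked example of the validate_beacon_tx_rate() logic added above: with the band's masks set to legacy = 0x10 and all HT/VHT MCS entries zero, hweight32(0x10) == 1, count_ht and count_vht stay 0, and neither final check fires, so a single legacy beacon rate is accepted. Setting legacy = 0x10 together with ht_mcs[0] = 0x01 leaves rate non-zero while count_ht becomes 1, so the (count_ht && rate) test rejects the mix; an all-zero request fails the final (!rate && !count_ht && !count_vht) test. In short: exactly one rate, taken from exactly one of the legacy, HT or VHT sets.
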
diff --git a/net/wireless/util.c b/net/wireless/util.c
index a5b20d75017e..6822b4e57fad 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1504,9 +1504,7 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
}
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data)
@@ -1517,7 +1515,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
int num_interfaces = 0;
u32 used_iftypes = 0;
- if (radar_detect) {
+ if (params->radar_detect) {
rcu_read_lock();
regdom = rcu_dereference(cfg80211_regdomain);
if (regdom)
@@ -1526,8 +1524,8 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
}
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
- num_interfaces += iftype_num[iftype];
- if (iftype_num[iftype] > 0 &&
+ num_interfaces += params->iftype_num[iftype];
+ if (params->iftype_num[iftype] > 0 &&
!(wiphy->software_iftypes & BIT(iftype)))
used_iftypes |= BIT(iftype);
}
@@ -1541,7 +1539,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
if (num_interfaces > c->max_interfaces)
continue;
- if (num_different_channels > c->num_different_channels)
+ if (params->num_different_channels > c->num_different_channels)
continue;
limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
@@ -1556,16 +1554,17 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
all_iftypes |= limits[j].types;
if (!(limits[j].types & BIT(iftype)))
continue;
- if (limits[j].max < iftype_num[iftype])
+ if (limits[j].max < params->iftype_num[iftype])
goto cont;
- limits[j].max -= iftype_num[iftype];
+ limits[j].max -= params->iftype_num[iftype];
}
}
- if (radar_detect != (c->radar_detect_widths & radar_detect))
+ if (params->radar_detect !=
+ (c->radar_detect_widths & params->radar_detect))
goto cont;
- if (radar_detect && c->radar_detect_regions &&
+ if (params->radar_detect && c->radar_detect_regions &&
!(c->radar_detect_regions & BIT(region)))
goto cont;
@@ -1599,14 +1598,11 @@ cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c,
}
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES])
+ struct iface_combination_params *params)
{
int err, num = 0;
- err = cfg80211_iter_combinations(wiphy, num_different_channels,
- radar_detect, iftype_num,
+ err = cfg80211_iter_combinations(wiphy, params,
cfg80211_iter_sum_ifcombs, &num);
if (err)
return err;
@@ -1625,14 +1621,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
u8 radar_detect)
{
struct wireless_dev *wdev_iter;
- int num[NUM_NL80211_IFTYPES];
struct ieee80211_channel
*used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
struct ieee80211_channel *ch;
enum cfg80211_chan_mode chmode;
- int num_different_channels = 0;
int total = 1;
int i;
+ struct iface_combination_params params = {
+ .radar_detect = radar_detect,
+ };
ASSERT_RTNL();
@@ -1649,10 +1646,9 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
return 0;
}
- memset(num, 0, sizeof(num));
memset(used_channels, 0, sizeof(used_channels));
- num[iftype] = 1;
+ params.iftype_num[iftype] = 1;
/* TODO: We'll probably not need this anymore, since this
* should only be called with CHAN_MODE_UNDEFINED. There are
@@ -1665,10 +1661,10 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
case CHAN_MODE_SHARED:
WARN_ON(!chan);
used_channels[0] = chan;
- num_different_channels++;
+ params.num_different_channels++;
break;
case CHAN_MODE_EXCLUSIVE:
- num_different_channels++;
+ params.num_different_channels++;
break;
}
@@ -1696,7 +1692,8 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
*/
mutex_lock_nested(&wdev_iter->mtx, 1);
__acquire(wdev_iter->mtx);
- cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect);
+ cfg80211_get_chan_state(wdev_iter, &ch, &chmode,
+ &params.radar_detect);
wdev_unlock(wdev_iter);
switch (chmode) {
@@ -1712,23 +1709,22 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
if (used_channels[i] == NULL) {
used_channels[i] = ch;
- num_different_channels++;
+ params.num_different_channels++;
}
break;
case CHAN_MODE_EXCLUSIVE:
- num_different_channels++;
+ params.num_different_channels++;
break;
}
- num[wdev_iter->iftype]++;
+ params.iftype_num[wdev_iter->iftype]++;
total++;
}
- if (total == 1 && !radar_detect)
+ if (total == 1 && !params.radar_detect)
return 0;
- return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
- radar_detect, num);
+ return cfg80211_check_combinations(&rdev->wiphy, &params);
}
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
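The refactor above folds the separate num_different_channels, radar_detect and iftype_num arguments into a single struct iface_combination_params. As a rough caller-side sketch (not part of this patch; the function name, wiphy pointer and interface counts are illustrative, and the struct is assumed to be declared in the cfg80211 headers touched elsewhere in this series):

#include <net/cfg80211.h>

/* Illustrative only: ask cfg80211 whether one AP plus one station on a
 * single channel fits any combination advertised by the wiphy. */
static int example_check_ap_sta(struct wiphy *wiphy)
{
	struct iface_combination_params params = {
		.num_different_channels = 1,
		.radar_detect = 0,
	};

	params.iftype_num[NL80211_IFTYPE_AP] = 1;
	params.iftype_num[NL80211_IFTYPE_STATION] = 1;

	/* 0 means at least one advertised combination allows this set */
	return cfg80211_check_combinations(wiphy, &params);
}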
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 7ecd04c21360..997ff7b2509b 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
memset(&theirs, 0, sizeof(theirs));
memcpy(new, ours, sizeof(*new));
+ memset(dte, 0, sizeof(*dte));
len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
if (len < 0)
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 8d8d1ec429eb..9b96f4fb8cea 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
u64 cookie;
} data;
- memset(&data, 0, sizeof(data));
data.pid = bpf_get_current_pid_tgid();
data.cookie = 0x12345678;
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 0b7dc2fd7bac..dd243d2abd87 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
if (in)
goto load;
sym_add_change_count(1);
- if (!sym_defconfig_list) {
- sym_calc_value(modules_sym);
+ if (!sym_defconfig_list)
return 1;
- }
for_all_defaults(sym_defconfig_list, prop) {
if (expr_calc_value(prop->visible.expr) == no ||
@@ -403,7 +401,6 @@ setsym:
}
free(line);
fclose(in);
- sym_calc_value(modules_sym);
return 0;
}
@@ -414,8 +411,12 @@ int conf_read(const char *name)
sym_set_change_count(0);
- if (conf_read_simple(name, S_DEF_USER))
+ if (conf_read_simple(name, S_DEF_USER)) {
+ sym_calc_value(modules_sym);
return 1;
+ }
+
+ sym_calc_value(modules_sym);
for_all_symbols(i, sym) {
sym_calc_value(sym);
@@ -846,6 +847,7 @@ static int conf_split_config(void)
name = conf_get_autoconfig_name();
conf_read_simple(name, S_DEF_AUTO);
+ sym_calc_value(modules_sym);
if (chdir("include/config"))
return 1;
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index c2423d913b46..a2c0d620ca80 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -266,9 +266,9 @@ do_file(char const *const fname)
break;
} /* end switch */
if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
- || r2(&ehdr->e_type) != ET_EXEC
+ || (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN)
|| ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
- fprintf(stderr, "unrecognized ET_EXEC file %s\n", fname);
+ fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
fail_file();
}
@@ -282,12 +282,13 @@ do_file(char const *const fname)
case EM_386:
case EM_X86_64:
case EM_S390:
+ case EM_AARCH64:
+ case EM_PARISC:
custom_sort = sort_relative_table;
break;
case EM_ARCOMPACT:
case EM_ARCV2:
case EM_ARM:
- case EM_AARCH64:
case EM_MICROBLAZE:
case EM_MIPS:
case EM_XTENSA:
@@ -304,7 +305,7 @@ do_file(char const *const fname)
if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
|| r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
fprintf(stderr,
- "unrecognized ET_EXEC file: %s\n", fname);
+ "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
fail_file();
}
do32(ehdr, fname, custom_sort);
@@ -314,7 +315,7 @@ do_file(char const *const fname)
if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
|| r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
fprintf(stderr,
- "unrecognized ET_EXEC file: %s\n", fname);
+ "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
fail_file();
}
do64(ghdr, fname, custom_sort);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 5c4fa8eba1d0..367dbf0d285e 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -843,7 +843,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
bool allow_powerdown)
{
hda_nid_t nid, changed = 0;
- int i, state;
+ int i, state, power;
for (i = 0; i < path->depth; i++) {
nid = path->path[i];
@@ -855,7 +855,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
state = AC_PWRST_D0;
else
state = AC_PWRST_D3;
- if (!snd_hda_check_power_state(codec, nid, state)) {
+ power = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_POWER_STATE, 0);
+ if (power != (state | (state << 4))) {
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_POWER_STATE, state);
changed = nid;
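With this change path_power_update() compares the raw AC_VERB_GET_POWER_STATE response against the target encoded in both nibbles (the programmed state in bits 0-3, the actual state in bits 4-7) instead of relying on snd_hda_check_power_state(). A hedged helper sketch of that comparison; the function name is made up for illustration:

/* Sketch: true only when both the set and the actual power-state
 * nibbles reported by the widget already equal the target state. */
static bool example_power_settled(struct hda_codec *codec, hda_nid_t nid,
				  unsigned int target)
{
	unsigned int val = snd_hda_codec_read(codec, nid, 0,
					      AC_VERB_GET_POWER_STATE, 0);

	return val == (target | (target << 4));
}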
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2ff692dd2c5f..411630e9c034 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2207,6 +2207,9 @@ static const struct pci_device_id azx_ids[] = {
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ /* Broxton-T */
+ { PCI_DEVICE(0x8086, 0x1a98),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 64e0d1d81ca5..9739fce9e032 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
err = snd_hda_codec_configure(codec);
if (err < 0)
goto error;
- /* rebuild PCMs */
- err = snd_hda_codec_build_pcms(codec);
- if (err < 0)
- goto error;
- /* rebuild mixers */
- err = snd_hda_codec_build_controls(codec);
- if (err < 0)
- goto error;
err = snd_card_register(codec->card);
error:
snd_hda_power_down(codec);
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a47e8ae0eb30..80bbadc83721 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
{
struct cs_spec *spec = codec->spec;
int err;
+ int i;
err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
if (err < 0)
@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
+ /* keep the ADCs powered up when it's dynamically switchable */
+ if (spec->gen.dyn_adc_switch) {
+ unsigned int done = 0;
+ for (i = 0; i < spec->gen.input_mux.num_items; i++) {
+ int idx = spec->gen.dyn_adc_idx[i];
+ if (done & (1 << idx))
+ continue;
+ snd_hda_gen_fix_pin_power(codec,
+ spec->gen.adc_nids[idx]);
+ done |= 1 << idx;
+ }
+ }
+
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1402ba954b3d..4918ffa5ba68 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+ SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -6424,6 +6426,7 @@ enum {
ALC668_FIXUP_DELL_DISABLE_AAMIX,
ALC668_FIXUP_DELL_XPS13,
ALC662_FIXUP_ASUS_Nx50,
+ ALC668_FIXUP_ASUS_Nx51,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6670,6 +6673,15 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC662_FIXUP_BASS_1A
},
+ [ALC668_FIXUP_ASUS_Nx51] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ {0x1a, 0x90170151}, /* bass speaker */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC662_FIXUP_BASS_CHMAP,
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6692,11 +6704,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+ SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+ SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index c5194f5b150a..d7e71f309299 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
}
pcxhr_msg_thread(mgr);
+ mutex_unlock(&mgr->lock);
return IRQ_HANDLED;
}
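The pcxhr change adds the mutex_unlock() that was missing on the IRQ_HANDLED return path of the threaded interrupt handler. A minimal sketch of the intended pairing, assuming (as the added unlock implies) that mgr->lock is taken near the top of the handler; names other than pcxhr_msg_thread() are illustrative:

static irqreturn_t example_threaded_irq(int irq, void *dev_id)
{
	struct pcxhr_mgr *mgr = dev_id;

	mutex_lock(&mgr->lock);
	/* ... service the DSP message queue, e.g. pcxhr_msg_thread(mgr) ... */
	mutex_unlock(&mgr->lock);	/* must pair with the lock above */
	return IRQ_HANDLED;
}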
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index f2beb1aa5763..b1c8bb39cdf1 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
/* Interface data select */
static const char * const rt5640_data_select[] = {
- "Normal", "left copy to right", "right copy to left", "Swap"};
+ "Normal", "Swap", "left copy to right", "right copy to left"};
static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 3deb8babeabb..243f42633989 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -442,39 +442,39 @@
#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
#define RT5640_IF1_DAC_SEL_SFT 14
#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
+#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
+#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
#define RT5640_IF1_ADC_SEL_SFT 12
#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
+#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
+#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
#define RT5640_IF2_DAC_SEL_SFT 10
#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
+#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
+#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
#define RT5640_IF2_ADC_SEL_SFT 8
#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
+#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
+#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
#define RT5640_IF3_DAC_SEL_SFT 6
#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
+#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
+#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
#define RT5640_IF3_ADC_SEL_SFT 4
#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
+#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
+#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
/* REC Left Mixer Control 1 (0x3b) */
#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
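After the reorder above, the index selected by the "Interface data select" control maps one-to-one onto the hardware field encoding (0 = Normal, 1 = Swap, 2 = left copy to right, 3 = right copy to left). A small sketch of the resulting mapping, using only macros from this header; the array itself is illustrative and not part of the driver:

static const unsigned int rt5640_if1_dac_sel_map[] = {
	RT5640_IF1_DAC_SEL_NOR,		/* 0: "Normal" */
	RT5640_IF1_DAC_SEL_SWAP,	/* 1: "Swap" */
	RT5640_IF1_DAC_SEL_L2R,		/* 2: "left copy to right" */
	RT5640_IF1_DAC_SEL_R2L,		/* 3: "right copy to left" */
};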
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index e619d5651b09..080c78e88e10 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
regcache_cache_only(ssm4567->regmap, !enable);
if (enable) {
+ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
+ 0x00);
+ if (ret)
+ return ret;
+
ret = regmap_update_bits(ssm4567->regmap,
SSM4567_REG_POWER_CTRL,
SSM4567_POWER_SPWDN, 0x00);
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index d9d413f0a80a..f51301d1ab08 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -717,8 +717,8 @@ static void wdsp_ssr_work_fn(struct work_struct *work)
*/
WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_CODE_DLOADED);
- /* If codec went down, then all components must be re-initialized */
- if (wdsp->ssr_type == WDSP_SSR_TYPE_CDC_DOWN) {
+ /* If codec restarted, then all components must be re-initialized */
+ if (wdsp->ssr_type == WDSP_SSR_TYPE_CDC_UP) {
wdsp_deinit_components(wdsp);
WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_INITIALIZED);
}
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 3cbc1e7821cf..a1f23685beb5 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -2593,6 +2593,8 @@ void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
if (mbhc->mbhc_cb && mbhc->mbhc_cb->register_notifier)
mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false);
mutex_destroy(&mbhc->codec_resource_lock);
+ mutex_destroy(&mbhc->hphl_pa_lock);
+ mutex_destroy(&mbhc->hphr_pa_lock);
}
EXPORT_SYMBOL(wcd_mbhc_deinit);
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index b2d4e08a55cc..281db1d07f57 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -143,6 +143,7 @@ static int cpe_debug_mode;
#define DAPM_MICBIAS3_STANDALONE "MIC BIAS3 Standalone"
#define DAPM_MICBIAS4_STANDALONE "MIC BIAS4 Standalone"
+#define DAPM_LDO_H_STANDALONE "LDO_H"
module_param(cpe_debug_mode, int,
S_IRUGO | S_IWUSR | S_IWGRP);
MODULE_PARM_DESC(cpe_debug_mode, "boot cpe in debug mode");
@@ -5792,6 +5793,7 @@ static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
char *wname;
int ret = 0, amic_n;
u16 tx_vol_ctl_reg, pwr_level_reg = 0, dec_cfg_reg, hpf_gate_reg;
+ u16 tx_gain_ctl_reg;
char *dec;
u8 hpf_cut_off_freq;
struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
@@ -5834,6 +5836,7 @@ static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
tx_vol_ctl_reg = WCD9335_CDC_TX0_TX_PATH_CTL + 16 * decimator;
hpf_gate_reg = WCD9335_CDC_TX0_TX_PATH_SEC2 + 16 * decimator;
dec_cfg_reg = WCD9335_CDC_TX0_TX_PATH_CFG0 + 16 * decimator;
+ tx_gain_ctl_reg = WCD9335_CDC_TX0_TX_VOL_CTL + 16 * decimator;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -5896,6 +5899,9 @@ static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
schedule_delayed_work(
&tasha->tx_hpf_work[decimator].dwork,
msecs_to_jiffies(300));
+ /* apply gain after decimator is enabled */
+ snd_soc_write(codec, tx_gain_ctl_reg,
+ snd_soc_read(codec, tx_gain_ctl_reg));
break;
case SND_SOC_DAPM_PRE_PMD:
hpf_cut_off_freq =
@@ -6181,6 +6187,55 @@ static int __tasha_codec_enable_micbias(struct snd_soc_dapm_widget *w,
return 0;
}
+static int tasha_codec_ldo_h_control(struct snd_soc_dapm_widget *w,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ tasha->ldo_h_users++;
+
+ if (tasha->ldo_h_users == 1)
+ snd_soc_update_bits(codec, WCD9335_LDOH_MODE,
+ 0x80, 0x80);
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ tasha->ldo_h_users--;
+
+ if (tasha->ldo_h_users < 0)
+ tasha->ldo_h_users = 0;
+
+ if (tasha->ldo_h_users == 0)
+ snd_soc_update_bits(codec, WCD9335_LDOH_MODE,
+ 0x80, 0x00);
+ }
+
+ return 0;
+}
+
+static int tasha_codec_force_enable_ldo_h(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd_resmgr_enable_master_bias(tasha->resmgr);
+ tasha_codec_ldo_h_control(w, event);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ tasha_codec_ldo_h_control(w, event);
+ wcd_resmgr_disable_master_bias(tasha->resmgr);
+ break;
+ }
+
+ return 0;
+}
+
static int tasha_codec_force_enable_micbias(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -6213,6 +6268,29 @@ static int tasha_codec_enable_micbias(struct snd_soc_dapm_widget *w,
return __tasha_codec_enable_micbias(w, event);
}
+static int tasha_codec_enable_standalone_ldo_h(struct snd_soc_codec *codec,
+ bool enable)
+{
+ int rc;
+
+ if (enable)
+ rc = snd_soc_dapm_force_enable_pin(
+ snd_soc_codec_get_dapm(codec),
+ DAPM_LDO_H_STANDALONE);
+ else
+ rc = snd_soc_dapm_disable_pin(
+ snd_soc_codec_get_dapm(codec),
+ DAPM_LDO_H_STANDALONE);
+
+ if (!rc)
+ snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+ else
+ dev_err(codec->dev, "%s: ldo_h force %s pin failed\n",
+ __func__, (enable ? "enable" : "disable"));
+
+ return rc;
+}
+
/*
* tasha_codec_enable_standalone_micbias - enable micbias standalone
* @codec: pointer to codec instance
@@ -7767,6 +7845,34 @@ static const struct soc_enum tasha_conn_mad_enum =
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tasha_conn_mad_text),
tasha_conn_mad_text);
+static int tasha_enable_ldo_h_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ u8 val = 0;
+
+ if (codec)
+ val = snd_soc_read(codec, WCD9335_LDOH_MODE) & 0x80;
+
+ ucontrol->value.integer.value[0] = !!val;
+
+ return 0;
+}
+
+static int tasha_enable_ldo_h_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ int value = ucontrol->value.integer.value[0];
+ bool enable;
+
+ enable = !!value;
+ if (codec)
+ tasha_codec_enable_standalone_ldo_h(codec, enable);
+
+ return 0;
+}
+
static int tasha_mad_input_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -8511,6 +8617,8 @@ static const struct snd_kcontrol_new tasha_snd_controls[] = {
SOC_ENUM_EXT("MAD Input", tasha_conn_mad_enum,
tasha_mad_input_get, tasha_mad_input_put),
+ SOC_SINGLE_EXT("LDO_H Enable", SND_SOC_NOPM, 0, 1, 0,
+ tasha_enable_ldo_h_get, tasha_enable_ldo_h_put),
SOC_SINGLE_EXT("DMIC1_CLK_PIN_MODE", SND_SOC_NOPM, 17, 1, 0,
tasha_pinctl_mode_get, tasha_pinctl_mode_put),
@@ -10265,14 +10373,14 @@ static int tasha_codec_ec_buf_mux_enable(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_write(codec, WCD9335_CPE_SS_EC_BUF_INT_PERIOD, 0x3B);
- snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x68, 0x28);
+ snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x08, 0x08);
snd_soc_update_bits(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0,
0x08, 0x08);
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_update_bits(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0,
0x08, 0x00);
- snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x68, 0x40);
+ snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x08, 0x00);
snd_soc_write(codec, WCD9335_CPE_SS_EC_BUF_INT_PERIOD, 0x00);
break;
}
@@ -10745,6 +10853,9 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS4_STANDALONE, SND_SOC_NOPM, 0, 0,
tasha_codec_force_enable_micbias,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY(DAPM_LDO_H_STANDALONE, SND_SOC_NOPM, 0, 0,
+ tasha_codec_force_enable_ldo_h,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("ANC0 FB MUX", SND_SOC_NOPM, 0, 0, &anc0_fb_mux),
SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.c b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
index 4e3e769585e6..580591a32ba1 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
@@ -619,6 +619,53 @@ static const struct snd_soc_dapm_widget tavil_dsd_widgets[] = {
};
/**
+ * tavil_dsd_post_ssr_init - DSD initialization after subsystem restart
+ *
+ * @dsd_conf: pointer to tavil_dsd_config
+ *
+ * Returns 0 on success or an error code on failure
+ */
+int tavil_dsd_post_ssr_init(struct tavil_dsd_config *dsd_conf)
+{
+ struct snd_soc_codec *codec;
+
+ if (!dsd_conf || !dsd_conf->codec)
+ return -EINVAL;
+
+ codec = dsd_conf->codec;
+ /* Disable DSD Interrupts */
+ snd_soc_update_bits(codec, WCD934X_INTR_CODEC_MISC_MASK, 0x08, 0x08);
+
+ /* DSD registers init */
+ if (dsd_conf->version == TAVIL_VERSION_1_0) {
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x02, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x02, 0x00);
+ }
+ /* DSD0: Mute EN */
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x04, 0x04);
+ /* DSD1: Mute EN */
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x04, 0x04);
+ snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG3, 0x10,
+ 0x10);
+ snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG3, 0x10,
+ 0x10);
+ snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG0, 0x0E,
+ 0x0A);
+ snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG0, 0x0E,
+ 0x0A);
+ snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG1, 0x07,
+ 0x04);
+ snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG1, 0x07,
+ 0x04);
+
+ /* Enable DSD Interrupts */
+ snd_soc_update_bits(codec, WCD934X_INTR_CODEC_MISC_MASK, 0x08, 0x00);
+
+ return 0;
+}
+EXPORT_SYMBOL(tavil_dsd_post_ssr_init);
+
+/**
 * tavil_dsd_init - DSD initialization
*
* @codec: pointer to snd_soc_codec
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.h b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
index 21450c90a272..498288335b3b 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
@@ -55,6 +55,7 @@ void tavil_dsd_set_interp_rate(struct tavil_dsd_config *dsd_conf, u16 rx_port,
u32 sample_rate, u8 sample_rate_val);
struct tavil_dsd_config *tavil_dsd_init(struct snd_soc_codec *codec);
void tavil_dsd_deinit(struct tavil_dsd_config *dsd_config);
+int tavil_dsd_post_ssr_init(struct tavil_dsd_config *dsd_config);
#else
int tavil_dsd_set_mixer_value(struct tavil_dsd_config *dsd_conf,
int interp_num, int sw_value)
@@ -88,5 +89,9 @@ struct tavil_dsd_config *tavil_dsd_init(struct snd_soc_codec *codec)
void tavil_dsd_deinit(struct tavil_dsd_config *dsd_config)
{ }
+int tavil_dsd_post_ssr_init(struct tavil_dsd_config *dsd_config)
+{
+ return 0;
+}
#endif
#endif
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index 8d2247176607..7e4cd6ce55a7 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -46,6 +46,11 @@
mutex_unlock(&lock); \
}
+enum wcd_mem_type {
+ WCD_MEM_TYPE_ALWAYS_ON,
+ WCD_MEM_TYPE_SWITCHABLE,
+};
+
struct wcd_cntl_attribute {
struct attribute attr;
ssize_t (*show)(struct wcd_dsp_cntl *cntl, char *buf);
@@ -469,7 +474,8 @@ static void wcd_cntl_cpar_ctrl(struct wcd_dsp_cntl *cntl,
snd_soc_write(codec, WCD934X_CPE_SS_CPAR_CTL, 0x00);
}
-static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl)
+static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl,
+ enum wcd_mem_type mem_type)
{
struct snd_soc_codec *codec = cntl->codec;
struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
@@ -477,77 +483,115 @@ static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl)
u8 status;
int ret = 0;
- snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
- 0x04, 0x00);
- snd_soc_update_bits(codec, WCD934X_TEST_DEBUG_MEM_CTRL,
- 0x80, 0x80);
- snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
- 0x01, 0x01);
- do {
- loop_cnt++;
- /* Time to enable the power domain for memory */
- usleep_range(100, 150);
- status = snd_soc_read(codec,
- WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL);
- } while ((status & 0x02) != 0x02 &&
- loop_cnt != WCD_MEM_ENABLE_MAX_RETRIES);
-
- if ((status & 0x02) != 0x02) {
- dev_err(cntl->codec->dev,
- "%s: power domain not enabled, status = 0x%02x\n",
- __func__, status);
- ret = -EIO;
- goto done;
- }
+ switch (mem_type) {
- /* 512KB of always on region */
- wcd9xxx_slim_write_repeat(wcd9xxx,
- WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0,
- ARRAY_SIZE(mem_enable_values),
- mem_enable_values);
- wcd9xxx_slim_write_repeat(wcd9xxx,
- WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1,
- ARRAY_SIZE(mem_enable_values),
- mem_enable_values);
-
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN, 0x05);
-
- /* Rest of the memory */
- wcd9xxx_slim_write_repeat(wcd9xxx,
- WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2,
- ARRAY_SIZE(mem_enable_values),
- mem_enable_values);
- wcd9xxx_slim_write_repeat(wcd9xxx,
- WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3,
- ARRAY_SIZE(mem_enable_values),
- mem_enable_values);
+ case WCD_MEM_TYPE_ALWAYS_ON:
+ /* 512KB of always on region */
+ wcd9xxx_slim_write_repeat(wcd9xxx,
+ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0,
+ ARRAY_SIZE(mem_enable_values),
+ mem_enable_values);
+ wcd9xxx_slim_write_repeat(wcd9xxx,
+ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1,
+ ARRAY_SIZE(mem_enable_values),
+ mem_enable_values);
+ break;
+
+ case WCD_MEM_TYPE_SWITCHABLE:
+
+ snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
+ 0x04, 0x00);
+ snd_soc_update_bits(codec, WCD934X_TEST_DEBUG_MEM_CTRL,
+ 0x80, 0x80);
+ snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
+ 0x01, 0x01);
+ do {
+ loop_cnt++;
+ /* Time to enable the power domain for memory */
+ usleep_range(100, 150);
+ status = snd_soc_read(codec,
+ WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL);
+ } while ((status & 0x02) != 0x02 &&
+ loop_cnt != WCD_MEM_ENABLE_MAX_RETRIES);
+
+ if ((status & 0x02) != 0x02) {
+ dev_err(cntl->codec->dev,
+ "%s: power domain not enabled, status = 0x%02x\n",
+ __func__, status);
+ ret = -EIO;
+ goto done;
+ }
+
+ /* Rest of the memory */
+ wcd9xxx_slim_write_repeat(wcd9xxx,
+ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2,
+ ARRAY_SIZE(mem_enable_values),
+ mem_enable_values);
+ wcd9xxx_slim_write_repeat(wcd9xxx,
+ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3,
+ ARRAY_SIZE(mem_enable_values),
+ mem_enable_values);
+
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN,
+ 0x05);
+ break;
+
+ default:
+ dev_err(cntl->codec->dev, "%s: Invalid mem_type %d\n",
+ __func__, mem_type);
+ ret = -EINVAL;
+ break;
+ }
+done:
/* Make sure Deep sleep of memories is enabled for all banks */
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
-done:
+
return ret;
}
-static void wcd_cntl_disable_memory(struct wcd_dsp_cntl *cntl)
+static void wcd_cntl_disable_memory(struct wcd_dsp_cntl *cntl,
+ enum wcd_mem_type mem_type)
{
struct snd_soc_codec *codec = cntl->codec;
+ u8 val;
+
+ switch (mem_type) {
+ case WCD_MEM_TYPE_ALWAYS_ON:
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1,
+ 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0,
+ 0xFF);
+ break;
+ case WCD_MEM_TYPE_SWITCHABLE:
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3,
+ 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2,
+ 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN,
+ 0x07);
+
+ snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
+ 0x01, 0x00);
+ val = snd_soc_read(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL);
+ if (val & 0x02)
+ dev_err(codec->dev,
+ "%s: Disable switchable failed, val = 0x%02x",
+ __func__, val);
+
+ snd_soc_update_bits(codec, WCD934X_TEST_DEBUG_MEM_CTRL,
+ 0x80, 0x00);
+ break;
+ default:
+ dev_err(cntl->codec->dev, "%s: Invalid mem_type %d\n",
+ __func__, mem_type);
+ break;
+ }
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3, 0xFF);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2, 0xFF);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN, 0x07);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1, 0xFF);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0, 0xFF);
-
- snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
- 0x01, 0x00);
- snd_soc_update_bits(codec, WCD934X_TEST_DEBUG_MEM_CTRL,
- 0x80, 0x00);
- snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
- 0x04, 0x04);
}
static void wcd_cntl_do_shutdown(struct wcd_dsp_cntl *cntl)
@@ -743,7 +787,9 @@ static int wcd_control_handler(struct device *dev, void *priv_data,
wcd_cntl_cpar_ctrl(cntl, true);
if (event == WDSP_EVENT_PRE_DLOAD_CODE)
- wcd_cntl_enable_memory(cntl);
+ wcd_cntl_enable_memory(cntl, WCD_MEM_TYPE_ALWAYS_ON);
+ else if (event == WDSP_EVENT_PRE_DLOAD_DATA)
+ wcd_cntl_enable_memory(cntl, WCD_MEM_TYPE_SWITCHABLE);
break;
case WDSP_EVENT_DO_BOOT:
@@ -758,6 +804,7 @@ static int wcd_control_handler(struct device *dev, void *priv_data,
case WDSP_EVENT_DO_SHUTDOWN:
wcd_cntl_do_shutdown(cntl);
+ wcd_cntl_disable_memory(cntl, WCD_MEM_TYPE_SWITCHABLE);
break;
default:
@@ -1188,7 +1235,8 @@ void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl)
* irrespective of DSP was booted up or not.
*/
wcd_cntl_do_shutdown(control);
- wcd_cntl_disable_memory(control);
+ wcd_cntl_disable_memory(control, WCD_MEM_TYPE_SWITCHABLE);
+ wcd_cntl_disable_memory(control, WCD_MEM_TYPE_ALWAYS_ON);
component_del(codec->dev, &wcd_ctrl_component_ops);
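The memory control paths above are now keyed by enum wcd_mem_type: the always-on banks come up when the code image is about to be downloaded, the switchable banks when the data image is, and the switchable banks are shut off again at DSP shutdown. A compact sketch of that ordering, using only names from this diff (error handling elided, helper name illustrative):

/* Sketch of the ordering implied by wcd_control_handler() above. */
static int example_wdsp_memory_sequence(struct wcd_dsp_cntl *cntl)
{
	int ret;

	ret = wcd_cntl_enable_memory(cntl, WCD_MEM_TYPE_ALWAYS_ON);	/* code */
	if (ret)
		return ret;

	ret = wcd_cntl_enable_memory(cntl, WCD_MEM_TYPE_SWITCHABLE);	/* data */
	if (ret)
		return ret;

	/* ... download images and boot the DSP ... */

	wcd_cntl_disable_memory(cntl, WCD_MEM_TYPE_SWITCHABLE);	/* shutdown */
	return 0;
}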
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
index b3a30eb10b92..64c33c082b4b 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
@@ -870,11 +870,41 @@ static int tavil_get_hph_type(struct snd_kcontrol *kcontrol,
return 0;
}
+static int tavil_hph_impedance_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ uint32_t zl, zr;
+ bool hphr;
+ struct soc_multi_mixer_control *mc;
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+
+ if (!wcd934x_mbhc) {
+ dev_err(codec->dev, "%s: mbhc not initialized!\n", __func__);
+ return -EINVAL;
+ }
+
+ mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
+ hphr = mc->shift;
+ wcd_mbhc_get_impedance(&wcd934x_mbhc->wcd_mbhc, &zl, &zr);
+ dev_dbg(codec->dev, "%s: zl=%u(ohms), zr=%u(ohms)\n", __func__, zl, zr);
+ ucontrol->value.integer.value[0] = hphr ? zr : zl;
+
+ return 0;
+}
+
static const struct snd_kcontrol_new hph_type_detect_controls[] = {
SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
tavil_get_hph_type, NULL),
};
+static const struct snd_kcontrol_new impedance_detect_controls[] = {
+ SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
+ tavil_hph_impedance_get, NULL),
+ SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
+ tavil_hph_impedance_get, NULL),
+};
+
/*
* tavil_mbhc_hs_detect: starts mbhc insertion/removal functionality
* @codec: handle to snd_soc_codec *
@@ -912,6 +942,37 @@ void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec)
EXPORT_SYMBOL(tavil_mbhc_hs_detect_exit);
/*
+ * tavil_mbhc_post_ssr_init: initialize mbhc for tavil post subsystem restart
+ * @mbhc: pointer to wcd934x_mbhc structure
+ * @codec: handle to snd_soc_codec *
+ *
+ * return 0 if mbhc initialization succeeds or an error code on failure
+ */
+int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+ struct snd_soc_codec *codec)
+{
+ int ret;
+
+ if (!mbhc || !codec)
+ return -EINVAL;
+
+ wcd_mbhc_deinit(&mbhc->wcd_mbhc);
+ ret = wcd_mbhc_init(&mbhc->wcd_mbhc, codec, &mbhc_cb, &intr_ids,
+ wcd_mbhc_registers, TAVIL_ZDET_SUPPORTED);
+ if (ret) {
+ dev_err(codec->dev, "%s: mbhc initialization failed\n",
+ __func__);
+ goto done;
+ }
+ snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
+ snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL(tavil_mbhc_post_ssr_init);
+
+/*
* tavil_mbhc_init: initialize mbhc for tavil
 * @mbhc: pointer to wcd934x_mbhc struct pointer to store the configs
* @codec: handle to snd_soc_codec *
@@ -954,6 +1015,8 @@ int tavil_mbhc_init(struct wcd934x_mbhc **mbhc, struct snd_soc_codec *codec,
0;
}
+ snd_soc_add_codec_controls(codec, impedance_detect_controls,
+ ARRAY_SIZE(impedance_detect_controls));
snd_soc_add_codec_controls(codec, hph_type_detect_controls,
ARRAY_SIZE(hph_type_detect_controls));
@@ -977,7 +1040,9 @@ void tavil_mbhc_deinit(struct snd_soc_codec *codec)
{
struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
- if (!wcd934x_mbhc)
+ if (wcd934x_mbhc) {
+ wcd_mbhc_deinit(&wcd934x_mbhc->wcd_mbhc);
devm_kfree(codec->dev, wcd934x_mbhc);
+ }
}
EXPORT_SYMBOL(tavil_mbhc_deinit);
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
index 120a7b0f8177..3c88a12194af 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
@@ -42,6 +42,8 @@ extern void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec);
extern int tavil_mbhc_hs_detect(struct snd_soc_codec *codec,
struct wcd_mbhc_config *mbhc_cfg);
extern void tavil_mbhc_deinit(struct snd_soc_codec *codec);
+extern int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+ struct snd_soc_codec *codec);
#endif /* __WCD934X_MBHC_H__ */
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 30c23df444d6..5d6119c6d86a 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -260,6 +260,11 @@ static const struct intr_data wcd934x_intr_table[] = {
{WCD934X_IRQ_VBAT_RESTORE, false},
};
+struct tavil_cpr_reg_defaults {
+ int wr_data;
+ int wr_addr;
+};
+
struct interp_sample_rate {
int sample_rate;
int rate_val;
@@ -652,6 +657,8 @@ static const struct tavil_reg_mask_val tavil_spkr_mode1[] = {
{WCD934X_CDC_BOOST1_BOOST_CTL, 0x7C, 0x44},
};
+static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil);
+
/*
* wcd934x_get_codec_info: Get codec specific information
*
@@ -3602,7 +3609,7 @@ static void tavil_tx_hpf_corner_freq_callback(struct work_struct *work)
struct hpf_work *hpf_work;
struct tavil_priv *tavil;
struct snd_soc_codec *codec;
- u16 dec_cfg_reg, amic_reg;
+ u16 dec_cfg_reg, amic_reg, go_bit_reg;
u8 hpf_cut_off_freq;
int amic_n;
@@ -3613,6 +3620,7 @@ static void tavil_tx_hpf_corner_freq_callback(struct work_struct *work)
hpf_cut_off_freq = hpf_work->hpf_cut_off_freq;
dec_cfg_reg = WCD934X_CDC_TX0_TX_PATH_CFG0 + 16 * hpf_work->decimator;
+ go_bit_reg = dec_cfg_reg + 7;
dev_dbg(codec->dev, "%s: decimator %u hpf_cut_of_freq 0x%x\n",
__func__, hpf_work->decimator, hpf_cut_off_freq);
@@ -3624,6 +3632,10 @@ static void tavil_tx_hpf_corner_freq_callback(struct work_struct *work)
}
snd_soc_update_bits(codec, dec_cfg_reg, TX_HPF_CUT_OFF_FREQ_MASK,
hpf_cut_off_freq << 5);
+ snd_soc_update_bits(codec, go_bit_reg, 0x02, 0x02);
+ /* Minimum 1 clk cycle delay is required as per HW spec */
+ usleep_range(1000, 1010);
+ snd_soc_update_bits(codec, go_bit_reg, 0x02, 0x00);
}
static void tavil_tx_mute_update_callback(struct work_struct *work)
@@ -3643,7 +3655,6 @@ static void tavil_tx_mute_update_callback(struct work_struct *work)
16 * tx_mute_dwork->decimator;
hpf_gate_reg = WCD934X_CDC_TX0_TX_PATH_SEC2 +
16 * tx_mute_dwork->decimator;
- snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x01);
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x00);
}
@@ -3658,6 +3669,7 @@ static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
char *wname;
int ret = 0, amic_n;
u16 tx_vol_ctl_reg, pwr_level_reg = 0, dec_cfg_reg, hpf_gate_reg;
+ u16 tx_gain_ctl_reg;
char *dec;
u8 hpf_cut_off_freq;
@@ -3699,6 +3711,7 @@ static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
tx_vol_ctl_reg = WCD934X_CDC_TX0_TX_PATH_CTL + 16 * decimator;
hpf_gate_reg = WCD934X_CDC_TX0_TX_PATH_SEC2 + 16 * decimator;
dec_cfg_reg = WCD934X_CDC_TX0_TX_PATH_CFG0 + 16 * decimator;
+ tx_gain_ctl_reg = WCD934X_CDC_TX0_TX_VOL_CTL + 16 * decimator;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -3730,20 +3743,27 @@ static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
break;
}
}
+ /* Enable TX PGA Mute */
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
hpf_cut_off_freq = (snd_soc_read(codec, dec_cfg_reg) &
TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
tavil->tx_hpf_work[decimator].hpf_cut_off_freq =
hpf_cut_off_freq;
- if (hpf_cut_off_freq != CF_MIN_3DB_150HZ)
+ if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
snd_soc_update_bits(codec, dec_cfg_reg,
TX_HPF_CUT_OFF_FREQ_MASK,
CF_MIN_3DB_150HZ << 5);
- /* Enable TX PGA Mute */
- snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x00);
+ snd_soc_update_bits(codec, hpf_gate_reg, 0x02, 0x02);
+ /*
+ * Minimum 1 clk cycle delay is required as per
+ * HW spec.
+ */
+ usleep_range(1000, 1010);
+ snd_soc_update_bits(codec, hpf_gate_reg, 0x02, 0x00);
+ }
/* schedule work queue to Remove Mute */
schedule_delayed_work(&tavil->tx_mute_dwork[decimator].dwork,
msecs_to_jiffies(tx_unmute_delay));
@@ -3752,7 +3772,9 @@ static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
schedule_delayed_work(
&tavil->tx_hpf_work[decimator].dwork,
msecs_to_jiffies(300));
-
+ /* apply gain after decimator is enabled */
+ snd_soc_write(codec, tx_gain_ctl_reg,
+ snd_soc_read(codec, tx_gain_ctl_reg));
break;
case SND_SOC_DAPM_PRE_PMD:
hpf_cut_off_freq =
@@ -3760,10 +3782,20 @@ static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
if (cancel_delayed_work_sync(
&tavil->tx_hpf_work[decimator].dwork)) {
- if (hpf_cut_off_freq != CF_MIN_3DB_150HZ)
+ if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
snd_soc_update_bits(codec, dec_cfg_reg,
TX_HPF_CUT_OFF_FREQ_MASK,
hpf_cut_off_freq << 5);
+ snd_soc_update_bits(codec, hpf_gate_reg,
+ 0x02, 0x02);
+ /*
+ * Minimum 1 clk cycle delay is required as per
+ * HW spec.
+ */
+ usleep_range(1000, 1010);
+ snd_soc_update_bits(codec, hpf_gate_reg,
+ 0x02, 0x00);
+ }
}
cancel_delayed_work_sync(
&tavil->tx_mute_dwork[decimator].dwork);
@@ -4129,7 +4161,8 @@ int tavil_micbias_control(struct snd_soc_codec *codec,
snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
break;
case MICB_PULLUP_DISABLE:
- tavil->pullup_ref[micb_index]--;
+ if (tavil->pullup_ref[micb_index] > 0)
+ tavil->pullup_ref[micb_index]--;
if ((tavil->pullup_ref[micb_index] == 0) &&
(tavil->micb_ref[micb_index] == 0))
snd_soc_update_bits(codec, micb_reg, 0xC0, 0x00);
@@ -4149,7 +4182,8 @@ int tavil_micbias_control(struct snd_soc_codec *codec,
post_dapm_on, &tavil->mbhc->wcd_mbhc);
break;
case MICB_DISABLE:
- tavil->micb_ref[micb_index]--;
+ if (tavil->micb_ref[micb_index] > 0)
+ tavil->micb_ref[micb_index]--;
if ((tavil->micb_ref[micb_index] == 0) &&
(tavil->pullup_ref[micb_index] > 0))
snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
@@ -8005,6 +8039,7 @@ static const struct tavil_reg_mask_val tavil_codec_reg_defaults[] = {
{WCD934X_HPH_OCP_CTL, 0xFF, 0x3A}, /* OCP current limit */
{WCD934X_HPH_L_TEST, 0x01, 0x01},
{WCD934X_HPH_R_TEST, 0x01, 0x01},
+ {WCD934X_CPE_FLL_CONFIG_CTL_2, 0xFF, 0x20},
};
static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {
@@ -8014,6 +8049,25 @@ static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {
{WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0xFF, 0x84},
};
+static const struct tavil_cpr_reg_defaults cpr_defaults[] = {
+ { 0x00000820, 0x00000094 },
+ { 0x00000fC0, 0x00000048 },
+ { 0x0000f000, 0x00000044 },
+ { 0x0000bb80, 0xC0000178 },
+ { 0x00000000, 0x00000160 },
+ { 0x10854522, 0x00000060 },
+ { 0x10854509, 0x00000064 },
+ { 0x108544dd, 0x00000068 },
+ { 0x108544ad, 0x0000006C },
+ { 0x0000077E, 0x00000070 },
+ { 0x000007da, 0x00000074 },
+ { 0x00000000, 0x00000078 },
+ { 0x00000000, 0x0000007C },
+ { 0x00042029, 0x00000080 },
+ { 0x4002002A, 0x00000090 },
+ { 0x4002002B, 0x00000090 },
+};
+
static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
{WCD934X_CDC_CLSH_K2_MSB, 0x0F, 0x00},
{WCD934X_CDC_CLSH_K2_LSB, 0xFF, 0x60},
@@ -8073,6 +8127,33 @@ static void tavil_update_reg_defaults(struct tavil_priv *tavil)
tavil_codec_reg_defaults[i].val);
}
+static void tavil_update_cpr_defaults(struct tavil_priv *tavil)
+{
+ int i;
+ struct wcd9xxx *wcd9xxx;
+
+ wcd9xxx = tavil->wcd9xxx;
+ if (!TAVIL_IS_1_1(wcd9xxx))
+ return;
+
+ __tavil_cdc_mclk_enable(tavil, true);
+
+ regmap_write(wcd9xxx->regmap, WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD, 0x2C);
+ regmap_update_bits(wcd9xxx->regmap, WCD934X_CODEC_RPM_CLK_GATE,
+ 0x10, 0x00);
+
+ for (i = 0; i < ARRAY_SIZE(cpr_defaults); i++) {
+ regmap_bulk_write(wcd9xxx->regmap,
+ WCD934X_CODEC_CPR_WR_DATA_0,
+ (u8 *)&cpr_defaults[i].wr_data, 4);
+ regmap_bulk_write(wcd9xxx->regmap,
+ WCD934X_CODEC_CPR_WR_ADDR_0,
+ (u8 *)&cpr_defaults[i].wr_addr, 4);
+ }
+
+ __tavil_cdc_mclk_enable(tavil, false);
+}
+
static void tavil_slim_interface_init_reg(struct snd_soc_codec *codec)
{
int i;
@@ -8261,6 +8342,7 @@ static void tavil_cleanup_irqs(struct tavil_priv *tavil)
&wcd9xxx->core_res;
wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, tavil);
+ wcd9xxx_free_irq(core_res, WCD934X_IRQ_MISC, tavil);
}
/*
@@ -8512,6 +8594,133 @@ static void tavil_mclk2_reg_defaults(struct tavil_priv *tavil)
}
}
+static int tavil_device_down(struct wcd9xxx *wcd9xxx)
+{
+ struct snd_soc_codec *codec;
+ struct tavil_priv *priv;
+ int count;
+
+ codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
+ priv = snd_soc_codec_get_drvdata(codec);
+ swrm_wcd_notify(priv->swr.ctrl_data[0].swr_pdev,
+ SWR_DEVICE_DOWN, NULL);
+ tavil_dsd_reset(priv->dsd_config);
+ snd_soc_card_change_online_state(codec->component.card, 0);
+ for (count = 0; count < NUM_CODEC_DAIS; count++)
+ priv->dai[count].bus_down_in_recovery = true;
+ wcd_dsp_ssr_event(priv->wdsp_cntl, WCD_CDC_DOWN_EVENT);
+ priv->resmgr->sido_input_src = SIDO_SOURCE_INTERNAL;
+
+ return 0;
+}
+
+static int tavil_post_reset_cb(struct wcd9xxx *wcd9xxx)
+{
+ int i, ret = 0;
+ struct wcd9xxx *control;
+ struct snd_soc_codec *codec;
+ struct tavil_priv *tavil;
+ struct wcd9xxx_pdata *pdata;
+ struct wcd_mbhc *mbhc;
+
+ codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
+ tavil = snd_soc_codec_get_drvdata(codec);
+ control = dev_get_drvdata(codec->dev->parent);
+
+ wcd9xxx_set_power_state(tavil->wcd9xxx,
+ WCD_REGION_POWER_COLLAPSE_REMOVE,
+ WCD9XXX_DIG_CORE_REGION_1);
+
+ mutex_lock(&tavil->codec_mutex);
+ /*
+ * Codec hardware by default comes up in SVS mode.
+ * Initialize the svs_ref_cnt to 1 to reflect the hardware
+ * state in the driver.
+ */
+ tavil->svs_ref_cnt = 1;
+
+ tavil_slimbus_slave_port_cfg.slave_dev_intfdev_la =
+ control->slim_slave->laddr;
+ tavil_slimbus_slave_port_cfg.slave_dev_pgd_la =
+ control->slim->laddr;
+ tavil_init_slim_slave_cfg(codec);
+ snd_soc_card_change_online_state(codec->component.card, 1);
+
+ /* Class-H Init */
+ wcd_clsh_init(&tavil->clsh_d);
+ /* Default HPH Mode to Class-H LOHiFi */
+ tavil->hph_mode = CLS_H_LOHIFI;
+
+ for (i = 0; i < TAVIL_MAX_MICBIAS; i++)
+ tavil->micb_ref[i] = 0;
+
+ for (i = 0; i < COMPANDER_MAX; i++)
+ tavil->comp_enabled[i] = 0;
+
+ dev_dbg(codec->dev, "%s: MCLK Rate = %x\n",
+ __func__, control->mclk_rate);
+
+ if (control->mclk_rate == WCD934X_MCLK_CLK_12P288MHZ)
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+ 0x03, 0x00);
+ else if (control->mclk_rate == WCD934X_MCLK_CLK_9P6MHZ)
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+ 0x03, 0x01);
+ wcd_resmgr_post_ssr_v2(tavil->resmgr);
+ tavil_update_reg_defaults(tavil);
+ tavil_codec_init_reg(tavil);
+ __tavil_enable_efuse_sensing(tavil);
+ tavil_mclk2_reg_defaults(tavil);
+
+ __tavil_cdc_mclk_enable(tavil, true);
+ regcache_mark_dirty(codec->component.regmap);
+ regcache_sync(codec->component.regmap);
+ __tavil_cdc_mclk_enable(tavil, false);
+
+ tavil_update_cpr_defaults(tavil);
+
+ pdata = dev_get_platdata(codec->dev->parent);
+ ret = tavil_handle_pdata(tavil, pdata);
+ if (IS_ERR_VALUE(ret))
+ dev_err(codec->dev, "%s: invalid pdata\n", __func__);
+
+ /* Initialize MBHC module */
+ mbhc = &tavil->mbhc->wcd_mbhc;
+ ret = tavil_mbhc_post_ssr_init(tavil->mbhc, codec);
+ if (ret) {
+ dev_err(codec->dev, "%s: mbhc initialization failed\n",
+ __func__);
+ goto done;
+ } else {
+ tavil_mbhc_hs_detect(codec, mbhc->mbhc_cfg);
+ }
+
+ /* DSD initialization */
+ ret = tavil_dsd_post_ssr_init(tavil->dsd_config);
+ if (ret)
+ dev_dbg(tavil->dev, "%s: DSD init failed\n", __func__);
+
+ tavil_cleanup_irqs(tavil);
+ ret = tavil_setup_irqs(tavil);
+ if (ret) {
+ dev_err(codec->dev, "%s: tavil irq setup failed %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ tavil_set_spkr_mode(codec, tavil->swr.spkr_mode);
+ /*
+ * Once the codec initialization is completed, the svs vote
+ * can be released allowing the codec to go to SVS2.
+ */
+ tavil_vote_svs(tavil, false);
+ wcd_dsp_ssr_event(tavil->wdsp_cntl, WCD_CDC_UP_EVENT);
+
+done:
+ mutex_unlock(&tavil->codec_mutex);
+ return ret;
+}
+
static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
{
struct wcd9xxx *control;
@@ -8527,8 +8736,12 @@ static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
tavil = snd_soc_codec_get_drvdata(codec);
tavil->intf_type = wcd9xxx_get_intf_type();
+ control->dev_down = tavil_device_down;
+ control->post_reset = tavil_post_reset_cb;
+ control->ssr_priv = (void *)codec;
+
/* Resource Manager post Init */
- ret = wcd_resmgr_post_init(tavil->resmgr, NULL, codec);
+ ret = wcd_resmgr_post_init(tavil->resmgr, &tavil_resmgr_cb, codec);
if (ret) {
dev_err(codec->dev, "%s: wcd resmgr post init failed\n",
__func__);
@@ -9139,6 +9352,7 @@ err_mem:
static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil)
{
int val, rc;
+ struct snd_soc_codec *codec;
__tavil_cdc_mclk_enable(tavil, true);
@@ -9155,8 +9369,17 @@ static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil)
rc = regmap_read(tavil->wcd9xxx->regmap,
WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS, &val);
if (rc || (!(val & 0x01)))
- WARN(1, "%s: Efuse sense is not complete\n", __func__);
+ WARN(1, "%s: Efuse sense is not complete val=%x, ret=%d\n",
+ __func__, val, rc);
+
+ codec = tavil->codec;
+ if (!codec) {
+ pr_debug("%s: codec is not yet registered\n", __func__);
+ goto done;
+ }
+ tavil_enable_sido_buck(codec);
+done:
__tavil_cdc_mclk_enable(tavil, false);
return rc;
@@ -9314,6 +9537,7 @@ static int tavil_probe(struct platform_device *pdev)
tavil_update_reg_defaults(tavil);
__tavil_enable_efuse_sensing(tavil);
___tavil_get_codec_fine_version(tavil);
+ tavil_update_cpr_defaults(tavil);
/* Register with soc framework */
ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tavil,
diff --git a/sound/soc/msm/msmcobalt.c b/sound/soc/msm/msmcobalt.c
index 524f737360d8..05a0e27cb45a 100644
--- a/sound/soc/msm/msmcobalt.c
+++ b/sound/soc/msm/msmcobalt.c
@@ -511,6 +511,13 @@ static struct wcd_mbhc_config wcd_mbhc_cfg = {
.moisture_en = true,
};
+static struct snd_soc_dapm_route wcd_audio_paths_tasha[] = {
+ {"MIC BIAS1", NULL, "MCLK TX"},
+ {"MIC BIAS2", NULL, "MCLK TX"},
+ {"MIC BIAS3", NULL, "MCLK TX"},
+ {"MIC BIAS4", NULL, "MCLK TX"},
+};
+
static struct snd_soc_dapm_route wcd_audio_paths[] = {
{"MIC BIAS1", NULL, "MCLK"},
{"MIC BIAS2", NULL, "MCLK"},
@@ -2362,6 +2369,42 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
ext_disp_rx_sample_rate_get,
ext_disp_rx_sample_rate_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+ tdm_rx_sample_rate_get,
+ tdm_rx_sample_rate_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+ tdm_tx_sample_rate_get,
+ tdm_tx_sample_rate_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_0 Format", tdm_rx_format,
+ tdm_rx_format_get,
+ tdm_rx_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_0 Format", tdm_tx_format,
+ tdm_tx_format_get,
+ tdm_tx_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_0 Channels", tdm_rx_chs,
+ tdm_rx_ch_get,
+ tdm_rx_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_0 Channels", tdm_tx_chs,
+ tdm_tx_ch_get,
+ tdm_tx_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+ tdm_rx_sample_rate_get,
+ tdm_rx_sample_rate_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+ tdm_tx_sample_rate_get,
+ tdm_tx_sample_rate_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Format", tdm_rx_format,
+ tdm_rx_format_get,
+ tdm_rx_format_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_0 Format", tdm_tx_format,
+ tdm_tx_format_get,
+ tdm_tx_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Channels", tdm_rx_chs,
+ tdm_rx_ch_get,
+ tdm_rx_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_0 Channels", tdm_tx_chs,
+ tdm_tx_ch_get,
+ tdm_tx_ch_put),
SOC_ENUM_EXT("TERT_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
tdm_rx_sample_rate_get,
tdm_rx_sample_rate_put),
@@ -2380,6 +2423,24 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("TERT_TDM_TX_0 Channels", tdm_tx_chs,
tdm_tx_ch_get,
tdm_tx_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+ tdm_rx_sample_rate_get,
+ tdm_rx_sample_rate_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+ tdm_tx_sample_rate_get,
+ tdm_tx_sample_rate_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_0 Format", tdm_rx_format,
+ tdm_rx_format_get,
+ tdm_rx_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_0 Format", tdm_tx_format,
+ tdm_tx_format_get,
+ tdm_tx_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_0 Channels", tdm_rx_chs,
+ tdm_rx_ch_get,
+ tdm_rx_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_0 Channels", tdm_tx_chs,
+ tdm_tx_ch_get,
+ tdm_tx_ch_put),
SOC_ENUM_EXT("PRIM_AUX_PCM_RX SampleRate", prim_aux_pcm_rx_sample_rate,
aux_pcm_rx_sample_rate_get,
aux_pcm_rx_sample_rate_put),
@@ -2463,6 +2524,37 @@ static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec,
return ret;
}
+static int msm_snd_enable_codec_ext_tx_clk(struct snd_soc_codec *codec,
+ int enable, bool dapm)
+{
+ int ret = 0;
+
+ if (!strcmp(dev_name(codec->dev), "tasha_codec"))
+ ret = tasha_cdc_mclk_tx_enable(codec, enable, dapm);
+ else {
+ dev_err(codec->dev, "%s: unknown codec to enable ext clk\n",
+ __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int msm_mclk_tx_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ pr_debug("%s: event = %d\n", __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 1, true);
+ case SND_SOC_DAPM_POST_PMD:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 0, true);
+ }
+ return 0;
+}
+
static int msm_mclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -2485,6 +2577,9 @@ static const struct snd_soc_dapm_widget msm_dapm_widgets[] = {
msm_mclk_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MCLK TX", SND_SOC_NOPM, 0, 0,
+ msm_mclk_tx_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_SPK("Lineout_1 amp", NULL),
SND_SOC_DAPM_SPK("Lineout_3 amp", NULL),
SND_SOC_DAPM_SPK("Lineout_2 amp", NULL),
@@ -2717,6 +2812,38 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = SAMPLING_RATE_48KHZ;
break;
+ case MSM_BACKEND_DAI_PRI_TDM_RX_0:
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_PRI][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_PRI][TDM_0].bit_format);
+ rate->min = rate->max = tdm_rx_cfg[TDM_PRI][TDM_0].sample_rate;
+ break;
+
+ case MSM_BACKEND_DAI_PRI_TDM_TX_0:
+ channels->min = channels->max =
+ tdm_tx_cfg[TDM_PRI][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_tx_cfg[TDM_PRI][TDM_0].bit_format);
+ rate->min = rate->max = tdm_tx_cfg[TDM_PRI][TDM_0].sample_rate;
+ break;
+
+ case MSM_BACKEND_DAI_SEC_TDM_RX_0:
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+ rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+ break;
+
+ case MSM_BACKEND_DAI_SEC_TDM_TX_0:
+ channels->min = channels->max =
+ tdm_tx_cfg[TDM_SEC][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_tx_cfg[TDM_SEC][TDM_0].bit_format);
+ rate->min = rate->max = tdm_tx_cfg[TDM_SEC][TDM_0].sample_rate;
+ break;
+
case MSM_BACKEND_DAI_TERT_TDM_RX_0:
channels->min = channels->max =
tdm_rx_cfg[TDM_TERT][TDM_0].channels;
@@ -2733,6 +2860,22 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = tdm_tx_cfg[TDM_TERT][TDM_0].sample_rate;
break;
+ case MSM_BACKEND_DAI_QUAT_TDM_RX_0:
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+ rate->min = rate->max = tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+ break;
+
+ case MSM_BACKEND_DAI_QUAT_TDM_TX_0:
+ channels->min = channels->max =
+ tdm_tx_cfg[TDM_QUAT][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_tx_cfg[TDM_QUAT][TDM_0].bit_format);
+ rate->min = rate->max = tdm_tx_cfg[TDM_QUAT][TDM_0].sample_rate;
+ break;
+
case MSM_BACKEND_DAI_AUXPCM_RX:
rate->min = rate->max =
aux_pcm_rx_cfg[PRIM_AUX_PCM].sample_rate;
@@ -3089,8 +3232,12 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_new_controls(dapm, msm_dapm_widgets,
ARRAY_SIZE(msm_dapm_widgets));
- snd_soc_dapm_add_routes(dapm, wcd_audio_paths,
- ARRAY_SIZE(wcd_audio_paths));
+ if (!strcmp(dev_name(codec_dai->dev), "tasha_codec"))
+ snd_soc_dapm_add_routes(dapm, wcd_audio_paths_tasha,
+ ARRAY_SIZE(wcd_audio_paths_tasha));
+ else
+ snd_soc_dapm_add_routes(dapm, wcd_audio_paths,
+ ARRAY_SIZE(wcd_audio_paths));
snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
@@ -4632,6 +4779,20 @@ static struct snd_soc_dai_link msm_tasha_fe_dai_links[] = {
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
+ /* CPE LSM EC PP direct dai-link */
+ {
+ .name = "CPE Listen service ECPP",
+ .stream_name = "CPE Listen Audio Service ECPP",
+ .cpu_dai_name = "CPE_LSM_NOHOST",
+ .platform_name = "msm-cpe-lsm.3",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "tasha_cpe",
+ .codec_name = "tasha_codec",
+ },
};
static struct snd_soc_dai_link msm_tavil_fe_dai_links[] = {
@@ -4788,6 +4949,62 @@ static struct snd_soc_dai_link msm_common_be_dai_links[] = {
.ignore_suspend = 1,
},
{
+ .name = LPASS_BE_PRI_TDM_RX_0,
+ .stream_name = "Primary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36864",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_0,
+ .stream_name = "Primary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36865",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_0,
+ .stream_name = "Secondary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36880",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_TX_0,
+ .stream_name = "Secondary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36881",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
.name = LPASS_BE_TERT_TDM_RX_0,
.stream_name = "Tertiary TDM0 Playback",
.cpu_dai_name = "msm-dai-q6-tdm.36896",
@@ -4815,6 +5032,34 @@ static struct snd_soc_dai_link msm_common_be_dai_links[] = {
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
},
+ {
+ .name = LPASS_BE_QUAT_TDM_RX_0,
+ .stream_name = "Quaternary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36912",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_TX_0,
+ .stream_name = "Quaternary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36913",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
};
static struct snd_soc_dai_link msm_tasha_be_dai_links[] = {
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 547af163c5c0..13d4ba27d990 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -121,14 +121,14 @@ static int msm_routing_get_bit_width(unsigned int format)
return bit_width;
}
-static bool msm_is_fractional_resample_needed(int input_sr, int output_sr)
+static bool msm_is_resample_needed(int input_sr, int output_sr)
{
bool rc = false;
- if ((input_sr % output_sr != 0) && (output_sr % input_sr != 0))
+ if (input_sr != output_sr)
rc = true;
- pr_debug("performing fractional resample (%s) for copp rate (%d)afe rate (%d)",
+ pr_debug("perform resampling (%s) for copp rate (%d)afe rate (%d)",
(rc ? "oh yes" : "not really"),
input_sr, output_sr);
@@ -901,7 +901,7 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
set_bit(copp_idx,
&session_copp_map[fe_id][session_type][i]);
- if (msm_is_fractional_resample_needed(
+ if (msm_is_resample_needed(
sample_rate,
msm_bedais[i].sample_rate))
adm_copp_mfc_cfg(
@@ -1057,7 +1057,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
set_bit(copp_idx,
&session_copp_map[fedai_id][session_type][i]);
- if (msm_is_fractional_resample_needed(
+ if (msm_is_resample_needed(
sample_rate,
msm_bedais[i].sample_rate))
adm_copp_mfc_cfg(
@@ -1284,7 +1284,7 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
set_bit(copp_idx,
&session_copp_map[val][session_type][reg]);
- if (msm_is_fractional_resample_needed(
+ if (msm_is_resample_needed(
sample_rate,
msm_bedais[reg].sample_rate))
adm_copp_mfc_cfg(
@@ -3628,6 +3628,210 @@ static const struct snd_kcontrol_new quat_auxpcm_rx_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_tx_0_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_tx_0_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_0 ,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -3679,6 +3883,57 @@ static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new tert_tdm_tx_0_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_1 ,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -3883,6 +4138,57 @@ static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new quat_tdm_tx_0_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_1 ,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -8141,9 +8447,24 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_MIXER("QUIN_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
quinary_mi2s_rx_mixer_controls,
ARRAY_SIZE(quinary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRI_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRI_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_tx_0_mixer_controls,
+ ARRAY_SIZE(pri_tdm_tx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_tx_0_mixer_controls,
+ ARRAY_SIZE(sec_tdm_tx_0_mixer_controls)),
SND_SOC_DAPM_MIXER("TERT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
tert_tdm_rx_0_mixer_controls,
ARRAY_SIZE(tert_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_tx_0_mixer_controls,
+ ARRAY_SIZE(tert_tdm_tx_0_mixer_controls)),
SND_SOC_DAPM_MIXER("TERT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
tert_tdm_rx_1_mixer_controls,
ARRAY_SIZE(tert_tdm_rx_1_mixer_controls)),
@@ -8156,6 +8477,9 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
quat_tdm_rx_0_mixer_controls,
ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_tx_0_mixer_controls,
+ ARRAY_SIZE(quat_tdm_tx_0_mixer_controls)),
SND_SOC_DAPM_MIXER("QUAT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
quat_tdm_rx_1_mixer_controls,
ARRAY_SIZE(quat_tdm_rx_1_mixer_controls)),
@@ -8775,6 +9099,78 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUIN_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUIN_MI2S_RX", NULL, "QUIN_MI2S_RX Audio Mixer"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
+
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"PRI_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_TX_0", NULL, "PRI_TDM_TX_0 Audio Mixer"},
+
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
+
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"SEC_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_TX_0", NULL, "SEC_TDM_TX_0 Audio Mixer"},
+
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -8793,6 +9189,24 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0 Audio Mixer"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"TERT_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_TX_0", NULL, "TERT_TDM_TX_0 Audio Mixer"},
+
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -8865,6 +9279,24 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"QUAT_TDM_TX_0", NULL, "QUAT_TDM_TX_0 Audio Mixer"},
+
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -9735,6 +10167,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_MI2S_RX", NULL, "TERT_MI2S_DL_HL"},
{"QUAT_MI2S_UL_HL", NULL, "QUAT_MI2S_TX"},
+ {"PRI_TDM_TX_0_UL_HL", NULL, "PRI_TDM_TX_0"},
+ {"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0_DL_HL"},
+ {"SEC_TDM_TX_0_UL_HL", NULL, "SEC_TDM_TX_0"},
+ {"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0_DL_HL"},
{"TERT_TDM_TX_0_UL_HL", NULL, "TERT_TDM_TX_0"},
{"TERT_TDM_TX_1_UL_HL", NULL, "TERT_TDM_TX_1"},
{"TERT_TDM_TX_2_UL_HL", NULL, "TERT_TDM_TX_2"},
@@ -10140,6 +10576,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
{"BE_OUT", NULL, "VOICE2_PLAYBACK_TX"},
+ {"BE_OUT", NULL, "PRI_TDM_RX_0"},
+ {"BE_OUT", NULL, "SEC_TDM_RX_0"},
{"BE_OUT", NULL, "TERT_TDM_RX_0"},
{"BE_OUT", NULL, "TERT_TDM_RX_1"},
{"BE_OUT", NULL, "TERT_TDM_RX_2"},
@@ -10186,6 +10624,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_LCH_MUX"},
{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_RCH_MUX"},
{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_VI_FB_MUX"},
+ {"PRI_TDM_TX_0", NULL, "BE_IN"},
+ {"SEC_TDM_TX_0", NULL, "BE_IN"},
{"TERT_TDM_TX_0", NULL, "BE_IN"},
{"TERT_TDM_TX_1", NULL, "BE_IN"},
{"TERT_TDM_TX_2", NULL, "BE_IN"},
@@ -10381,7 +10821,7 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
set_bit(copp_idx,
&session_copp_map[i][session_type][be_id]);
- if (msm_is_fractional_resample_needed(
+ if (msm_is_resample_needed(
sample_rate,
bedai->sample_rate))
adm_copp_mfc_cfg(
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index e30a4efa6e60..564b67c9f76b 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -2508,8 +2508,6 @@ void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
mfc_cfg.sampling_rate = dst_sample_rate;
mfc_cfg.bits_per_sample =
atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
- if (24 == mfc_cfg.bits_per_sample)
- mfc_cfg.bits_per_sample = 32;
open.dev_num_channel = mfc_cfg.num_channels =
atomic_read(&this_adm.copp.channels[port_idx][copp_idx]);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 88c27339b299..0ea94cb52bfb 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1221,8 +1221,9 @@ int q6asm_audio_client_buf_alloc(unsigned int dir,
struct audio_buffer *buf;
size_t len;
- if (!(ac) || ((dir != IN) && (dir != OUT))) {
- pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+ if (!(ac) || !(bufsz) || ((dir != IN) && (dir != OUT))) {
+ pr_err("%s: ac %pK bufsz %d dir %d\n", __func__, ac, bufsz,
+ dir);
return -EINVAL;
}
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index df65c5b494b1..b6ab3fc5789e 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
#endif
int s3c_i2sv2_register_component(struct device *dev, int id,
- struct snd_soc_component_driver *cmp_drv,
+ const struct snd_soc_component_driver *cmp_drv,
struct snd_soc_dai_driver *dai_drv)
{
struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
index 90abab364b49..d0684145ed1f 100644
--- a/sound/soc/samsung/s3c-i2s-v2.h
+++ b/sound/soc/samsung/s3c-i2s-v2.h
@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
* soc core.
*/
extern int s3c_i2sv2_register_component(struct device *dev, int id,
- struct snd_soc_component_driver *cmp_drv,
+ const struct snd_soc_component_driver *cmp_drv,
struct snd_soc_dai_driver *dai_drv);
#endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 0700b4c00aeb..4c66eeaeeb03 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2204,6 +2204,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
int count = 0;
char *state = "not set";
+	/* The card won't be set for the dummy component. As a spot fix
+	 * we check for that case specifically here, but in future
+	 * we will ensure that the dummy component looks like others.
+	 */
+ if (!cmpnt->card)
+ return 0;
+
list_for_each_entry(w, &cmpnt->card->widgets, list) {
if (w->dapm != dapm)
continue;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 001fb4dc0722..db11ecf0b74d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1138,8 +1138,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+ case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
return true;
}
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 0144b3d1bb77..88cccea3ca99 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
current_op = current_exp;
ret = collapse_tree(current_op, parg, error_str);
+	/* collapse_tree() may free current_op; it updates parg accordingly */
+ current_op = NULL;
if (ret < 0)
goto fail;
- *parg = current_op;
-
free(token);
return 0;
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4e074a660826..90c3558c2c12 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -62,6 +62,14 @@ OPTIONS
--scale::
scale/normalize counter values
+-d::
+--detailed::
+	print more detailed statistics; can be specified up to 3 times
+
+ -d: detailed events, L1 and LLC data cache
+ -d -d: more detailed events, dTLB and iTLB events
+ -d -d -d: very detailed events, adding prefetch events
+
-r::
--repeat=<n>::
repeat command and print average + stddev (max: 100). 0 means forever.
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 81def6c3f24b..3900386a3629 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2059,10 +2059,12 @@ skip_annotation:
*
* See hist_browser__show_entry.
*/
- nr_options += add_script_opt(browser,
- &actions[nr_options],
- &options[nr_options],
- NULL, browser->selection->sym);
+ if (sort__has_sym && browser->selection->sym) {
+ nr_options += add_script_opt(browser,
+ &actions[nr_options],
+ &options[nr_options],
+ NULL, browser->selection->sym);
+ }
}
nr_options += add_script_opt(browser, &actions[nr_options],
&options[nr_options], NULL, NULL);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 8b10621b415c..956187bf1a85 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -274,7 +274,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
strcpy(execname, "");
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
+ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
&event->mmap2.start, &event->mmap2.len, prot,
&event->mmap2.pgoff, &event->mmap2.maj,
&event->mmap2.min,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d1392194a9a9..b4b96120fc3b 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1211,12 +1211,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
*/
if (cpus != evlist->cpus) {
cpu_map__put(evlist->cpus);
- evlist->cpus = cpus;
+ evlist->cpus = cpu_map__get(cpus);
}
if (threads != evlist->threads) {
thread_map__put(evlist->threads);
- evlist->threads = threads;
+ evlist->threads = thread_map__get(threads);
}
perf_evlist__propagate_maps(evlist);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 97f963a3dcb9..9227c2f076c3 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
ret);
- if (pt->synth_opts.callchain)
+ if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
return ret;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index ea6064696fe4..a7b9022b5c8f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
vcpu->arch.timer_cpu.armed = false;
+ WARN_ON(!kvm_timer_should_fire(vcpu));
+
/*
* If the vcpu is blocked we want to wake it up so that it will see
* the timer has expired when entering the guest.
@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
kvm_vcpu_kick(vcpu);
}
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+ cycle_t cval, now;
+
+ cval = vcpu->arch.timer_cpu.cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ if (now < cval) {
+ u64 ns;
+
+ ns = cyclecounter_cyc2ns(timecounter->cc,
+ cval - now,
+ timecounter->mask,
+ &timecounter->frac);
+ return ns;
+ }
+
+ return 0;
+}
+
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
struct arch_timer_cpu *timer;
+ struct kvm_vcpu *vcpu;
+ u64 ns;
+
timer = container_of(hrt, struct arch_timer_cpu, timer);
+ vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+ /*
+ * Check that the timer has really expired from the guest's
+ * PoV (NTP on the host may have forced it to expire
+ * early). If we should have slept longer, restart it.
+ */
+ ns = kvm_timer_compute_delta(vcpu);
+ if (unlikely(ns)) {
+ hrtimer_forward_now(hrt, ns_to_ktime(ns));
+ return HRTIMER_RESTART;
+ }
+
queue_work(wqueue, &timer->expired);
return HRTIMER_NORESTART;
}
@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- u64 ns;
- cycle_t cval, now;
BUG_ON(timer_is_armed(timer));
@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
return;
/* The timer has not yet expired, schedule a background timer */
- cval = timer->cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
- ns = cyclecounter_cyc2ns(timecounter->cc,
- cval - now,
- timecounter->mask,
- &timecounter->frac);
- timer_arm(timer, ns);
+ timer_arm(timer, kvm_timer_compute_delta(vcpu));
}
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)