summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/cnss/icnss.txt12
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt22
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-cci.txt2
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-cpp.txt3
-rw-r--r--Documentation/devicetree/bindings/misc/memory-state-time.txt8
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt154
-rw-r--r--Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt6
-rw-r--r--Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt5
-rw-r--r--Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt360
-rw-r--r--Documentation/devicetree/bindings/sound/qcom-audio-dev.txt29
-rw-r--r--Documentation/devicetree/bindings/sound/wcd_codec.txt53
-rw-r--r--Documentation/devicetree/bindings/thermal/tsens.txt7
-rw-r--r--Documentation/filesystems/proc.txt9
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/module-signing.txt6
-rw-r--r--Documentation/scheduler/sched-energy.txt362
-rw-r--r--Documentation/scheduler/sched-hmp.txt10
-rw-r--r--Documentation/scheduler/sched-tune.txt366
-rw-r--r--Documentation/x86/pat.txt32
-rw-r--r--Makefile2
-rw-r--r--android/configs/android-base.cfg1
-rw-r--r--android/configs/android-recommended.cfg2
-rw-r--r--arch/Kconfig9
-rw-r--r--arch/arc/Makefile16
-rw-r--r--arch/arc/include/asm/arcregs.h6
-rw-r--r--arch/arc/include/asm/entry.h4
-rw-r--r--arch/arc/include/asm/irqflags-compact.h2
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arc/kernel/stacktrace.c2
-rw-r--r--arch/arc/mm/cache.c9
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts18
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi91
-rw-r--r--arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi11
-rw-r--r--arch/arm/boot/dts/qcom/msm-audio.dtsi135
-rw-r--r--arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmi8998.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-camera.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk.dtsi204
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-cdp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-mtp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon-audio.dtsi62
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-pmfalcon.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi53
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi43
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-mtp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-qrd.dts29
-rw-r--r--arch/arm/boot/dts/qcom/msm8998.dtsi12
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-audio.dtsi157
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi77
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-cdp.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-common.dtsi104
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi772
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-lpi.dtsi182
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-mtp.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi181
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-pm.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-wcd.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon.dtsi105
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton-rumi.dts4
-rw-r--r--arch/arm/boot/dts/sun4i-a10-a1000.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-hackberry.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts1
-rw-r--r--arch/arm/configs/msmfalcon-perf_defconfig2
-rw-r--r--arch/arm/configs/msmfalcon_defconfig2
-rw-r--r--arch/arm/include/asm/topology.h7
-rw-r--r--arch/arm/include/asm/uaccess.h11
-rw-r--r--arch/arm/kernel/setup.c4
-rw-r--r--arch/arm/kernel/sys_oabi-compat.c8
-rw-r--r--arch/arm/kernel/topology.c149
-rw-r--r--arch/arm/kernel/vmlinux.lds.S4
-rw-r--r--arch/arm/mm/dma-mapping.c21
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm64/Kconfig21
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi4
-rw-r--r--arch/arm64/configs/msm-perf_defconfig1
-rw-r--r--arch/arm64/configs/msm_defconfig1
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig7
-rw-r--r--arch/arm64/configs/msmcortex_defconfig7
-rw-r--r--arch/arm64/configs/msmfalcon-perf_defconfig2
-rw-r--r--arch/arm64/configs/msmfalcon_defconfig2
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/elf.h1
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/module.h5
-rw-r--r--arch/arm64/include/asm/ptrace.h2
-rw-r--r--arch/arm64/include/asm/topology.h9
-rw-r--r--arch/arm64/include/asm/uaccess.h29
-rw-r--r--arch/arm64/include/uapi/asm/auxvec.h2
-rw-r--r--arch/arm64/kernel/arm64ksyms.c4
-rw-r--r--arch/arm64/kernel/asm-offsets.c1
-rw-r--r--arch/arm64/kernel/cpu_errata.c9
-rw-r--r--arch/arm64/kernel/debug-monitors.c1
-rw-r--r--arch/arm64/kernel/entry.S15
-rw-r--r--arch/arm64/kernel/head.S21
-rw-r--r--arch/arm64/kernel/setup.c2
-rw-r--r--arch/arm64/kernel/smp.c3
-rw-r--r--arch/arm64/kernel/topology.c79
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S20
-rw-r--r--arch/arm64/kvm/hyp-init.S13
-rw-r--r--arch/arm64/lib/copy_from_user.S4
-rw-r--r--arch/arm64/lib/copy_to_user.S4
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/arm64/mm/mmu.c28
-rw-r--r--arch/arm64/mm/proc.S14
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/include/asm/uaccess.h18
-rw-r--r--arch/metag/include/asm/atomic_lnkget.h2
-rw-r--r--arch/metag/include/asm/cmpxchg_lnkget.h2
-rw-r--r--arch/mips/kernel/csrc-r4k.c4
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kvm/emulate.c40
-rw-r--r--arch/mips/kvm/tlb.c68
-rw-r--r--arch/mips/loongson64/loongson-3/hpet.c14
-rw-r--r--arch/mips/mm/uasm-mips.c2
-rw-r--r--arch/parisc/include/uapi/asm/errno.h4
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/icswx.h1
-rw-r--r--arch/powerpc/include/asm/uaccess.h21
-rw-r--r--arch/powerpc/kernel/eeh.c2
-rw-r--r--arch/powerpc/kernel/tm.S61
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S462
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/include/asm/pci_dma.h2
-rw-r--r--arch/s390/kernel/ipl.c7
-rw-r--r--arch/s390/lib/uaccess.c2
-rw-r--r--arch/s390/pci/pci.c3
-rw-r--r--arch/s390/pci/pci_dma.c19
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/uaccess_32.h14
-rw-r--r--arch/sparc/include/asm/uaccess_64.h11
-rw-r--r--arch/um/include/asm/common.lds.S2
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/include/asm/mtrr.h6
-rw-r--r--arch/x86/include/asm/pat.h2
-rw-r--r--arch/x86/include/asm/pvclock.h2
-rw-r--r--arch/x86/include/asm/thread_info.h44
-rw-r--r--arch/x86/include/asm/tlbflush.h7
-rw-r--r--arch/x86/include/asm/uaccess.h92
-rw-r--r--arch/x86/include/asm/uaccess_32.h62
-rw-r--r--arch/x86/include/asm/uaccess_64.h96
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c12
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c24
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c13
-rw-r--r--arch/x86/kernel/cpu/mtrr/mtrr.h1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c56
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c9
-rw-r--r--arch/x86/kernel/early-quirks.c105
-rw-r--r--arch/x86/kernel/pvclock.c4
-rw-r--r--arch/x86/kernel/uprobes.c22
-rw-r--r--arch/x86/kvm/mtrr.c1
-rw-r--r--arch/x86/kvm/vmx.c20
-rw-r--r--arch/x86/mm/mmap.c14
-rw-r--r--arch/x86/mm/pat.c109
-rw-r--r--arch/x86/pci/intel_mid_pci.c12
-rw-r--r--arch/x86/xen/enlighten.c9
-rw-r--r--backported-features14
-rw-r--r--block/bio.c15
-rw-r--r--block/blk-core.c88
-rw-r--r--block/blk-merge.c22
-rw-r--r--block/blk-mq.c6
-rw-r--r--block/genhd.c2
-rw-r--r--crypto/gcm.c4
-rw-r--r--crypto/scatterwalk.c3
-rw-r--r--drivers/acpi/cppc_acpi.c24
-rw-r--r--drivers/acpi/ec.c41
-rw-r--r--drivers/acpi/nfit.c3
-rw-r--r--drivers/acpi/numa.c16
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/acpi/sysfs.c7
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/bcma/bcma_private.h2
-rw-r--r--drivers/bluetooth/btusb.c11
-rw-r--r--drivers/bluetooth/hci_intel.c6
-rw-r--r--drivers/char/diag/diag_dci.c4
-rw-r--r--drivers/char/diag/diag_memorydevice.c2
-rw-r--r--drivers/char/diag/diagchar.h8
-rw-r--r--drivers/char/diag/diagchar_core.c4
-rw-r--r--drivers/char/diag/diagfwd_glink.c35
-rw-r--r--drivers/char/diag/diagfwd_peripheral.h6
-rw-r--r--drivers/char/diag/diagfwd_smd.c25
-rw-r--r--drivers/char/diag/diagfwd_socket.c31
-rw-r--r--drivers/char/hw_random/exynos-rng.c9
-rw-r--r--drivers/char/random.c42
-rw-r--r--drivers/clk/clk-xgene.c3
-rw-r--r--drivers/clk/msm/clock-osm.c4
-rw-r--r--drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c8
-rw-r--r--drivers/clk/qcom/clk-branch.c67
-rw-r--r--drivers/clk/qcom/clk-branch.h1
-rw-r--r--drivers/clk/qcom/clk-rcg.h4
-rw-r--r--drivers/clk/qcom/clk-rcg2.c188
-rw-r--r--drivers/clk/qcom/mmcc-msmfalcon.c8
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c1
-rw-r--r--drivers/cpufreq/Kconfig21
-rw-r--r--drivers/cpufreq/cpufreq.c64
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c43
-rw-r--r--drivers/cpufreq/intel_pstate.c2
-rw-r--r--drivers/cpuidle/cpuidle.c4
-rw-r--r--drivers/cpuidle/lpm-levels.c4
-rw-r--r--drivers/crypto/caam/caamalg.c90
-rw-r--r--drivers/crypto/caam/caamhash.c1
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c12
-rw-r--r--drivers/crypto/nx/nx.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c4
-rw-r--r--drivers/crypto/vmx/aes_cbc.c2
-rw-r--r--drivers/crypto/vmx/aes_ctr.c2
-rw-r--r--drivers/crypto/vmx/ppc-xlate.pl20
-rw-r--r--drivers/dma/at_xdmac.c82
-rw-r--r--drivers/dma/sh/usb-dmac.c19
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c20
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-intel-mid.c19
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c1
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/drm_cache.c1
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/drm_edid.c8
-rw-r--r--drivers/gpu/drm/drm_gem.c29
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h15
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c39
-rw-r--r--drivers/gpu/drm/i915/intel_display.c60
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c26
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c108
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/msm/kgsl_pool.c22
-rw-r--r--drivers/gpu/msm/kgsl_sharedmem.c23
-rw-r--r--drivers/gpu/msm/kgsl_sharedmem.h26
-rw-r--r--drivers/hid/hid-sony.c6
-rw-r--r--drivers/hid/uhid.c33
-rw-r--r--drivers/hv/channel.c27
-rw-r--r--drivers/hv/channel_mgmt.c61
-rw-r--r--drivers/hv/hv.c10
-rw-r--r--drivers/hv/hv_fcopy.c37
-rw-r--r--drivers/hv/hv_kvp.c31
-rw-r--r--drivers/hv/hv_snapshot.c34
-rw-r--r--drivers/hv/hv_utils_transport.c9
-rw-r--r--drivers/hv/hyperv_vmbus.h11
-rw-r--r--drivers/hv/vmbus_drv.c38
-rw-r--r--drivers/hwmon/iio_hwmon.c24
-rw-r--r--drivers/hwmon/qpnp-adc-common.c4
-rw-r--r--drivers/hwtracing/intel_th/core.c35
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h3
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c2
-rw-r--r--drivers/i2c/busses/i2c-efm32.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c103
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c2
-rw-r--r--drivers/idle/intel_idle.c25
-rw-r--r--drivers/iio/adc/qcom-rradc.c230
-rw-r--r--drivers/iio/industrialio-buffer.c23
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c24
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c12
-rw-r--r--drivers/infiniband/hw/mlx5/main.c5
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/input/joystick/xpad.c348
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c79
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/mouse/vmmouse.c22
-rw-r--r--drivers/input/serio/i8042.c17
-rw-r--r--drivers/input/serio/libps2.c10
-rw-r--r--drivers/input/touchscreen/sur40.c5
-rw-r--r--drivers/input/touchscreen/tsc2004.c7
-rw-r--r--drivers/input/touchscreen/tsc2005.c7
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c15
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h2
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/amd_iommu.c40
-rw-r--r--drivers/iommu/arm-smmu-v3.c7
-rw-r--r--drivers/iommu/dma-iommu.c3
-rw-r--r--drivers/iommu/exynos-iommu.c1
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c49
-rw-r--r--drivers/leds/leds-qpnp-wled.c48
-rw-r--r--drivers/lightnvm/gennvm.c3
-rw-r--r--drivers/lightnvm/rrpc.c24
-rw-r--r--drivers/md/Kconfig3
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/dm-android-verity.c4
-rw-r--r--drivers/md/dm-android-verity.h5
-rw-r--r--drivers/md/dm-flakey.c23
-rw-r--r--drivers/md/dm-linear.c6
-rw-r--r--drivers/md/dm-verity-target.c7
-rw-r--r--drivers/md/dm.c14
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c83
-rw-r--r--drivers/media/dvb-frontends/Kconfig2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c157
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h10
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h6
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp32.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp48.c21
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c25
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c47
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h1
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c15
-rw-r--r--drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c14
-rw-r--r--drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h1
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp_soc.c5
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c61
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c4
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c55
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c11
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c7
-rw-r--r--drivers/media/usb/airspy/airspy.c3
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c5
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c20
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c6
-rw-r--r--drivers/mfd/msm-cdc-pinctrl.c16
-rw-r--r--drivers/mfd/qcom_rpm.c55
-rw-r--r--drivers/misc/Kconfig5
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/api.c6
-rw-r--r--drivers/misc/cxl/context.c15
-rw-r--r--drivers/misc/cxl/cxl.h15
-rw-r--r--drivers/misc/cxl/fault.c129
-rw-r--r--drivers/misc/cxl/file.c25
-rw-r--r--drivers/misc/cxl/pci.c1
-rw-r--r--drivers/misc/memory_state_time.c454
-rw-r--r--drivers/misc/qseecom.c6
-rw-r--r--drivers/mmc/card/block.c12
-rw-r--r--drivers/mmc/core/core.c66
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/host.h5
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c81
-rw-r--r--drivers/mmc/host/sdhci.c35
-rw-r--r--drivers/mmc/host/sdhci.h21
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/mtd/ubi/build.c13
-rw-r--r--drivers/mtd/ubi/vmt.c25
-rw-r--r--drivers/net/bonding/bond_netlink.c6
-rw-r--r--drivers/net/can/at91_can.c5
-rw-r--r--drivers/net/can/c_can/c_can.c38
-rw-r--r--drivers/net/can/dev.c9
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h3
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c91
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h7
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c65
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c126
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c73
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c182
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c40
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c32
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c27
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c7
-rw-r--r--drivers/net/ppp/ppp_generic.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/cnss/cnss_pci.c4
-rw-r--r--drivers/net/wireless/cnss/cnss_sdio.c3
-rw-r--r--drivers/nvme/host/pci.c71
-rw-r--r--drivers/of/base.c41
-rw-r--r--drivers/of/dynamic.c2
-rw-r--r--drivers/of/of_private.h3
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-sysfs.c18
-rw-r--r--drivers/pci/quirks.c22
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c80
-rw-r--r--drivers/pinctrl/pinctrl-amd.c20
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/qcom/Kconfig7
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpi.c643
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c8
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c17
-rw-r--r--drivers/platform/msm/ipa/ipa_api.c19
-rw-r--r--drivers/platform/msm/ipa/ipa_api.h3
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c30
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c32
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c239
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c5
-rw-r--r--drivers/platform/x86/hp-wmi.c7
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/power_supply_core.c27
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c23
-rw-r--r--drivers/power/qcom-charger/smb-lib.c66
-rw-r--r--drivers/power/qcom-charger/smb-lib.h5
-rw-r--r--drivers/power/qcom-charger/smb-reg.h1
-rw-r--r--drivers/power/qcom-charger/smb1351-charger.c2
-rw-r--r--drivers/pps/clients/pps_parport.c2
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c58
-rw-r--r--drivers/pwm/pwm-lpc32xx.c55
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c15
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/block/dasd.c10
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/s390/cio/chp.c21
-rw-r--r--drivers/s390/cio/chp.h2
-rw-r--r--drivers/s390/cio/chsc.c43
-rw-r--r--drivers/s390/cio/cmf.c29
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/aacraid/commctrl.c13
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c26
-rw-r--r--drivers/scsi/constants.c5
-rw-r--r--drivers/scsi/cxlflash/common.h2
-rw-r--r--drivers/scsi/cxlflash/main.c69
-rw-r--r--drivers/scsi/cxlflash/main.h4
-rw-r--r--drivers/scsi/cxlflash/superpipe.c19
-rw-r--r--drivers/scsi/cxlflash/vlun.c2
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c373
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c134
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c23
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c274
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c46
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c10
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/ufs/ufs-qcom-ice.c7
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c25
-rw-r--r--drivers/scsi/ufs/ufshcd.c241
-rw-r--r--drivers/scsi/ufs/ufshcd.h3
-rw-r--r--drivers/soc/qcom/icnss.c1075
-rw-r--r--drivers/soc/qcom/pil-q6v5-mss.c4
-rw-r--r--drivers/soc/qcom/pil-q6v5.c4
-rw-r--r--drivers/soc/qcom/scm.c5
-rw-r--r--drivers/soc/qcom/spcom.c5
-rw-r--r--drivers/soc/qcom/subsys-pil-tz.c6
-rw-r--r--drivers/soc/qcom/subsystem_notif.c6
-rw-r--r--drivers/soc/qcom/subsystem_restart.c36
-rw-r--r--drivers/spi/spi-pxa2xx.c9
-rw-r--r--drivers/spi/spi-sun4i.c23
-rw-r--r--drivers/spi/spi-sun6i.c10
-rw-r--r--drivers/spi/spi_qsd.c195
-rw-r--r--drivers/spi/spi_qsd.h14
-rw-r--r--drivers/staging/android/lowmemorykiller.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c46
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h2
-rw-r--r--drivers/staging/rdma/ipath/ipath_file_ops.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c5
-rw-r--r--drivers/target/target_core_device.c8
-rw-r--r--drivers/target/target_core_file.c3
-rw-r--r--drivers/target/target_core_iblock.c3
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_sbc.c2
-rw-r--r--drivers/target/target_core_transport.c78
-rw-r--r--drivers/thermal/msm-tsens.c27
-rw-r--r--drivers/tty/pty.c63
-rw-r--r--drivers/tty/serial/atmel_serial.c14
-rw-r--r--drivers/tty/serial/msm_serial.c135
-rw-r--r--drivers/tty/serial/samsung.c18
-rw-r--r--drivers/usb/chipidea/udc.c7
-rw-r--r--drivers/usb/class/cdc-acm.c5
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/common/common.c1
-rw-r--r--drivers/usb/core/config.c69
-rw-r--r--drivers/usb/core/devices.c10
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c6
-rw-r--r--drivers/usb/core/hub.c49
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/urb.c3
-rw-r--r--drivers/usb/core/usb.c3
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc3/dbm.c4
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c115
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c11
-rw-r--r--drivers/usb/gadget/composite.c12
-rw-r--r--drivers/usb/gadget/function/f_accessory.c9
-rw-r--r--drivers/usb/gadget/function/f_fs.c10
-rw-r--r--drivers/usb/gadget/function/f_gsi.c4
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c51
-rw-r--r--drivers/usb/gadget/function/f_qdss.c23
-rw-r--r--drivers/usb/gadget/function/f_rmnet.c559
-rw-r--r--drivers/usb/gadget/function/f_uac2.c1
-rw-r--r--drivers/usb/gadget/function/u_bam.c4
-rw-r--r--drivers/usb/gadget/function/u_bam_data.c4
-rw-r--r--drivers/usb/gadget/function/u_ctrl_qti.c18
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.c56
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.h19
-rw-r--r--drivers/usb/gadget/function/u_qdss.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c4
-rw-r--r--drivers/usb/host/ohci-q.c3
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-mem.c109
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-plat.c4
-rw-r--r--drivers/usb/host/xhci-ring.c16
-rw-r--r--drivers/usb/host/xhci.c7
-rw-r--r--drivers/usb/misc/usbtest.c7
-rw-r--r--drivers/usb/pd/policy_engine.c24
-rw-r--r--drivers/usb/phy/phy-msm-qusb-v2.c9
-rw-r--r--drivers/usb/phy/phy-msm-qusb.c11
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c22
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c18
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/mos7720.c2
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/option.c34
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c85
-rw-r--r--drivers/vhost/scsi.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_host.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c55
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c20
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c4
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c14
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pipe.c23
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c5
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c18
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h19
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c14
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c3
-rw-r--r--fs/9p/vfs_file.c6
-rw-r--r--fs/btrfs/ctree.h1
-rw-r--r--fs/btrfs/disk-io.c3
-rw-r--r--fs/btrfs/extent_io.c6
-rw-r--r--fs/btrfs/file.c37
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/qgroup.c21
-rw-r--r--fs/btrfs/qgroup.h3
-rw-r--r--fs/cifs/cifs_fs_sb.h4
-rw-r--r--fs/cifs/cifsencrypt.c16
-rw-r--r--fs/cifs/cifsfs.c14
-rw-r--r--fs/cifs/connect.c49
-rw-r--r--fs/cifs/dir.c44
-rw-r--r--fs/cifs/inode.c22
-rw-r--r--fs/cifs/smb2ops.c30
-rw-r--r--fs/dcache.c7
-rw-r--r--fs/devpts/inode.c49
-rw-r--r--fs/ecryptfs/file.c78
-rw-r--r--fs/ecryptfs/kthread.c13
-rw-r--r--fs/ext4/balloc.c3
-rw-r--r--fs/ext4/extents.c12
-rw-r--r--fs/ext4/inode.c83
-rw-r--r--fs/ext4/mballoc.c17
-rw-r--r--fs/ext4/namei.c9
-rw-r--r--fs/ext4/super.c53
-rw-r--r--fs/ext4/xattr.c66
-rw-r--r--fs/fuse/file.c24
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/inode.c6
-rw-r--r--fs/jbd2/commit.c2
-rw-r--r--fs/locks.c2
-rw-r--r--fs/namei.c4
-rw-r--r--fs/namespace.c1
-rw-r--r--fs/nfs/write.c5
-rw-r--r--fs/nfsd/nfs4state.c65
-rw-r--r--fs/nilfs2/the_nilfs.c2
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/overlayfs/inode.c22
-rw-r--r--fs/overlayfs/overlayfs.h1
-rw-r--r--fs/overlayfs/super.c22
-rw-r--r--fs/proc/base.c52
-rw-r--r--fs/proc/task_mmu.c66
-rw-r--r--fs/proc/task_nommu.c49
-rw-r--r--fs/pstore/platform.c36
-rw-r--r--fs/pstore/pmsg.c35
-rw-r--r--fs/pstore/ram.c19
-rw-r--r--fs/pstore/ram_core.c47
-rw-r--r--fs/sdcardfs/derived_perm.c2
-rw-r--r--fs/seq_file.c4
-rw-r--r--fs/sysfs/file.c8
-rw-r--r--fs/ubifs/tnc_commit.c2
-rw-r--r--fs/xfs/libxfs/xfs_sb.c3
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/drm/i915_pciids.h6
-rw-r--r--include/dt-bindings/msm/msm-bus-ids.h3
-rw-r--r--include/linux/acpi.h2
-rw-r--r--include/linux/backing-dev-defs.h1
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/bcma/bcma.h1
-rw-r--r--include/linux/bio.h3
-rw-r--r--include/linux/blkdev.h78
-rw-r--r--include/linux/capability.h5
-rw-r--r--include/linux/cpufreq.h16
-rw-r--r--include/linux/cpuidle.h2
-rw-r--r--include/linux/devpts_fs.h34
-rw-r--r--include/linux/fs.h29
-rw-r--r--include/linux/i8042.h6
-rw-r--r--include/linux/ipa.h6
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/lightnvm.h5
-rw-r--r--include/linux/memcontrol.h8
-rw-r--r--include/linux/memory-state-time.h42
-rw-r--r--include/linux/mfd/cros_ec.h15
-rw-r--r--include/linux/mlx5/qp.h5
-rw-r--r--include/linux/mm.h3
-rw-r--r--include/linux/mmc/core.h2
-rw-r--r--include/linux/mmc/host.h7
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/msi.h8
-rw-r--r--include/linux/pci_ids.h7
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pstore.h11
-rw-r--r--include/linux/pstore_ram.h7
-rw-r--r--include/linux/sched.h34
-rw-r--r--include/linux/sched/sysctl.h6
-rw-r--r--include/linux/sched_energy.h46
-rw-r--r--include/linux/serio.h24
-rw-r--r--include/linux/slab.h12
-rw-r--r--include/linux/slub_def.h1
-rw-r--r--include/linux/thread_info.h25
-rw-r--r--include/linux/time.h26
-rw-r--r--include/linux/uaccess.h7
-rw-r--r--include/linux/usb/msm_hsusb.h6
-rw-r--r--include/net/cfg80211.h4
-rw-r--r--include/net/fib_rules.h1
-rw-r--r--include/soc/qcom/camera2.h3
-rw-r--r--include/soc/qcom/subsystem_restart.h20
-rw-r--r--include/sound/apr_audio-v2.h9
-rw-r--r--include/target/target_core_backend.h2
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric.h1
-rw-r--r--include/trace/events/cpufreq_sched.h87
-rw-r--r--include/trace/events/power.h7
-rw-r--r--include/trace/events/sched.h358
-rw-r--r--include/trace/events/sunrpc.h47
-rw-r--r--include/uapi/linux/hyperv.h1
-rw-r--r--include/uapi/linux/nl80211.h43
-rw-r--r--include/uapi/linux/rtnetlink.h2
-rw-r--r--include/uapi/linux/usb/ch9.h1
-rw-r--r--include/uapi/linux/videodev2.h3
-rw-r--r--include/uapi/media/msmb_isp.h22
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h10
-rw-r--r--init/Kconfig29
-rw-r--r--init/do_mounts_dm.c3
-rw-r--r--ipc/msg.c2
-rw-r--r--ipc/sem.c12
-rw-r--r--kernel/capability.c46
-rw-r--r--kernel/cgroup.c1
-rw-r--r--kernel/cred.c2
-rw-r--r--kernel/events/uprobes.c8
-rw-r--r--kernel/exit.c3
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/irq/msi.c19
-rw-r--r--kernel/module.c13
-rw-r--r--kernel/panic.c8
-rw-r--r--kernel/rcu/sync.c1
-rw-r--r--kernel/sched/Makefile3
-rw-r--r--kernel/sched/core.c321
-rw-r--r--kernel/sched/cpufreq_sched.c499
-rw-r--r--kernel/sched/cputime.c15
-rw-r--r--kernel/sched/deadline.c33
-rw-r--r--kernel/sched/energy.c134
-rw-r--r--kernel/sched/fair.c1398
-rw-r--r--kernel/sched/features.h6
-rw-r--r--kernel/sched/hmp.c87
-rw-r--r--kernel/sched/idle.c3
-rw-r--r--kernel/sched/rt.c67
-rw-r--r--kernel/sched/sched.h260
-rw-r--r--kernel/sched/tune.c756
-rw-r--r--kernel/sched/tune.h55
-rw-r--r--kernel/sched/walt.c1171
-rw-r--r--kernel/sched/walt.h62
-rw-r--r--kernel/sysctl.c42
-rw-r--r--kernel/time/clocksource.c52
-rw-r--r--kernel/time/hrtimer.c7
-rw-r--r--kernel/time/ntp.c20
-rw-r--r--kernel/time/posix-cpu-timers.c1
-rw-r--r--kernel/time/sched_clock.c11
-rw-r--r--kernel/time/timekeeping.c7
-rw-r--r--kernel/time/timekeeping_debug.c9
-rw-r--r--lib/mpi/mpicoder.c21
-rw-r--r--lib/strncpy_from_user.c15
-rw-r--r--lib/strnlen_user.c21
-rw-r--r--mm/Makefile4
-rw-r--r--mm/backing-dev.c19
-rw-r--r--mm/compaction.c61
-rw-r--r--mm/hugetlb.c7
-rw-r--r--mm/maccess.c3
-rw-r--r--mm/memcontrol.c143
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/process_reclaim.c7
-rw-r--r--mm/slab.c30
-rw-r--r--mm/slab_common.c4
-rw-r--r--mm/slub.c140
-rw-r--r--mm/usercopy.c277
-rw-r--r--mm/util.c27
-rw-r--r--mm/vmpressure.c28
-rw-r--r--net/bluetooth/l2cap_sock.c2
-rw-r--r--net/ceph/osdmap.c156
-rw-r--r--net/ipv4/fib_semantics.c6
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/tcp_input.c39
-rw-r--r--net/ipv4/tcp_output.c3
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ping.c38
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/irda/af_irda.c7
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/tx.c6
-rw-r--r--net/netfilter/x_tables.c6
-rw-r--r--net/netlabel/netlabel_kapi.c12
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/tcp.c5
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c8
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--net/sysctl_net.c2
-rw-r--r--net/tipc/netlink_compat.c3
-rw-r--r--net/tipc/subscr.c3
-rw-r--r--net/wireless/nl80211.c46
-rw-r--r--scripts/recordmcount.c9
-rw-r--r--security/Kconfig40
-rw-r--r--security/apparmor/apparmorfs.c1
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/core/timer.c20
-rw-r--r--sound/firewire/fireworks/fireworks.h1
-rw-r--r--sound/firewire/fireworks/fireworks_hwdep.c71
-rw-r--r--sound/firewire/fireworks/fireworks_proc.c4
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c5
-rw-r--r--sound/firewire/tascam/tascam-hwdep.c33
-rw-r--r--sound/hda/array.c4
-rw-r--r--sound/pci/hda/hda_intel.c38
-rw-r--r--sound/pci/hda/patch_hdmi.c5
-rw-r--r--sound/pci/hda/patch_realtek.c43
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c5
-rw-r--r--sound/soc/codecs/Kconfig7
-rw-r--r--sound/soc/codecs/Makefile3
-rw-r--r--sound/soc/codecs/msm8x16/Makefile3
-rw-r--r--sound/soc/codecs/msm8x16/msm8x16-wcd-tables.c263
-rw-r--r--sound/soc/codecs/msm8x16/msm8x16-wcd.c6022
-rw-r--r--sound/soc/codecs/msm_sdw/Kconfig6
-rw-r--r--sound/soc/codecs/msm_sdw/Makefile3
-rw-r--r--sound/soc/codecs/msm_sdw/msm-sdw-tables.c221
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw.h166
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw_cdc.c1920
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw_cdc_utils.c211
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw_registers.h126
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw_regmap.c155
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/Kconfig (renamed from sound/soc/codecs/msm8x16/Kconfig)2
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/Makefile2
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msm-analog-cdc.c4645
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msm-analog-cdc.h (renamed from sound/soc/codecs/msm8x16/msm8x16-wcd.h)174
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msm-cdc-common.h66
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.c2039
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.h93
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-irq.c (renamed from sound/soc/codecs/msm8x16/msm8916-wcd-irq.c)6
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-irq.h (renamed from sound/soc/codecs/msm8x16/msm8916-wcd-irq.h)0
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-registers.h (renamed from sound/soc/codecs/msm8x16/msm8x16_wcd_registers.h)198
-rw-r--r--sound/soc/codecs/msmfalcon_cdc/msmfalcon-regmap.c (renamed from sound/soc/codecs/msm8x16/msm89xx-regmap.c)244
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-routing.h3
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x.c2
-rw-r--r--sound/soc/msm/Kconfig7
-rw-r--r--sound/soc/msm/Makefile2
-rw-r--r--sound/soc/msm/msm8998.c44
-rw-r--r--sound/soc/msm/msmfalcon-common.c53
-rw-r--r--sound/soc/msm/msmfalcon-common.h12
-rw-r--r--sound/soc/msm/msmfalcon-ext-dai-links.c1
-rw-r--r--sound/soc/msm/msmfalcon-internal.c290
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dts-eagle.c4
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c62
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c5
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c6
-rw-r--r--sound/soc/msm/qdsp6v2/q6voice.c315
-rw-r--r--sound/soc/msm/qdsp6v2/q6voice.h123
-rw-r--r--sound/soc/msm/qdsp6v2/rtac.c24
-rw-r--r--sound/usb/card.c1
-rw-r--r--sound/usb/line6/pcm.c3
-rw-r--r--sound/usb/line6/pod.c12
-rw-r--r--sound/usb/quirks.c3
-rw-r--r--sound/usb/usb_audio_qmi_svc.c38
-rw-r--r--tools/hv/hv_fcopy_daemon.c20
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c6
-rw-r--r--tools/testing/nvdimm/test/nfit.c2
-rw-r--r--virt/kvm/kvm_main.c2
882 files changed, 31898 insertions, 13000 deletions
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index e19a43446357..15feda3b7407 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -12,17 +12,9 @@ Required properties:
- reg-names: Names of the memory regions defined in reg entry
- interrupts: Copy engine interrupt table
- qcom,wlan-msa-memory: MSA memory size
- - clocks: List of clock phandles
- - clock-names: List of clock names corresponding to the "clocks" property
- iommus: SMMUs and corresponding Stream IDs needed by WLAN
- qcom,wlan-smmu-iova-address: I/O virtual address range as <start length>
format to be used for allocations associated between WLAN and SMMU
- - <supply-name>-supply: phandle to the regulator device tree node
- Required "supply-name" is "vdd-0.8-cx-mx".
- - qcom,<supply>-config - specifies voltage levels for supply. Should be
- specified in pairs (min, max), units uV. There can
- be optional load in uA and Regulator settle delay in
- uS.
Optional properties:
- qcom,icnss-vadc: VADC handle for vph_pwr read APIs.
@@ -34,8 +26,6 @@ Example:
compatible = "qcom,icnss";
reg = <0x0a000000 0x1000000>;
reg-names = "membase";
- clocks = <&clock_gcc clk_aggre2_noc_clk>;
- clock-names = "smmu_aggre2_noc_clk";
iommus = <&anoc2_smmu 0x1900>,
<&anoc2_smmu 0x1901>;
qcom,wlan-smmu-iova-address = <0 0x10000000>;
@@ -53,6 +43,4 @@ Example:
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x200000>;
- vdd-0.8-cx-mx-supply = <&pm8998_l5>;
- qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index a9bb6b81e60d..0299b1aef2b6 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -365,6 +365,28 @@ the fps window.
2A/2B command.
- qcom,dcs-cmd-by-left: Boolean to indicate that dcs command are sent
through the left DSI controller only in a dual-dsi configuration
+- qcom,mdss-dsi-panel-hdr-enabled: Boolean to indicate HDR support in panel.
+- qcom,mdss-dsi-panel-hdr-color-primaries:
+	Array of 8 unsigned integers denoting chromaticity of panel. These
+	values are specified in nits units. The value range is 0 through 50000.
+	To obtain real chromaticity, these values should be divided by a factor
+	of 50000. The structure of the array is defined in the below order
+ value 1: x value of white chromaticity of display panel
+ value 2: y value of white chromaticity of display panel
+ value 3: x value of red chromaticity of display panel
+ value 4: y value of red chromaticity of display panel
+ value 5: x value of green chromaticity of display panel
+ value 6: y value of green chromaticity of display panel
+ value 7: x value of blue chromaticity of display panel
+ value 8: y value of blue chromaticity of display panel
+- qcom,mdss-dsi-panel-peak-brightness: Maximum brightness supported by panel. In the absence of a maximum
+	value, the typical value becomes peak brightness. Value is specified in nits units.
+	To obtain real peak brightness, this value should be divided by a factor of
+	10000.
+- qcom,mdss-dsi-panel-blackness-level: Blackness level supported by panel. Blackness level is defined as
+	the ratio of peak brightness to contrast. Value is specified in nits units.
+	To obtain the real blackness level, this value should be divided by a factor of
+	10000.
- qcom,mdss-dsi-lp11-init: Boolean used to enable the DSI clocks and data lanes (low power 11)
before issuing hardware reset line.
- qcom,mdss-dsi-init-delay-us: Delay in microseconds(us) before performing any DSI activity in lp11
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index 991c6d4ec255..9fb84020add7 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -172,6 +172,8 @@ Optional properties:
should contain phandle of respective ir-led node
- qcom,ir-cut-src : if ir cut is supported by this sensor, this property
should contain phandle of respective ir-cut node
+- qcom,special-support-sensors: if only some special sensors are supported
+  on this board, add the sensor names to this property.
* Qualcomm Technologies, Inc. MSM ACTUATOR
diff --git a/Documentation/devicetree/bindings/media/video/msm-cpp.txt b/Documentation/devicetree/bindings/media/video/msm-cpp.txt
index ade5fbe8cbd7..2bd9fb840830 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cpp.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cpp.txt
@@ -68,6 +68,8 @@ Optional properties:
platforms that support such feature.
- qcom,vbif-setting: The offset and value for vbif core qos registers.
The first entry is register offset and second entry is register value.
+- qcom,micro-reset: Boolean flag indicating if micro reset need to be enabled.
+  This needs to be present on platforms that support this feature.
Example:
@@ -111,6 +113,7 @@ Example:
<0x2C 0x10000000>;
qcom,src-clock-rates = <100000000 200000000 384000000 404000000
480000000 576000000 600000000>;
+ qcom,micro-reset;
qcom,cpp-fw-payload-info {
qcom,stripe-base = <553>;
qcom,plane-base = <481>;
diff --git a/Documentation/devicetree/bindings/misc/memory-state-time.txt b/Documentation/devicetree/bindings/misc/memory-state-time.txt
new file mode 100644
index 000000000000..c99a506c030d
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/memory-state-time.txt
@@ -0,0 +1,8 @@
+Memory bandwidth and frequency state tracking
+
+Required properties:
+- compatible : should be:
+ "memory-state-time"
+- freq-tbl: Should contain entries with each frequency in Hz.
+- bw-buckets: Should contain upper-bound limits for each bandwidth bucket in Mbps.
+ Must match the framework power_profile.xml for the device.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
new file mode 100644
index 000000000000..57510ec13d03
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
@@ -0,0 +1,154 @@
+Qualcomm Technologies, Inc. LPI GPIO controller driver
+
+This DT bindings describes the GPIO controller driver
+being added for supporting LPI (Low Power Island) TLMM
+from QTI chipsets.
+
+Following properties are for LPI GPIO controller device main node.
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,lpi-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base of the GPIO controller and length.
+
+- qcom,num-gpios:
+ Usage: required
+ Value type: <u32>
+ Definition: Number of GPIOs supported by the controller.
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: Used to mark the device node as a GPIO controller.
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 2;
+ The first cell will be used to define gpio number and the
+ second denotes the flags for this gpio.
+
+Please refer to ../gpio/gpio.txt for general description of GPIO bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin or a list of pins. This configuration can include the
+mux function to select on those pin(s), and various pin configuration
+parameters, as listed below.
+
+SUBNODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode. Valid pins are: gpio0-gpio31 for LPI.
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Valid values are:
+ "gpio",
+ "func1",
+ "func2",
+ "func3",
+ "func4",
+ "func5"
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-bus-hold:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as bus-keeper mode.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <empty>
+ Definition: The specified pins should be configured as pull up.
+
+- input-enable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are put in input mode.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+
+- qcom,drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins.
+
+Example:
+
+ lpi_tlmm: lpi_pinctrl@152c000 {
+ compatible = "qcom,lpi-pinctrl";
+ qcom,num-gpios = <32>;
+ reg = <0x152c000 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ hph_comp_active: hph_comp_active {
+ mux {
+ pins = "gpio22";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio22";
+ output-high;
+ qcom,drive-strength = <8>;
+ };
+ };
+
+ hph_comp_sleep: hph_comp_sleep {
+ mux {
+ pins = "gpio22";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio22";
+ qcom,drive-strength = <2>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
index 382587ea5922..94a1fdceec8f 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
@@ -64,6 +64,12 @@ Charger specific properties:
Value type: <u32>
Definition: Specifies the DC input current limit in micro-amps.
+- qcom,boost-threshold-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the boost current threshold in micro-amps.
+ If the value is not present, 100mA is used as default.
+
- qcom,wipower-max-uw
Usage: optional
Value type: <u32>
diff --git a/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt b/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
index 2fb34fd16258..c4b69d734880 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
@@ -5,7 +5,12 @@ msm-ssc-sensors driver implements the mechanism that allows to load SLPI firmwar
Required properties:
- compatible: This must be "qcom,msm-ssc-sensors"
+
+Optional properties:
+
- qcom,firmware-name: SLPI firmware name, must be "slpi_v1" or "slpi_v2"
+  The firmware name is not required if the sensors driver shares a processor for execution.
+
Example:
The following for msm8998 version 1.
diff --git a/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt b/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt
new file mode 100644
index 000000000000..11216f09e596
--- /dev/null
+++ b/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt
@@ -0,0 +1,360 @@
+===========================================================
+Energy cost bindings for Energy Aware Scheduling
+===========================================================
+
+===========================================================
+1 - Introduction
+===========================================================
+
+This note specifies bindings required for energy-aware scheduling
+(EAS)[1]. Historically, the scheduler's primary objective has been
+performance. EAS aims to provide an alternative objective - energy
+efficiency. EAS relies on a simple platform energy cost model to
+guide scheduling decisions. The model only considers the CPU
+subsystem.
+
+This note is aligned with the definition of the layout of physical
+CPUs in the system as described in the ARM topology binding
+description [2]. The concept is applicable to any system so long as
+the cost model data is provided for those processing elements in
+that system's topology that EAS is required to service.
+
+Processing elements refer to hardware threads, CPUs and clusters of
+related CPUs in increasing order of hierarchy.
+
+EAS requires two key cost metrics - busy costs and idle costs. Busy
+costs comprise a list of compute capacities for the processing
+element in question and the corresponding power consumption at that
+capacity. Idle costs comprise a list of power consumption values
+for each idle state [C-state] that the processing element supports.
+For a detailed description of these metrics, their derivation and
+their use see [3].
+
+These cost metrics are required for processing elements in all
+scheduling domain levels that EAS is required to service.
+
+===========================================================
+2 - energy-costs node
+===========================================================
+
+Energy costs for the processing elements in scheduling domains that
+EAS is required to service are defined in the energy-costs node
+which acts as a container for the actual per processing element cost
+nodes. A single energy-costs node is required for a given system.
+
+- energy-costs node
+
+ Usage: Required
+
+ Description: The energy-costs node is a container node and
+  its sub-nodes describe costs for each processing element at
+ all scheduling domain levels that EAS is required to
+ service.
+
+ Node name must be "energy-costs".
+
+ The energy-costs node's parent node must be the cpus node.
+
+ The energy-costs node's child nodes can be:
+
+ - one or more cost nodes.
+
+ Any other configuration is considered invalid.
+
+The energy-costs node can only contain a single type of child node
+whose bindings are described in paragraph 4.
+
+===========================================================
+3 - energy-costs node child nodes naming convention
+===========================================================
+
+energy-costs child nodes must follow a naming convention where the
+node name must be "thread-costN", "core-costN", "cluster-costN"
+depending on whether the costs in the node are for a thread, core or
+cluster. N (where N = {0, 1, ...}) is the node number and has no
+bearing on the OS's logical thread, core or cluster index.
+
+===========================================================
+4 - cost node bindings
+===========================================================
+
+Bindings for cost nodes are defined as follows:
+
+- cluster-cost node
+
+ Description: must be declared within an energy-costs node. A
+ system can contain multiple clusters and each cluster
+  serviced by EAS must have a corresponding cluster-cost
+ node.
+
+ The cluster-cost node name must be "cluster-costN" as
+ described in 3 above.
+
+ A cluster-cost node must be a leaf node with no children.
+
+ Properties for cluster-cost nodes are described in paragraph
+ 5 below.
+
+ Any other configuration is considered invalid.
+
+- core-cost node
+
+ Description: must be declared within an energy-costs node. A
+ system can contain multiple cores and each core serviced by
+ EAS must have a corresponding core-cost node.
+
+ The core-cost node name must be "core-costN" as described in
+ 3 above.
+
+ A core-cost node must be a leaf node with no children.
+
+ Properties for core-cost nodes are described in paragraph
+ 5 below.
+
+ Any other configuration is considered invalid.
+
+- thread-cost node
+
+ Description: must be declared within an energy-costs node. A
+ system can contain cores with multiple hardware threads and
+ each thread serviced by EAS must have a corresponding
+ thread-cost node.
+
+  The thread-cost node name must be "thread-costN" as described
+  in 3 above.
+
+  A thread-cost node must be a leaf node with no children.
+
+ Properties for thread-cost nodes are described in paragraph
+ 5 below.
+
+ Any other configuration is considered invalid.
+
+===========================================================
+5 - Cost node properties
+==========================================================
+
+All cost node types must have only the following properties:
+
+- busy-cost-data
+
+ Usage: required
+ Value type: An array of 2-item tuples. Each item is of type
+ u32.
+ Definition: The first item in the tuple is the capacity
+ value as described in [3]. The second item in the tuple is
+ the energy cost value as described in [3].
+
+- idle-cost-data
+
+ Usage: required
+ Value type: An array of 1-item tuples. The item is of type
+ u32.
+ Definition: The item in the tuple is the energy cost value
+ as described in [3].
+
+===========================================================
+6 - Extensions to the cpu node
+===========================================================
+
+The cpu node is extended with a property that establishes the
+connection between the processing element represented by the cpu
+node and the cost-nodes associated with this processing element.
+
+The connection is expressed in line with the topological hierarchy
+that this processing element belongs to starting with the level in
+the hierarchy that this processing element itself belongs to through
+to the highest level that EAS is required to service. The
+connection cannot be sparse and must be contiguous from the
+processing element's level through to the highest desired level. The
+highest desired level must be the same for all processing elements.
+
+Example: Given that a cpu node may represent a thread that is a part
+of a core, this property may contain multiple elements which
+associate the thread with cost nodes describing the costs for the
+thread itself, the core the thread belongs to, the cluster the core
+belongs to and so on. The elements must be ordered from the lowest
+level nodes to the highest desired level that EAS must service. The
+highest desired level must be the same for all cpu nodes. The
+elements must not be sparse: there must be elements for the current
+thread, the next level of hierarchy (core) and so on without any
+'holes'.
+
+Example: Given that a cpu node may represent a core that is a part
+of a cluster of related cpus this property may contain multiple
+elements which associate the core with cost nodes describing the
+costs for the core itself, the cluster the core belongs to and so
+on. The elements must be ordered from the lowest level nodes to the
+highest desired level that EAS must service. The highest desired
+level must be the same for all cpu nodes. The elements must not be
+sparse: there must be elements for the current thread, the next
+level of hierarchy (core) and so on without any 'holes'.
+
+If the system comprises of hierarchical clusters of clusters, this
+property will contain multiple associations with the relevant number
+of cluster elements in hierarchical order.
+
+Property added to the cpu node:
+
+- sched-energy-costs
+
+ Usage: required
+ Value type: List of phandles
+ Definition: a list of phandles to specific cost nodes in the
+ energy-costs parent node that correspond to the processing
+ element represented by this cpu node in hierarchical order
+ of topology.
+
+ The order of phandles in the list is significant. The first
+ phandle is to the current processing element's own cost
+ node. Subsequent phandles are to higher hierarchical level
+ cost nodes up until the maximum level that EAS is to
+ service.
+
+ All cpu nodes must have the same highest level cost node.
+
+ The phandle list must not be sparsely populated with handles
+ to non-contiguous hierarchical levels. See commentary above
+ for clarity.
+
+ Any other configuration is invalid.
+
+===========================================================
+7 - Example dts
+===========================================================
+
+Example 1 (ARM 64-bit, 6-cpu system, two clusters of cpus, one
+cluster of 2 Cortex-A57 cpus, one cluster of 4 Cortex-A53 cpus):
+
+cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ .
+ .
+ .
+ A57_0: cpu@0 {
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x0>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A57_L2>;
+ clocks = <&scpi_dvfs 0>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+ };
+
+ A57_1: cpu@1 {
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x1>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A57_L2>;
+ clocks = <&scpi_dvfs 0>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+ };
+
+ A53_0: cpu@100 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x100>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+ };
+
+ A53_1: cpu@101 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x101>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+ };
+
+ A53_2: cpu@102 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x102>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+ };
+
+ A53_3: cpu@103 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x103>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+ };
+
+ energy-costs {
+ CPU_COST_0: core-cost0 {
+ busy-cost-data = <
+ 417 168
+ 579 251
+ 744 359
+ 883 479
+ 1024 616
+ >;
+ idle-cost-data = <
+ 15
+ 0
+ >;
+ };
+ CPU_COST_1: core-cost1 {
+ busy-cost-data = <
+ 235 33
+ 302 46
+ 368 61
+ 406 76
+ 447 93
+ >;
+ idle-cost-data = <
+ 6
+ 0
+ >;
+ };
+ CLUSTER_COST_0: cluster-cost0 {
+ busy-cost-data = <
+ 417 24
+ 579 32
+ 744 43
+ 883 49
+ 1024 64
+ >;
+ idle-cost-data = <
+ 65
+ 24
+ >;
+ };
+ CLUSTER_COST_1: cluster-cost1 {
+ busy-cost-data = <
+ 235 26
+ 303 30
+ 368 39
+ 406 47
+ 447 57
+ >;
+ idle-cost-data = <
+ 56
+ 17
+ >;
+ };
+ };
+};
+
+===============================================================================
+[1] https://lkml.org/lkml/2015/5/12/728
+[2] Documentation/devicetree/bindings/topology.txt
+[3] Documentation/scheduler/sched-energy.txt
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 336b9f0087a5..8957ff9dc9ee 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -361,6 +361,10 @@ Required properties:
- compatible : "qcom,msm-cdc-pinctrl"
+Optional properties:
+ - qcom,lpi-gpios : This boolean property is added if GPIOs are under
+ LPI TLMM.
+
* msm-dai-slim
Required properties:
@@ -1655,6 +1659,10 @@ mclk frequency needs to be configured for internal and external PA.
- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
+- qcom,cdc-pdm-gpios : phandle for pdm gpios.
+- qcom,cdc-comp-gpios : phandle for compander gpios.
+- qcom,cdc-dmic-gpios : phandle for Digital mic clk and data gpios.
+- qcom,cdc-sdw-gpios : phandle for soundwire clk and data gpios.
Example:
sound {
@@ -1675,24 +1683,11 @@ Example:
"AMIC1", "MIC BIAS External",
"AMIC2", "MIC BIAS Internal2",
"AMIC3", "MIC BIAS External";
- qcom,msm-gpios =
- "int_pdm",
- "us_eu_gpio";
- qcom,pinctrl-names =
- "all_off",
- "int_pdm_act",
- "us_eu_gpio_act",
- "int_pdm_us_eu_gpio_act";
- pinctrl-names =
- "all_off",
- "int_pdm_act",
- "us_eu_gpio_act",
- "int_pdm_us_eu_gpio_act";
- pinctrl-0 = <&cdc_pdm_lines_sus &cdc_pdm_lines_2_sus &cross_conn_det_sus>;
- pinctrl-1 = <&cdc_pdm_lines_act &cdc_pdm_lines_2_act &cross_conn_det_sus>;
- pinctrl-2 = <&cdc_pdm_lines_sus &cdc_pdm_lines_2_sus &cross_conn_det_act>;
- pinctrl-3 = <&cdc_pdm_lines_act &cdc_pdm_lines_2_act &cross_conn_det_act>;
qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+ qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+ qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+ qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+ qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&lpa>;
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index c0a7a240b922..fb2b3654dcc7 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -438,19 +438,14 @@ Optional properties:
Example:
-msm_dig_codec: qcom,msm-int-codec {
- compatible = "qcom,msm_int_core_codec";
- qcom,dig-cdc-base-addr = <0xc0f0000>;
+msm_digital_codec: msm-dig-codec@c0f0000 {
+ compatible = "qcom,msm-digital-codec";
+ reg = <0xc0f0000 0x0>;
};
-msm8x16_wcd_codec@f100 {
- compatible = "qcom,msm_int_pmic_analog_codec";
- reg = <0xf100 0x100>;
-};
-
-msm8x16_wcd_codec@f000{
- compatible = "qcom,msm_int_pmic_digital_codec";
- reg = <0xf000 0x100>;
+pmic_analog_codec: analog-codec@f000 {
+ compatible = "qcom,pmic-analog-codec";
+ reg = <0xf000 0x200>;
interrupt-parent = <&spmi_bus>;
interrupts = <0x1 0xf0 0x0>,
<0x1 0xf0 0x1>,
@@ -501,7 +496,41 @@ msm8x16_wcd_codec@f000{
"cdc-vdda-cp";
qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
- qcom,dig-cdc-base-addr = <0xc0f0000>;
+};
+
+MSM based Soundwire audio codec
+
+Required properties:
+ - compatible = "qcom,msm-sdw-codec";
+ - reg: Specifies the soundwire codec base address for MSM digital
+ soundwire core registers.
+ - interrupts: Specifies the soundwire master interrupt number to Apps processor.
+ - interrupt-names: Specify the interrupt name from soundwire master.
 - swr_master: This node is added as a child of the MSM soundwire codec
		and uses the already existing soundwire master driver.
		There are subchild node(s) under the soundwire master
		which use the already existing WSA881x driver and
		represent soundwire slave devices.
+
+Example:
+
+msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
+ compatible = "qcom,msm-sdw-codec";
+ reg = <0x152c1000 0x0>;
+ interrupts = <0 161 0>;
+ interrupt-names = "swr_master_irq";
+
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_1: wsa881x@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170212>;
+ qcom,spkr-sd-n-gpio = <&tlmm 80 0>;
+ };
+ };
};
Tasha audio CODEC in I2C mode
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index fc697e843fbb..ada060d19a9b 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -82,10 +82,13 @@ Optional properties:
total number of supported sensors with each controller instance.
- qcom,valid-status-check: If property is present, check the VALID bit is set
before reporting the temperature data.
-- qcom,temp1-offset: If property is present, Use these offset values
+- qcom,temp1-offset: If flag is present, Use these offset values
to be added for 30 deg calib points.
-- qcom,temp2-offset: If property is present, Use these offset values
+- qcom,temp2-offset: If flag is present, Use these offset values
to be added for 120 deg calib points.
+- qcom,cycle-monitor: If flag is present, Use the value for cycle
+ completion monitoring.
+- qcom,wd-bark: If flag is present, Use the value for watchdog bark.
Example:
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 04c02786f6bd..aaafd8178eab 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -348,7 +348,7 @@ address perms offset dev inode pathname
a7cb1000-a7cb2000 ---p 00000000 00:00 0
a7cb2000-a7eb2000 rw-p 00000000 00:00 0
a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack:1001]
+a7eb3000-a7ed5000 rw-p 00000000 00:00 0
a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
@@ -380,7 +380,6 @@ is not associated with a file:
[heap] = the heap of the program
[stack] = the stack of the main process
- [stack:1001] = the stack of the thread with tid 1001
[vdso] = the "virtual dynamic shared object",
the kernel system call handler
[anon:<name>] = an anonymous mapping that has been
@@ -390,10 +389,8 @@ is not associated with a file:
The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. This is a key difference from the
-content of /proc/PID/maps, where you will see all mappings that are being used
-as stack by all of those tasks. Hence, for the example above, the task-level
-map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
+as [stack] if that task sees it as a stack. Hence, for the example above, the
+task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 276d3f68e08d..628e08f958f6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -923,6 +923,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
dm= [DM] Allows early creation of a device-mapper device.
See Documentation/device-mapper/boot.txt.
+	dmasound=	[HW,OSS] Sound subsystem buffers
+
dma_debug=off If the kernel is compiled with DMA_API_DEBUG support,
this option disables the debugging code at boot.
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
index a78bf1ffa68c..39b7f612c418 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/module-signing.txt
@@ -271,3 +271,9 @@ Since the private key is used to sign modules, viruses and malware could use
the private key to sign modules and compromise the operating system. The
private key must be either destroyed or moved to a secure location and not kept
in the root node of the kernel source tree.
+
+If you use the same private key to sign modules for multiple kernel
+configurations, you must ensure that the module version information is
+sufficient to prevent loading a module into a different kernel. Either
+set CONFIG_MODVERSIONS=y or ensure that each configuration has a different
+kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION.
diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt
new file mode 100644
index 000000000000..dab2f9088b33
--- /dev/null
+++ b/Documentation/scheduler/sched-energy.txt
@@ -0,0 +1,362 @@
+Energy cost model for energy-aware scheduling (EXPERIMENTAL)
+
+Introduction
+=============
+
+The basic energy model uses platform energy data stored in sched_group_energy
+data structures attached to the sched_groups in the sched_domain hierarchy. The
+energy cost model offers two functions that can be used to guide scheduling
+decisions:
+
+1. static unsigned int sched_group_energy(struct energy_env *eenv)
+2. static int energy_diff(struct energy_env *eenv)
+
+sched_group_energy() estimates the energy consumed by all cpus in a specific
+sched_group including any shared resources owned exclusively by this group of
+cpus. Resources shared with other cpus are excluded (e.g. later level caches).
+
+energy_diff() estimates the total energy impact of a utilization change. That
+is, adding, removing, or migrating utilization (tasks).
+
+Both functions use a struct energy_env to specify the scenario to be evaluated:
+
+ struct energy_env {
+ struct sched_group *sg_top;
+ struct sched_group *sg_cap;
+ int cap_idx;
+ int util_delta;
+ int src_cpu;
+ int dst_cpu;
+ int energy;
+ };
+
+sg_top: sched_group to be evaluated. Not used by energy_diff().
+
+sg_cap: sched_group covering the cpus in the same frequency domain. Set by
+sched_group_energy().
+
+cap_idx: Capacity state to be used for energy calculations. Set by
+find_new_capacity().
+
+util_delta: Amount of utilization to be added, removed, or migrated.
+
+src_cpu: Source cpu from where 'util_delta' utilization is removed. Should be
+-1 if no source (e.g. task wake-up).
+
+dst_cpu: Destination cpu where 'util_delta' utilization is added. Should be -1
+if utilization is removed (e.g. terminating tasks).
+
+energy: Result of sched_group_energy().
+
+The metric used to represent utilization is the actual per-entity running time
+averaged over time using a geometric series. Very similar to the existing
+per-entity load-tracking, but _not_ scaled by task priority and capped by the
+capacity of the cpu. The latter property does mean that utilization may
+underestimate the compute requirements for tasks on fully/over utilized cpus.
+The greatest potential for energy savings without affecting performance too much
+is scenarios where the system isn't fully utilized. If the system is deemed
+fully utilized load-balancing should be done with task load (includes task
+priority) instead in the interest of fairness and performance.
+
+
+Background and Terminology
+===========================
+
+To make it clear from the start:
+
+energy = [joule] (resource like a battery on powered devices)
+power = energy/time = [joule/second] = [watt]
+
+The goal of energy-aware scheduling is to minimize energy, while still getting
+the job done. That is, we want to maximize:
+
+ performance [inst/s]
+ --------------------
+ power [W]
+
+which is equivalent to minimizing:
+
+ energy [J]
+ -----------
+ instruction
+
+while still getting 'good' performance. It is essentially an alternative
+optimization objective to the current performance-only objective for the
+scheduler. This alternative considers two objectives: energy-efficiency and
+performance. Hence, there needs to be a user controllable knob to switch the
+objective. Since it is early days, this is currently a sched_feature
+(ENERGY_AWARE).
+
+The idea behind introducing an energy cost model is to allow the scheduler to
+evaluate the implications of its decisions rather than applying energy-saving
+techniques blindly that may only have positive effects on some platforms. At
+the same time, the energy cost model must be as simple as possible to minimize
+the scheduler latency impact.
+
+Platform topology
+------------------
+
+The system topology (cpus, caches, and NUMA information, not peripherals) is
+represented in the scheduler by the sched_domain hierarchy which has
+sched_groups attached at each level that covers one or more cpus (see
+sched-domains.txt for more details). To add energy awareness to the scheduler
+we need to consider power and frequency domains.
+
+Power domain:
+
+A power domain is a part of the system that can be powered on/off
+independently. Power domains are typically organized in a hierarchy where you
+may be able to power down just a cpu or a group of cpus along with any
+associated resources (e.g. shared caches). Powering up a cpu means that all
+power domains it is a part of in the hierarchy must be powered up. Hence, it is
+more expensive to power up the first cpu that belongs to a higher level power
+domain than powering up additional cpus in the same high level domain. Two
+level power domain hierarchy example:
+
+ Power source
+ +-------------------------------+----...
+per group PD G G
+ | +----------+ |
+ +--------+-------| Shared | (other groups)
+per-cpu PD G G | resource |
+ | | +----------+
+ +-------+ +-------+
+ | CPU 0 | | CPU 1 |
+ +-------+ +-------+
+
+Frequency domain:
+
+Frequency domains (P-states) typically cover the same group of cpus as one of
+the power domain levels. That is, there might be several smaller power domains
+sharing the same frequency (P-state) or there might be a power domain spanning
+multiple frequency domains.
+
+From a scheduling point of view there is no need to know the actual frequencies
+[Hz]. All the scheduler cares about is the compute capacity available at the
+current state (P-state) the cpu is in and any other available states. For that
+reason, and to also factor in any cpu micro-architecture differences, compute
+capacity scaling states are called 'capacity states' in this document. For SMP
+systems this is equivalent to P-states. For mixed micro-architecture systems
+(like ARM big.LITTLE) it is P-states scaled according to the micro-architecture
+performance relative to the other cpus in the system.
+
+Energy modelling:
+------------------
+
+Due to the hierarchical nature of the power domains, the most obvious way to
+model energy costs is therefore to associate power and energy costs with
+domains (groups of cpus). Energy costs of shared resources are associated with
+the group of cpus that share the resources, only the cost of powering the
+cpu itself and any private resources (e.g. private L1 caches) is associated
+with the per-cpu groups (lowest level).
+
+For example, for an SMP system with per-cpu power domains and a cluster level
+(group of cpus) power domain we get the overall energy costs to be:
+
+ energy = energy_cluster + n * energy_cpu
+
+where 'n' is the number of cpus powered up and energy_cluster is the cost paid
+as soon as any cpu in the cluster is powered up.
+
+The power and frequency domains can naturally be mapped onto the existing
+sched_domain hierarchy and sched_groups by adding the necessary data to the
+existing data structures.
+
+The energy model considers energy consumption from two contributors (shown in
+the illustration below):
+
+1. Busy energy: Energy consumed while a cpu and the higher level groups that it
+belongs to are busy running tasks. Busy energy is associated with the state of
+the cpu, not an event. The time the cpu spends in this state varies. Thus, the
+most obvious platform parameter for this contribution is busy power
+(energy/time).
+
+2. Idle energy: Energy consumed while a cpu and higher level groups that it
+belongs to are idle (in a C-state). Like busy energy, idle energy is associated
+with the state of the cpu. Thus, the platform parameter for this contribution
+is idle power (energy/time).
+
+Energy consumed during transitions from an idle-state (C-state) to a busy state
+(P-state) or going the other way is ignored by the model to simplify the energy
+model calculations.
+
+
+ Power
+ ^
+ | busy->idle idle->busy
+ | transition transition
+ |
+ | _ __
+ | / \ / \__________________
+ |______________/ \ /
+ | \ /
+ | Busy \ Idle / Busy
+ | low P-state \____________/ high P-state
+ |
+ +------------------------------------------------------------> time
+
+Busy |--------------| |-----------------|
+
+Wakeup |------| |------|
+
+Idle |------------|
+
+
+The basic algorithm
+====================
+
+The basic idea is to determine the total energy impact when utilization is
+added or removed by estimating the impact at each level in the sched_domain
+hierarchy starting from the bottom (sched_group contains just a single cpu).
+The energy cost comes from busy time (sched_group is awake because one or more
+cpus are busy) and idle time (in an idle-state). Energy model numbers account
+for energy costs associated with all cpus in the sched_group as a group.
+
+ for_each_domain(cpu, sd) {
+ sg = sched_group_of(cpu)
+ energy_before = curr_util(sg) * busy_power(sg)
+ + (1-curr_util(sg)) * idle_power(sg)
+ energy_after = new_util(sg) * busy_power(sg)
+ + (1-new_util(sg)) * idle_power(sg)
+ energy_diff += energy_before - energy_after
+
+ }
+
+ return energy_diff
+
+{curr, new}_util: The cpu utilization at the lowest level and the overall
+non-idle time for the entire group for higher levels. Utilization is in the
+range 0.0 to 1.0 in the pseudo-code.
+
+busy_power: The power consumption of the sched_group.
+
+idle_power: The power consumption of the sched_group when idle.
+
+Note: It is a fundamental assumption that the utilization is (roughly) scale
+invariant. Task utilization tracking factors in any frequency scaling and
+performance scaling differences due to different cpu micro-architectures such
+that task utilization can be used across the entire system.
+
+
+Platform energy data
+=====================
+
+struct sched_group_energy can be attached to sched_groups in the sched_domain
+hierarchy and has the following members:
+
+cap_states:
+ List of struct capacity_state representing the supported capacity states
+ (P-states). struct capacity_state has two members: cap and power, which
+ represents the compute capacity and the busy_power of the state. The
+ list must be ordered by capacity low->high.
+
+nr_cap_states:
+ Number of capacity states in cap_states list.
+
+idle_states:
+ List of struct idle_state containing idle_state power cost for each
+	idle-state supported by the system ordered by shallowest state first.
+	All states must be included at all levels in the hierarchy, i.e. a
+ sched_group spanning just a single cpu must also include coupled
+ idle-states (cluster states). In addition to the cpuidle idle-states,
+ the list must also contain an entry for the idling using the arch
+	default idle (arch_idle_cpu()). Although this state may not be a true
+	hardware idle-state it is considered the shallowest idle-state in the
+	energy model and must be the first entry. cpus may enter this state
+	(possibly 'active idling') if cpuidle decides not to enter a cpuidle
+ idle-state. Default idle may not be used when cpuidle is enabled.
+ In this case, it should just be a copy of the first cpuidle idle-state.
+
+nr_idle_states:
+ Number of idle states in idle_states list.
+
+There are no unit requirements for the energy cost data. Data can be normalized
+with any reference, however, the normalization must be consistent across all
+energy cost data. That is, one bogo-joule/watt must be the same quantity for
+all data, but we don't care what it is.
+
+A recipe for platform characterization
+=======================================
+
+Obtaining the actual model data for a particular platform requires some way of
+measuring power/energy. There isn't a tool to help with this (yet). This
+section provides a recipe for use as reference. It covers the steps used to
+characterize the ARM TC2 development platform. This sort of measurement is
+expected to be done anyway when tuning cpuidle and cpufreq for a given
+platform.
+
+The energy model needs two types of data (struct sched_group_energy holds
+these) for each sched_group where energy costs should be taken into account:
+
+1. Capacity state information
+
+A list containing the compute capacity and power consumption when fully
+utilized attributed to the group as a whole for each available capacity state.
+At the lowest level (group contains just a single cpu) this is the power of the
+cpu alone without including power consumed by resources shared with other cpus.
+It basically needs to fit the basic modelling approach described in "Background
+and Terminology" section:
+
+ energy_system = energy_shared + n * energy_cpu
+
+for a system containing 'n' busy cpus. Only 'energy_cpu' should be included at
+the lowest level. 'energy_shared' is included at the next level which
+represents the group of cpus among which the resources are shared.
+
+This model is, of course, a simplification of reality. Thus, power/energy
+attributions might not always exactly represent how the hardware is designed.
+Also, busy power is likely to depend on the workload. It is therefore
+recommended to use a representative mix of workloads when characterizing the
+capacity states.
+
+If the group has no capacity scaling support, the list will contain a single
+state where power is the busy power attributed to the group. The capacity
+should be set to a default value (1024).
+
+When frequency domains include multiple power domains, the group representing
+the frequency domain and all child groups share capacity states. This must be
+indicated by setting the SD_SHARE_CAP_STATES sched_domain flag. All groups at
+all levels that share the capacity state must have the list of capacity states
+with the power set to the contribution of the individual group.
+
+2. Idle power information
+
+Stored in the idle_states list. The power number is the group idle power
+consumption in each idle state as well as when the group is idle but has not
+entered an idle-state ('active idle' as mentioned earlier). Due to the way the
+energy model is defined, the idle power of the deepest group idle state can
+alternatively be accounted for in the parent group busy power. In that case the
+group idle state power values are offset such that the idle power of the
+deepest state is zero. It is less intuitive, but it is easier to measure as
+idle power consumed by the group and the busy/idle power of the parent group
+cannot be distinguished without per group measurement points.
+
+Measuring capacity states and idle power:
+
+The capacity states' capacity and power can be estimated by running a benchmark
+workload at each available capacity state. By restricting the benchmark to run
+on subsets of cpus it is possible to extrapolate the power consumption of
+shared resources.
+
+ARM TC2 has two clusters of two and three cpus respectively. Each cluster has a
+shared L2 cache. TC2 has on-chip energy counters per cluster. Running a
+benchmark workload on just one cpu in a cluster means that power is consumed in
+the cluster (higher level group) and a single cpu (lowest level group). Adding
+another benchmark task to another cpu increases the power consumption by the
+amount consumed by the additional cpu. Hence, it is possible to extrapolate the
+cluster busy power.
+
+For platforms that don't have energy counters or equivalent instrumentation
+built-in, it may be possible to use an external DAQ to acquire similar data.
+
+If the benchmark includes some performance score (for example sysbench cpu
+benchmark), this can be used to record the compute capacity.
+
+Measuring idle power requires insight into the idle state implementation on the
+particular platform. Specifically, if the platform has coupled idle-states (or
+package states). To measure non-coupled per-cpu idle-states it is necessary to
+keep one cpu busy to keep any shared resources alive to isolate the idle power
+of the cpu from idle/busy power of the shared resources. The cpu can be tricked
+into different per-cpu idle states by disabling the other states. Based on
+various combinations of measurements with specific cpus busy and disabling
+idle-states it is possible to extrapolate the idle-state power.
diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
index 091d49ea80cf..766c01d321b5 100644
--- a/Documentation/scheduler/sched-hmp.txt
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -726,6 +726,16 @@ d. /proc/sys/kernel/sched_select_prev_cpu_us
Default value of sched_select_prev_cpu_us is 2000 (2ms). This can be
turned off by setting it to 0.
+e. /proc/sys/kernel/sched_short_burst_ns
+ This threshold controls whether a task is considered as "short-burst"
+ or not. "short-burst" tasks are eligible for packing to avoid overhead
+ associated with waking up an idle CPU. "non-idle" CPUs which are not
+ loaded with IRQs and can accommodate the waking task without exceeding
+ spill limits are considered. The ties are broken with load followed
+ by previous CPU. This tunable does not affect cluster selection.
+ It only affects CPU selection in a given cluster. This packing is
+ skipped for tasks that are eligible for "wake-up-idle" and "boost".
+
**** 5.2.4 Wakeup Logic for Task "p"
Wakeup task placement logic is as follows:
diff --git a/Documentation/scheduler/sched-tune.txt b/Documentation/scheduler/sched-tune.txt
new file mode 100644
index 000000000000..9bd2231c01b1
--- /dev/null
+++ b/Documentation/scheduler/sched-tune.txt
@@ -0,0 +1,366 @@
+ Central, scheduler-driven, power-performance control
+ (EXPERIMENTAL)
+
+Abstract
+========
+
+The topic of a single simple power-performance tunable, that is wholly
+scheduler centric, and has well defined and predictable properties has come up
+on several occasions in the past [1,2]. With techniques such as a scheduler
+driven DVFS [3], we now have a good framework for implementing such a tunable.
+This document describes the overall ideas behind its design and implementation.
+
+
+Table of Contents
+=================
+
+1. Motivation
+2. Introduction
+3. Signal Boosting Strategy
+4. OPP selection using boosted CPU utilization
+5. Per task group boosting
+6. Question and Answers
+ - What about "auto" mode?
+ - What about boosting on a congested system?
+ - How CPUs are boosted when we have tasks with multiple boost values?
+7. References
+
+
+1. Motivation
+=============
+
+Sched-DVFS [3] is a new event-driven cpufreq governor which allows the
+scheduler to select the optimal DVFS operating point (OPP) for running a task
+allocated to a CPU. The introduction of sched-DVFS enables running workloads at
+the most energy efficient OPPs.
+
+However, sometimes it may be desired to intentionally boost the performance of
+a workload even if that could imply a reasonable increase in energy
+consumption. For example, in order to reduce the response time of a task, we
+may want to run the task at a higher OPP than the one that is actually required
+by its CPU bandwidth demand.
+
+This last requirement is especially important if we consider that one of the
+main goals of the sched-DVFS component is to replace all currently available
+CPUFreq policies. Since sched-DVFS is event based, as opposed to the sampling
+driven governors we currently have, it is already more responsive at selecting
+the optimal OPP to run tasks allocated to a CPU. However, just tracking the
+actual task load demand may not be enough from a performance standpoint. For
+example, it is not possible to get behaviors similar to those provided by the
+"performance" and "interactive" CPUFreq governors.
+
+This document describes an implementation of a tunable, stacked on top of the
+sched-DVFS which extends its functionality to support task performance
+boosting.
+
+By "performance boosting" we mean the reduction of the time required to
+complete a task activation, i.e. the time elapsed from a task wakeup to its
+next deactivation (e.g. because it goes back to sleep or it terminates). For
+example, if we consider a simple periodic task which executes the same workload
+for 5[s] every 20[s] while running at a certain OPP, a boosted execution of
+that task must complete each of its activations in less than 5[s].
+
+A previous attempt [5] to introduce such a boosting feature has not been
+successful mainly because of the complexity of the proposed solution. The
+approach described in this document exposes a single simple interface to
+user-space. This single tunable knob allows the tuning of system wide
+scheduler behaviours ranging from energy efficiency at one end through to
+incremental performance boosting at the other end. This first tunable affects
+all tasks. However, a more advanced extension of the concept is also provided
+which uses CGroups to boost the performance of only selected tasks while using
+the energy efficient default for all others.
+
+The rest of this document introduces in more details the proposed solution
+which has been named SchedTune.
+
+
+2. Introduction
+===============
+
+SchedTune exposes a simple user-space interface with a single power-performance
+tunable:
+
+ /proc/sys/kernel/sched_cfs_boost
+
+This permits expressing a boost value as an integer in the range [0..100].
+
+A value of 0 (default) configures the CFS scheduler for maximum energy
+efficiency. This means that sched-DVFS runs the tasks at the minimum OPP
+required to satisfy their workload demand.
+A value of 100 configures scheduler for maximum performance, which translates
+to the selection of the maximum OPP on that CPU.
+
+The range between 0 and 100 can be set to satisfy other scenarios suitably. For
+example to satisfy interactive response or depending on other system events
+(battery level etc).
+
+A CGroup based extension is also provided, which permits further user-space
+defined task classification to tune the scheduler for different goals depending
+on the specific nature of the task, e.g. background vs interactive vs
+low-priority.
+
+The overall design of the SchedTune module is built on top of "Per-Entity Load
+Tracking" (PELT) signals and sched-DVFS by introducing a bias on the Operating
+Performance Point (OPP) selection.
+Each time a task is allocated on a CPU, sched-DVFS has the opportunity to tune
+the operating frequency of that CPU to better match the workload demand. The
+selection of the actual OPP being activated is influenced by the global boost
+value, or the boost value for the task CGroup when in use.
+
+This simple biasing approach leverages existing frameworks, which means minimal
+modifications to the scheduler, and yet it allows to achieve a range of
+different behaviours all from a single simple tunable knob.
+The only new concept introduced is that of signal boosting.
+
+
+3. Signal Boosting Strategy
+===========================
+
+The whole PELT machinery works based on the value of a few load tracking signals
+which basically track the CPU bandwidth requirements for tasks and the capacity
+of CPUs. The basic idea behind the SchedTune knob is to artificially inflate
+some of these load tracking signals to make a task or RQ appear more demanding
+than it actually is.
+
+Which signals have to be inflated depends on the specific "consumer". However,
+independently from the specific (signal, consumer) pair, it is important to
+define a simple and possibly consistent strategy for the concept of boosting a
+signal.
+
+A boosting strategy defines how the "abstract" user-space defined
+sched_cfs_boost value is translated into an internal "margin" value to be added
+to a signal to get its inflated value:
+
+ margin := boosting_strategy(sched_cfs_boost, signal)
+ boosted_signal := signal + margin
+
+Different boosting strategies were identified and analyzed before selecting the
+one found to be most effective.
+
+Signal Proportional Compensation (SPC)
+--------------------------------------
+
+In this boosting strategy the sched_cfs_boost value is used to compute a
+margin which is proportional to the complement of the original signal.
+When a signal has a maximum possible value, its complement is defined as
+the delta from the actual value and its possible maximum.
+
+Since the tunable implementation uses signals which have SCHED_LOAD_SCALE as
+the maximum possible value, the margin becomes:
+
+ margin := sched_cfs_boost * (SCHED_LOAD_SCALE - signal)
+
+Using this boosting strategy:
+- a 100% sched_cfs_boost means that the signal is scaled to the maximum value
+- each value in the range of sched_cfs_boost effectively inflates the signal in
+ question by a quantity which is proportional to the maximum value.
+
+For example, by applying the SPC boosting strategy to the selection of the OPP
+to run a task it is possible to achieve these behaviors:
+
+- 0% boosting: run the task at the minimum OPP required by its workload
+- 100% boosting: run the task at the maximum OPP available for the CPU
+- 50% boosting: run at the half-way OPP between minimum and maximum
+
+Which means that, at 50% boosting, a task will be scheduled to run at half of
+the maximum theoretically achievable performance on the specific target
+platform.
+
+A graphical representation of an SPC boosted signal is represented in the
+following figure where:
+ a) "-" represents the original signal
+ b) "b" represents a 50% boosted signal
+ c) "p" represents a 100% boosted signal
+
+
+ ^
+ | SCHED_LOAD_SCALE
+ +-----------------------------------------------------------------+
+ |pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp
+ |
+ | boosted_signal
+ | bbbbbbbbbbbbbbbbbbbbbbbb
+ |
+ | original signal
+ | bbbbbbbbbbbbbbbbbbbbbbbb+----------------------+
+ | |
+ |bbbbbbbbbbbbbbbbbb |
+ | |
+ | |
+ | |
+ | +-----------------------+
+ | |
+ | |
+ | |
+ |------------------+
+ |
+ |
+ +----------------------------------------------------------------------->
+
+The plot above shows a ramped load signal (titled 'original_signal') and its
+boosted equivalent. For each step of the original signal the boosted signal
+corresponding to a 50% boost is midway from the original signal and the upper
+bound. Boosting by 100% generates a boosted signal which is always saturated to
+the upper bound.
+
+
+4. OPP selection using boosted CPU utilization
+==============================================
+
+It is worth calling out that the implementation does not introduce any new load
+signals. Instead, it provides an API to tune existing signals. This tuning is
+done on demand and only in scheduler code paths where it is sensible to do so.
+The new API calls are defined to return either the default signal or a boosted
+one, depending on the value of sched_cfs_boost. This is a clean and non-invasive
+modification of the existing code paths.
+
+The signal representing a CPU's utilization is boosted according to the
+previously described SPC boosting strategy. To sched-DVFS, this allows a CPU
+(i.e. CFS run-queue) to appear more used than it actually is.
+
+Thus, with the sched_cfs_boost enabled we have the following main functions to
+get the current utilization of a CPU:
+
+ cpu_util()
+ boosted_cpu_util()
+
+The new boosted_cpu_util() is similar to the first but returns a boosted
+utilization signal which is a function of the sched_cfs_boost value.
+
+This function is used in the CFS scheduler code paths where sched-DVFS needs to
+decide the OPP to run a CPU at.
+For example, this allows selecting the highest OPP for a CPU which has
+the boost value set to 100%.
+
+
+5. Per task group boosting
+==========================
+
+The availability of a single knob which is used to boost all tasks in the
+system is certainly a simple solution but it quite likely doesn't fit many
+utilization scenarios, especially in the mobile device space.
+
+For example, on battery powered devices there usually are many background
+services which are long running and need energy efficient scheduling. On the
+other hand, some applications are more performance sensitive and require an
+interactive response and/or maximum performance, regardless of the energy cost.
+To better service such scenarios, the SchedTune implementation has an extension
+that provides a more fine grained boosting interface.
+
+A new CGroup controller, namely "schedtune", can be enabled, which allows one to
+define and configure task groups with different boosting values.
+Tasks that require special performance can be put into separate CGroups.
+The value of the boost associated with the tasks in this group can be specified
+using a single knob exposed by the CGroup controller:
+
+ schedtune.boost
+
+This knob allows the definition of a boost value that is to be used for
+SPC boosting of all tasks attached to this group.
+
+The current schedtune controller implementation is really simple and has these
+main characteristics:
+
+ 1) It is only possible to create 1 level depth hierarchies
+
+ The root control groups define the system-wide boost value to be applied
+ by default to all tasks. Its direct subgroups are named "boost groups" and
+ they define the boost value for specific set of tasks.
+ Further nested subgroups are not allowed since they do not have a sensible
+ meaning from a user-space standpoint.
+
+ 2) It is possible to define only a limited number of "boost groups"
+
+ This number is defined at compile time and by default configured to 16.
+ This is a design decision motivated by two main reasons:
+ a) In a real system we do not expect utilization scenarios with more than a few
+ boost groups. For example, a reasonable collection of groups could be
+ just "background", "interactive" and "performance".
+ b) It simplifies the implementation considerably, especially for the code
+ which has to compute the per CPU boosting once there are multiple
+ RUNNABLE tasks with different boost values.
+
+Such a simple design should allow servicing the main utilization scenarios identified
+so far. It provides a simple interface which can be used to manage the
+power-performance of all tasks or only selected tasks.
+Moreover, this interface can be easily integrated by user-space run-times (e.g.
+Android, ChromeOS) to implement a QoS solution for task boosting based on tasks
+classification, which has been a long standing requirement.
+
+Setup and usage
+---------------
+
+0. Use a kernel with CGROUP_SCHEDTUNE support enabled
+
+1. Check that the "schedtune" CGroup controller is available:
+
+ root@linaro-nano:~# cat /proc/cgroups
+ #subsys_name hierarchy num_cgroups enabled
+ cpuset 0 1 1
+ cpu 0 1 1
+ schedtune 0 1 1
+
+2. Mount a tmpfs to create the CGroups mount point (Optional)
+
+ root@linaro-nano:~# sudo mount -t tmpfs cgroups /sys/fs/cgroup
+
+3. Mount the "schedtune" controller
+
+ root@linaro-nano:~# mkdir /sys/fs/cgroup/stune
+ root@linaro-nano:~# sudo mount -t cgroup -o schedtune stune /sys/fs/cgroup/stune
+
+4. Setup the system-wide boost value (Optional)
+
+ If not configured the root control group has a 0% boost value, which
+ basically disables boosting for all tasks in the system thus running in
+ an energy-efficient mode.
+
+ root@linaro-nano:~# echo $SYSBOOST > /sys/fs/cgroup/stune/schedtune.boost
+
+5. Create task groups and configure their specific boost value (Optional)
+
+ For example here we create a "performance" boost group configure to boost
+ all its tasks to 100%
+
+ root@linaro-nano:~# mkdir /sys/fs/cgroup/stune/performance
+ root@linaro-nano:~# echo 100 > /sys/fs/cgroup/stune/performance/schedtune.boost
+
+6. Move tasks into the boost group
+
+ For example, the following moves the tasks with PID $TASKPID (and all its
+ threads) into the "performance" boost group.
+
+ root@linaro-nano:~# echo $TASKPID > /sys/fs/cgroup/stune/performance/cgroup.procs
+
+This simple configuration allows only the threads of the $TASKPID task to run,
+when needed, at the highest OPP in the most capable CPU of the system.
+
+
+6. Questions and Answers
+=======================
+
+What about "auto" mode?
+-----------------------
+
+The 'auto' mode as described in [5] can be implemented by interfacing SchedTune
+with some suitable user-space element. This element could use the exposed
+system-wide or cgroup based interface.
+
+How are multiple groups of tasks with different boost values managed?
+---------------------------------------------------------------------
+
+The current SchedTune implementation keeps track of the boosted RUNNABLE tasks
+on a CPU. Once sched-DVFS selects the OPP to run a CPU at, the CPU utilization
+is boosted with a value which is the maximum of the boost values of the
+currently RUNNABLE tasks in its RQ.
+
+This allows sched-DVFS to boost a CPU only while there are boosted tasks ready
+to run and switch back to the energy efficient mode as soon as the last boosted
+task is dequeued.
+
+
+7. References
+=============
+[1] http://lwn.net/Articles/552889
+[2] http://lkml.org/lkml/2012/5/18/91
+[3] http://lkml.org/lkml/2015/6/26/620
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
index 54944c71b819..2a4ee6302122 100644
--- a/Documentation/x86/pat.txt
+++ b/Documentation/x86/pat.txt
@@ -196,3 +196,35 @@ Another, more verbose way of getting PAT related debug messages is with
"debugpat" boot parameter. With this parameter, various debug messages are
printed to dmesg log.
+PAT Initialization
+------------------
+
+The following table describes how PAT is initialized under various
+configurations. The PAT MSR must be updated by Linux in order to support WC
+and WT attributes. Otherwise, the PAT MSR has the value programmed in it
+by the firmware. Note, Xen enables WC attribute in the PAT MSR for guests.
+
+ MTRR PAT Call Sequence PAT State PAT MSR
+ =========================================================
+ E E MTRR -> PAT init Enabled OS
+ E D MTRR -> PAT init Disabled -
+ D E MTRR -> PAT disable Disabled BIOS
+ D D MTRR -> PAT disable Disabled -
+ - np/E PAT -> PAT disable Disabled BIOS
+ - np/D PAT -> PAT disable Disabled -
+ E !P/E MTRR -> PAT init Disabled BIOS
+ D !P/E MTRR -> PAT disable Disabled BIOS
+ !M !P/E MTRR stub -> PAT disable Disabled BIOS
+
+ Legend
+ ------------------------------------------------
+ E Feature enabled in CPU
+ D Feature disabled/unsupported in CPU
+ np "nopat" boot option specified
+ !P CONFIG_X86_PAT option unset
+ !M CONFIG_MTRR option unset
+ Enabled PAT state set to enabled
+ Disabled PAT state set to disabled
+ OS PAT initializes PAT MSR with OS setting
+ BIOS PAT keeps PAT MSR with BIOS setting
+
diff --git a/Makefile b/Makefile
index 66b873791ce0..11ab8feb7fc9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 16
+SUBLEVEL = 21
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index 6496bb3961a2..34fa64a669ac 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -141,6 +141,7 @@ CONFIG_PROFILING=y
CONFIG_QUOTA=y
CONFIG_RTC_CLASS=y
CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECCOMP=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
index c3222a77ba24..3465a848d74d 100644
--- a/android/configs/android-recommended.cfg
+++ b/android/configs/android-recommended.cfg
@@ -11,6 +11,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_COMPACTION=y
CONFIG_DEBUG_RODATA=y
CONFIG_DM_UEVENT=y
@@ -118,6 +119,7 @@ CONFIG_TIMER_STATS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_UHID=y
+CONFIG_MEMORY_STATE_TIME=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_HIDDEV=y
diff --git a/arch/Kconfig b/arch/Kconfig
index 31a318a56d98..98f64ad1caf1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -423,6 +423,15 @@ config CC_STACKPROTECTOR_STRONG
endchoice
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+ bool
+ help
+ An architecture should select this if it can walk the kernel stack
+ frames to determine if an object is part of either the arguments
+ or local variables (i.e. that it excludes saved return addresses,
+ and similar) by implementing an inline arch_within_stack_frames(),
+ which is used by CONFIG_HARDENED_USERCOPY.
+
config HAVE_CONTEXT_TRACKING
bool
help
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aeb19021099e..c05ea2b54276 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -18,6 +18,20 @@ cflags-y += -fno-common -pipe -fno-builtin -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
+is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
+
+ifdef CONFIG_ISA_ARCOMPACT
+ifeq ($(is_700), 0)
+ $(error Toolchain not configured for ARCompact builds)
+endif
+endif
+
+ifdef CONFIG_ISA_ARCV2
+ifeq ($(is_700), 1)
+ $(error Toolchain not configured for ARCv2 builds)
+endif
+endif
+
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register defintion, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
@@ -48,8 +62,6 @@ endif
endif
-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
-
# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
ifeq ($(atleast_gcc48),y)
cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7fac7d85ed6a..2c30a016cf15 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -374,12 +374,6 @@ static inline int is_isa_arcompact(void)
return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
}
-#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
-#error "Toolchain not configured for ARCompact builds"
-#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
-#error "Toolchain not configured for ARCv2 builds"
-#endif
-
#endif /* __ASEMBLY__ */
#endif /* _ASM_ARC_ARCREGS_H */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index ad7860c5ce15..51597f344a62 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -142,7 +142,7 @@
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it with rest of callee_regs
- ld.as r12, [r12, PT_user_r25]
+ ld r12, [r12, PT_user_r25]
PUSH r12
#else
PUSH r25
@@ -198,7 +198,7 @@
; SP is back to start of pt_regs
#ifdef CONFIG_ARC_CURR_IN_REG
- st.as r12, [sp, PT_user_r25]
+ st r12, [sp, PT_user_r25]
#endif
.endm
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index c1d36458bfb7..4c6eed80cd8b 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
- TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 57af2f05ae84..3cab04255ae0 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -110,7 +110,7 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 001de4ce711e..11b50959f20e 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
* prelogue is setup (callee regs saved and then fp set and not other
* way around
*/
- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;
#endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ff7ff6cbb811..aaf1e2d1d900 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -914,6 +914,15 @@ void arc_cache_init(void)
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ /*
+ * Only master CPU needs to execute rest of function:
+ * - Assume SMP so all cores will have same cache config so
+ * any geomtry checks will be same for all
+ * - IOC setup / dma callbacks only need to be setup once
+ */
+ if (cpu)
+ return;
+
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 83082a5790d8..8d3d7a283eed 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -36,6 +36,7 @@ config ARM
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
diff --git a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
index bc60d9a08c0b..03fa038d9413 100644
--- a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
+++ b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
@@ -28,3 +28,21 @@
&mdss_mdp {
qcom,mdss-pref-prim-intf = "hdmi";
};
+
+&slim_aud {
+ tasha_codec {
+ wsa_spkr_sd1: msm_cdc_pinctrll {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_sd_active_mediabox>;
+ pinctrl-1 = <&spkr_1_sd_sleep_mediabox>;
+ };
+
+ wsa_spkr_sd2: msm_cdc_pinctrlr {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_sd_active_mediabox>;
+ pinctrl-1 = <&spkr_2_sd_sleep_mediabox>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
index 94ca102c9dc0..c80259bae5c0 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
@@ -53,7 +53,11 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
-
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
15 01 00 00 00 00 02 fb 01
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
index 49130cc96f79..10713dd2490c 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
@@ -29,7 +29,11 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
-
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
15 01 00 00 00 00 02 fb 01
15 01 00 00 00 00 02 ba 03
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
index 10be9cc183f6..aeeaaa7ca6fb 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
@@ -61,6 +61,11 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
15 01 00 00 00 00 02 fb 01
15 01 00 00 00 00 02 ba 03
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
index a2cac50325c5..f7122d34d8a9 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
@@ -29,6 +29,11 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
15 01 00 00 00 00 02 fb 01
15 01 00 00 00 00 02 ba 03
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 000000000000..7bd844ae6770
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,91 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_dual_s6e3ha3_amoled_cmd: qcom,mdss_dsi_s6e3ha3_amoled_wqhd_cmd{
+ qcom,mdss-dsi-panel-name =
+ "Dual s6e3ha3 amoled cmd mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <100>;
+ qcom,mdss-dsi-h-pulse-width = <40>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <31>;
+ qcom,mdss-dsi-v-front-porch = <30>;
+ qcom,mdss-dsi-v-pulse-width = <8>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+ qcom,mdss-dsi-on-command = [05 01 00 00 05 00 02 11 00
+ 39 01 00 00 00 00 05 2a 00 00 05 9f
+ 39 01 00 00 00 00 05 2b 00 00 09 ff
+ 39 01 00 00 00 00 03 f0 5a 5a
+ 39 01 00 00 00 00 02 b0 10
+ 39 01 00 00 00 00 02 b5 a0
+ 39 01 00 00 00 00 02 c4 03
+ 39 01 00 00 00 00 0a
+ f6 42 57 37 00 aa cc d0 00 00
+ 39 01 00 00 00 00 02 f9 03
+ 39 01 00 00 00 00 14
+ c2 00 00 d8 d8 00 80 2b 05 08
+ 0e 07 0b 05 0d 0a 15 13 20 1e
+ 39 01 00 00 78 00 03 f0 a5 a5
+ 39 01 00 00 00 00 02 35 00
+ 39 01 00 00 00 00 02 53 20
+ 39 01 00 00 00 00 02 51 60
+ 05 01 00 00 05 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 3c 00 02 28 00
+ 05 01 00 00 b4 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-tx-eot-append;
+ qcom,dcs-cmd-by-left;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-wr-mem-start = <0x2c>;
+ qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+ qcom,mdss-dsi-te-pin-select = <1>;
+ qcom,mdss-dsi-te-dcs-command = <1>;
+ qcom,mdss-dsi-te-check-enable;
+ qcom,mdss-dsi-te-using-te-pin;
+ qcom,mdss-dsi-panel-timings =
+ [eb 38 26 00 6a 66 32 3c 2f 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x2c>;
+ qcom,mdss-dsi-t-clk-pre = <0x1c>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-lp11-init;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <255>;
+ qcom,mdss-pan-physical-width-dimension = <68>;
+ qcom,mdss-pan-physical-height-dimension = <122>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi b/arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi
index ff4db75d9aff..a610bee8969a 100644
--- a/arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi
@@ -196,4 +196,15 @@
clock-names = "turing_q6_smmu_clk";
#clock-cells = <1>;
};
+
+ iommu_test_device {
+ compatible = "iommu-debug-test";
+ /*
+ * 42 shouldn't be used by anyone on the mmss_smmu. We just
+ * need _something_ here to get this node recognized by the
+ * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+ * anyways, so using a dummy value is ok.
+ */
+ iommus = <&mmss_bimc_smmu 42>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index 7f2d1cba5b42..7a96e19c62c5 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -619,7 +619,9 @@
qcom,msm-mbhc-hphl-swh = <1>;
qcom,msm-mbhc-gnd-swh = <1>;
qcom,us-euro-gpios = <&us_euro_gpio>;
- qcom,tasha-mclk-clk-freq = <9600000>;
+ qcom,hph-en0-gpio = <&tasha_hph_en0>;
+ qcom,hph-en1-gpio = <&tasha_hph_en1>;
+ qcom,msm-mclk-freq = <9600000>;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -674,8 +676,8 @@
asoc-codec = <&stub_codec>;
asoc-codec-names = "msm-stub-codec.1";
qcom,wsa-max-devs = <2>;
- qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
- <&wsa881x_0213>, <&wsa881x_0214>;
+ qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+ <&wsa881x_213>, <&wsa881x_214>;
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
"SpkrLeft", "SpkrRight";
};
@@ -728,7 +730,7 @@
qcom,us-euro-gpios = <&tavil_us_euro_sw>;
qcom,hph-en0-gpio = <&tavil_hph_en0>;
qcom,hph-en1-gpio = <&tavil_hph_en1>;
- qcom,tavil-mclk-clk-freq = <9600000>;
+ qcom,msm-mclk-freq = <9600000>;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -788,6 +790,111 @@
"SpkrLeft", "SpkrRight";
};
+ int_codec: sound {
+ status = "disabled";
+ compatible = "qcom,msmfalcon-asoc-snd";
+ qcom,model = "msmfalcon-snd-card";
+ qcom,wcn-btfm;
+ qcom,mi2s-audio-intf;
+ qcom,auxpcm-audio-intf;
+ qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+
+ reg = <0x1508a000 0x4>,
+ <0x1508b000 0x4>,
+ <0x1508c000 0x4>,
+ <0x1508d000 0x4>;
+ reg-names = "lpaif_pri_mode_muxsel",
+ "lpaif_sec_mode_muxsel",
+ "lpaif_tert_mode_muxsel",
+ "lpaif_quat_mode_muxsel";
+
+ qcom,msm-mclk-freq = <9600000>;
+ qcom,msm-mbhc-hphl-swh = <1>;
+ qcom,msm-mbhc-gnd-swh = <1>;
+ qcom,msm-hs-micbias-type = "external";
+ qcom,us-euro-gpios = <&us_euro_gpio>;
+ qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+ qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+ qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+ qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
+ qcom,audio-routing =
+ "RX_BIAS", "INT_MCLK0",
+ "SPK_RX_BIAS", "INT_MCLK0",
+ "INT_LDO_H", "INT_MCLK0",
+ "MIC BIAS External", "Handset Mic",
+ "MIC BIAS External2", "Headset Mic",
+ "MIC BIAS External", "Secondary Mic",
+ "AMIC1", "MIC BIAS External",
+ "AMIC2", "MIC BIAS External2",
+ "AMIC3", "MIC BIAS External",
+ "DMIC1", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic1",
+ "DMIC2", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic2",
+ "DMIC3", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic3",
+ "DMIC4", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic4",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+
+ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&compr>,
+ <&pcm_noirq>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-pcm-dsp.2", "msm-voip-dsp",
+ "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-lsm-client",
+ "msm-pcm-routing", "msm-compr-dsp",
+ "msm-pcm-dsp-noirq";
+ asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ <&dai_mi2s2>, <&dai_mi2s3>,
+ <&dai_int_mi2s0>, <&dai_int_mi2s1>,
+ <&dai_int_mi2s2>, <&dai_int_mi2s3>,
+ <&dai_int_mi2s4>, <&dai_int_mi2s5>,
+ <&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+ <&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>, <&sb_7_rx>, <&sb_7_tx>,
+ <&sb_8_tx>, <&sb_8_rx>,
+ <&usb_audio_rx>, <&usb_audio_tx>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+ <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+ asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+ "msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
+ "msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
+ "msm-dai-q6-mi2s.11", "msm-dai-q6-mi2s.12",
+ "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+ "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-dev.16398", "msm-dai-q6-dev.16399",
+ "msm-dai-q6-dev.16401", "msm-dai-q6-dev.16400",
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+ "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+ asoc-codec = <&stub_codec>, <&msm_digital_codec>,
+ <&pmic_analog_codec>, <&msm_sdw_codec>;
+ asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
+ "analog-codec", "msm_sdw_codec";
+
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
+ <&wsa881x_213_en>, <&wsa881x_214_en>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+ };
us_euro_gpio: msm_cdc_pinctrl@75 {
compatible = "qcom,msm-cdc-pinctrl";
@@ -800,38 +907,36 @@
compatible = "qcom,wcd9xxx-irq";
interrupt-controller;
#interrupt-cells = <1>;
- interrupt-parent = <&tlmm>;
- qcom,gpio-connect = <&tlmm 54 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&wcd_intr_default>;
+ interrupts = <0 177 0>;
+ interrupt-names = "wcd_irq";
};
clock_audio: audio_ext_clk {
compatible = "qcom,audio-ref-clk";
qcom,audio-ref-clk-gpio = <&pmfalcon_gpios 3 0>;
clock-names = "osr_clk";
- clocks = <&clock_gcc clk_div_clk1>;
+ clocks = <&clock_rpmcc AUDIO_PMI_CLK>;
qcom,node_has_rpm_clock;
#clock-cells = <1>;
pinctrl-names = "sleep", "active";
- pinctrl-0 = <&spkr_i2s_clk_sleep>;
- pinctrl-1 = <&spkr_i2s_clk_active>;
+ pinctrl-0 = <&lpi_mclk0_sleep>;
+ pinctrl-1 = <&lpi_mclk0_active>;
};
clock_audio_lnbb: audio_ext_clk_lnbb {
compatible = "qcom,audio-ref-clk";
clock-names = "osr_clk";
- clocks = <&clock_gcc clk_ln_bb_clk2>;
+ clocks = <&clock_rpmcc AUDIO_PMIC_LNBB_CLK>;
qcom,node_has_rpm_clock;
#clock-cells = <1>;
};
wcd_rst_gpio: msm_cdc_pinctrl@64 {
compatible = "qcom,msm-cdc-pinctrl";
- qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
+ qcom,cdc-rst-n-gpio = <&lpi_tlmm 24 0>;
pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&cdc_reset_active>;
- pinctrl-1 = <&cdc_reset_sleep>;
+ pinctrl-0 = <&lpi_cdc_reset_active>;
+ pinctrl-1 = <&lpi_cdc_reset_sleep>;
};
};
diff --git a/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
index 0f5c12856cc0..d3a920fff82e 100644
--- a/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
@@ -124,7 +124,7 @@
};
};
- qcom,pm2falcon@3 {
+ pm2falcon_3: qcom,pm2falcon@3 {
compatible ="qcom,spmi-pmic";
reg = <0x3 SPMI_USID>;
#address-cells = <2>;
@@ -249,9 +249,9 @@
qcom,sync-dly-us = <800>;
qcom,fs-curr-ua = <25000>;
qcom,cons-sync-write-delay-us = <1000>;
- qcom,en-phase-stag;
qcom,led-strings-list = [00 01 02];
qcom,en-ext-pfet-sc-pro;
+ qcom,loop-auto-gm-en;
qcom,pmic-revid = <&pm2falcon_revid>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
index 3fa85d918f6c..99f3f58cc20e 100644
--- a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
@@ -155,6 +155,7 @@
"usbin_i",
"usbin_v";
+ qcom,boost-threshold-ua = <100000>;
qcom,wipower-max-uw = <5000000>;
dpdm-supply = <&qusb_phy0>;
@@ -606,7 +607,6 @@
qcom,sync-dly-us = <800>;
qcom,fs-curr-ua = <25000>;
qcom,cons-sync-write-delay-us = <1000>;
- qcom,en-phase-stag;
qcom,led-strings-list = [00 01 02 03];
qcom,en-ext-pfet-sc-pro;
qcom,pmic-revid = <&pmi8998_revid>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-camera.dtsi b/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
index ec07030092ca..ec713e1b11fd 100644
--- a/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
@@ -644,6 +644,7 @@
qcom,msm-bus-vector-dyn-vote;
qcom,src-clock-rates = <100000000 200000000 320000000
480000000 640000000>;
+ qcom,micro-reset;
qcom,cpp-fw-payload-info {
qcom,stripe-base = <553>;
qcom,plane-base = <481>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk.dtsi
index 36441f9aa15a..69b0286dba09 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk.dtsi
@@ -29,6 +29,89 @@
};
};
+&tlmm {
+ cam_sensor_front_active: cam_sensor_front_active {
+ /* RESET */
+ mux {
+ pins = "gpio9";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_front_suspend: cam_sensor_front_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio9";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_active: cam_sensor_rear2_active {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio28","gpio27";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28","gpio27";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio28","gpio27";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio28","gpio27";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_active: cam_sensor_rear_active {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio30","gpio29";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio30","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio30","gpio29";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio30","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+};
+
&cci {
actuator0: qcom,actuator@0 {
cell-index = <0>;
@@ -50,7 +133,22 @@
reg = <0x1>;
compatible = "qcom,actuator";
qcom,cci-master = <0>;
- gpios = <&tlmm 29 0>;
+ gpios = <&tlmm 27 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ };
+
+ ois0: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 27 0>;
qcom,gpio-vaf = <0>;
qcom,gpio-req-tbl-num = <0>;
qcom,gpio-req-tbl-flags = <0>;
@@ -58,6 +156,7 @@
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_actuator_vaf_active>;
pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ status = "disabled";
};
eeprom0: qcom,eeprom@0 {
@@ -104,35 +203,38 @@
cell-index = <1>;
reg = <0x1>;
compatible = "qcom,eeprom";
- cam_vdig-supply = <&pm8998_lvs1>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
- qcom,cam-vreg-min-voltage = <0 0 3312000>;
- qcom,cam-vreg-max-voltage = <0 0 3600000>;
- qcom,cam-vreg-op-mode = <0 0 80000>;
+ cam_vdig-supply = <&pm8998_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_rear2_active>;
- pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_rear2_suspend>;
- gpios = <&tlmm 15 0>,
- <&tlmm 9 0>,
- <&tlmm 8 0>;
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&pm8998_gpios 20 0>,
+ <&tlmm 29 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
- "CAM_VANA1";
+ "CAM_VDIG",
+ "CAM_VANA";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ qcom,cci-master = <0>;
status = "ok";
- clocks = <&clock_mmss clk_mclk2_clk_src>,
- <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clocks = <&clock_mmss clk_mclk1_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk1_clk>;
clock-names = "cam_src_clk", "cam_clk";
qcom,clock-rates = <24000000 0>;
};
@@ -152,12 +254,12 @@
qcom,cam-vreg-op-mode = <0 80000 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_sensor_mclk1_active
+ pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_front_active>;
- pinctrl-1 = <&cam_sensor_mclk1_suspend
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_front_suspend>;
- gpios = <&tlmm 14 0>,
- <&tlmm 28 0>,
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
<&pm8998_gpios 9 0>;
qcom,gpio-reset = <1>;
qcom,gpio-vdig = <2>;
@@ -170,8 +272,8 @@
qcom,sensor-mode = <0>;
qcom,cci-master = <1>;
status = "ok";
- clocks = <&clock_mmss clk_mclk1_clk_src>,
- <&clock_mmss clk_mmss_camss_mclk1_clk>;
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
clock-names = "cam_src_clk", "cam_clk";
qcom,clock-rates = <24000000 0>;
};
@@ -180,7 +282,8 @@
cell-index = <0>;
compatible = "qcom,camera";
reg = <0x0>;
- qcom,special-support-sensors = "imx362_gt24c64a";
+ qcom,special-support-sensors = "imx362_gt24c64a",
+ "s5k3m3sm", "s5k2l7sx";
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
qcom,mount-angle = <270>;
@@ -230,36 +333,42 @@
qcom,csiphy-sd-index = <1>;
qcom,csid-sd-index = <1>;
qcom,mount-angle = <90>;
+ qcom,led-flash-src = <&led_flash0>;
+ qcom,actuator-src = <&actuator1>;
qcom,eeprom-src = <&eeprom1>;
- cam_vdig-supply = <&pm8998_lvs1>;
+ qcom,ois-src = <&ois0>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
- qcom,cam-vreg-min-voltage = <0 0 3312000>;
- qcom,cam-vreg-max-voltage = <0 0 3600000>;
- qcom,cam-vreg-op-mode = <0 0 80000>;
+ cam_vdig-supply = <&pm8998_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_rear2_active>;
- pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_rear2_suspend>;
- gpios = <&tlmm 15 0>,
- <&tlmm 9 0>,
- <&tlmm 8 0>;
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&pm8998_gpios 20 0>,
+ <&tlmm 29 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
- "CAM_VANA1";
+ "CAM_VDIG",
+ "CAM_VANA";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <0>;
status = "ok";
- clocks = <&clock_mmss clk_mclk2_clk_src>,
- <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clocks = <&clock_mmss clk_mclk1_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk1_clk>;
clock-names = "cam_src_clk", "cam_clk";
qcom,clock-rates = <24000000 0>;
};
@@ -271,6 +380,7 @@
qcom,csiphy-sd-index = <2>;
qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
+ qcom,eeprom-src = <&eeprom2>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pm8998_l22>;
cam_vdig-supply = <&pm8998_s3>;
@@ -283,9 +393,9 @@
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_rear2_active>;
+ &cam_sensor_front_active>;
pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_rear2_suspend>;
+ &cam_sensor_front_suspend>;
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&pm8998_gpios 9 0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera.dtsi
index d0fa9921da3f..976750d6a26f 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera.dtsi
@@ -424,6 +424,7 @@
reset-names = "micro_iface_reset";
qcom,src-clock-rates = <100000000 200000000 576000000
600000000>;
+ qcom,micro-reset;
qcom,cpp-fw-payload-info {
qcom,stripe-base = <790>;
qcom,plane-base = <715>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-cdp.dtsi
index 505b13aeb342..78ba56b3e3c2 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-cdp.dtsi
@@ -288,6 +288,7 @@
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
qcom,mount-angle = <270>;
+ qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator0>;
qcom,ois-src = <&ois0>;
qcom,eeprom-src = <&eeprom0>;
@@ -378,6 +379,7 @@
qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
qcom,actuator-src = <&actuator1>;
+ qcom,led-flash-src = <&led_flash1>;
qcom,eeprom-src = <&eeprom2>;
cam_vio-supply = <&pmfalcon_l11>;
cam_vana-supply = <&pm2falcon_bob>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-mtp.dtsi
index 505b13aeb342..78ba56b3e3c2 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-camera-sensor-mtp.dtsi
@@ -288,6 +288,7 @@
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
qcom,mount-angle = <270>;
+ qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator0>;
qcom,ois-src = <&ois0>;
qcom,eeprom-src = <&eeprom0>;
@@ -378,6 +379,7 @@
qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
qcom,actuator-src = <&actuator1>;
+ qcom,led-flash-src = <&led_flash1>;
qcom,eeprom-src = <&eeprom2>;
cam_vio-supply = <&pmfalcon_l11>;
cam_vana-supply = <&pm2falcon_bob>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon-audio.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon-audio.dtsi
index 26c021da803d..b05707c6c585 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon-audio.dtsi
@@ -10,18 +10,60 @@
* GNU General Public License for more details.
*/
+#include "msm-audio.dtsi"
+#include "msmfalcon-audio.dtsi"
+
+&pm2falcon_3 {
+ /delete-node/analog-codec;
+};
+
+&soc {
+ /delete-node/msm-sdw-codec@152c1000;
+ /delete-node/sound;
+};
+
+&slim_aud {
+ tasha_codec {
+ clocks = <&clock_audio clk_audio_pmi_clk>,
+ <&clock_audio clk_audio_ap_clk2>;
+ };
+ tavil_codec {
+ clocks = <&clock_audio_lnbb clk_audio_pmi_lnbb_clk>;
+ };
+};
+
+&tasha_hph_en0 {
+ /delete-property/pinctrl-0;
+ /delete-property/pinctrl-1;
+};
+
+&tasha_hph_en1 {
+ /delete-property/pinctrl-0;
+ /delete-property/pinctrl-1;
+};
+
&clock_audio {
qcom,audio-ref-clk-gpio = <&pmfalcon_gpios 3 0>;
+ clocks = <&clock_gcc clk_div_clk1>;
+ pinctrl-0 = <&spkr_i2s_clk_sleep>;
+ pinctrl-1 = <&spkr_i2s_clk_active>;
};
-&pmfalcon_gpios {
- gpio@c200 {
- status = "ok";
- qcom,mode = <1>;
- qcom,pull = <5>;
- qcom,vin-sel = <0>;
- qcom,src-sel = <2>;
- qcom,master-en = <1>;
- qcom,out-strength = <2>;
- };
+&clock_audio_lnbb {
+ clocks = <&clock_gcc clk_ln_bb_clk2>;
+};
+
+&wcd_rst_gpio {
+ qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
+ pinctrl-0 = <&cdc_reset_active>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+};
+
+&wcd9xxx_intc {
+ interrupt-parent = <&tlmm>;
+ qcom,gpio-connect = <&tlmm 54 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_intr_default>;
+ /delete-property/interrupts;
+ /delete-property/interrupt-names;
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon.dtsi
index 7cf02e09e785..5b4dda445ff3 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-msmfalcon.dtsi
@@ -3104,8 +3104,6 @@
#include "msm8998-mdss.dtsi"
#include "msm8998-mdss-pll.dtsi"
#include "msm8998-blsp.dtsi"
-#include "msm-audio.dtsi"
-#include "msmfalcon-audio.dtsi"
#include "msm-smb138x.dtsi"
/* GPU overrides */
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-pmfalcon.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-pmfalcon.dtsi
index fe47565fc1de..74f43c15dd72 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-pmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-pmfalcon.dtsi
@@ -10,10 +10,6 @@
* GNU General Public License for more details.
*/
-&clock_audio {
- /delete-property/qcom,audio-ref-clk-gpio;
-};
-
&slim_aud {
tasha_codec {
/delete-property/cdc-vdd-buck-supply;
diff --git a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
index d4a2290c9b0a..bd20ae0a9b85 100644
--- a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
@@ -2957,5 +2957,59 @@
};
};
};
+
+ spkr_1_sd_mediabox {
+ spkr_1_sd_sleep_mediabox: spkr_1_sd_sleep_mediabox {
+ mux {
+ pins = "gpio85";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio85";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+ spkr_1_sd_active_mediabox: spkr_1_sd_active_mediabox {
+ mux {
+ pins = "gpio85";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio85";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+	spkr_2_sd_mediabox {
+ spkr_2_sd_sleep_mediabox: spkr_2_sd_sleep_mediabox {
+ mux {
+ pins = "gpio112";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio112";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+ spkr_2_sd_active_mediabox: spkr_2_sd_active_mediabox {
+ mux {
+ pins = "gpio112";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio112";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
index dcd84e79ba1b..0352874c64c7 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
@@ -101,7 +101,39 @@
pinctrl-0 = <&uart_console_active>;
};
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 92 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 93 0x00>;
+ qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <92 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_gcc clk_ln_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
&pm8998_gpios {
+ /* GPIO 5 for Home Key */
+ gpio@c400 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
/* GPIO 6 for Vol+ Key */
gpio@c500 {
status = "okay";
@@ -111,6 +143,15 @@
qcom,src-sel = <0>;
qcom,out-strength = <1>;
};
+
+ /* GPIO 21 (NFC_CLK_REQ) */
+ gpio@d400 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
};
&soc {
@@ -389,3 +430,15 @@
qcom,thermal-node;
};
};
+
+&red_led {
+ /delete-property/ linux,default-trigger;
+};
+
+&green_led {
+ /delete-property/ linux,default-trigger;
+};
+
+&blue_led {
+ /delete-property/ linux,default-trigger;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
index 2d1616412caa..7b67bdb7243b 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
@@ -100,6 +100,28 @@
pinctrl-0 = <&uart_console_active>;
};
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 92 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 93 0x00>;
+ qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <92 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_gcc clk_ln_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
&pm8998_gpios {
/* GPIO 6 for Vol+ Key */
gpio@c500 {
@@ -110,6 +132,15 @@
qcom,src-sel = <0>;
qcom,out-strength = <1>;
};
+
+ /* GPIO 21 (NFC_CLK_REQ) */
+ gpio@d400 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
};
&soc {
@@ -272,3 +303,15 @@
qcom,thermal-node;
};
};
+
+&red_led {
+ /delete-property/ linux,default-trigger;
+};
+
+&green_led {
+ /delete-property/ linux,default-trigger;
+};
+
+&blue_led {
+ /delete-property/ linux,default-trigger;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi
index a81287c36266..fdc452a47a46 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi
@@ -168,6 +168,7 @@
reset-names = "micro_iface_reset";
qcom,src-clock-rates = <100000000 200000000 384000000 404000000
480000000 576000000 600000000>;
+ qcom,micro-reset;
qcom,cpp-fw-payload-info {
qcom,stripe-base = <790>;
qcom,plane-base = <715>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-cdp.dts b/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-cdp.dts
index e88ee107e280..5ea248f6f2dc 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-cdp.dts
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-cdp.dts
@@ -149,3 +149,7 @@
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,panel-mode-gpio = <&tlmm 91 0>;
};
+
+&pm2falcon_wled {
+ qcom,led-strings-list = [01 02];
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-mtp.dts b/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-mtp.dts
index ccc94307277d..7c0c53033a44 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-mtp.dts
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-mtp.dts
@@ -178,3 +178,7 @@
};
};
};
+
+&pm2falcon_wled {
+ qcom,led-strings-list = [01 02];
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-qrd.dts b/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-qrd.dts
index cf313cfea768..f0092a5718ab 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-qrd.dts
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.1-interposer-msmfalcon-qrd.dts
@@ -113,3 +113,32 @@
qcom,vdd-voltage-level = <0 925000 925000>;
core-supply = <&pmfalcon_l1>;
};
+
+&pm2falcon_gpios {
+ /* GPIO 7 for VOL_UP */
+ gpio@c600 {
+ status = "ok";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+};
+
+&soc {
+ gpio_keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+ status = "ok";
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pm2falcon_gpios 7 0x1>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index f570819fa70f..8ce9e7c22761 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -2970,13 +2970,9 @@
qcom,icnss@18800000 {
compatible = "qcom,icnss";
reg = <0x18800000 0x800000>,
- <0x10AC000 0x20>,
<0xa0000000 0x10000000>,
<0xb0000000 0x10000>;
- reg-names = "membase", "mpm_config",
- "smmu_iova_base", "smmu_iova_ipa";
- clocks = <&clock_gcc clk_rf_clk2_pin>;
- clock-names = "cxo_ref_clk_pin";
+ reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
iommus = <&anoc2_smmu 0x1900>,
<&anoc2_smmu 0x1901>;
interrupts = <0 413 0 /* CE0 */ >,
@@ -2992,12 +2988,6 @@
<0 424 0 /* CE10 */ >,
<0 425 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x100000>;
- vdd-0.8-cx-mx-supply = <&pm8998_l5>;
- vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
- vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
- vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
- qcom,vdd-0.8-cx-mx-config = <800000 800000>;
- qcom,vdd-3.3-ch0-config = <3104000 3312000>;
qcom,icnss-vadc = <&pm8998_vadc>;
qcom,icnss-adc_tm = <&pm8998_adc_tm>;
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-audio.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-audio.dtsi
index 7b216a0aa990..bf367944f50c 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-audio.dtsi
@@ -12,6 +12,7 @@
*/
#include "msmfalcon-wsa881x.dtsi"
+#include "msmfalcon-lpi.dtsi"
&slim_aud {
msm_dai_slim {
@@ -31,8 +32,8 @@
qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
clock-names = "wcd_clk", "wcd_native_clk";
- clocks = <&clock_audio clk_audio_pmi_clk>,
- <&clock_audio clk_audio_ap_clk2>;
+ clocks = <&clock_audio AUDIO_PMI_CLK>,
+ <&clock_audio AUDIO_AP_CLK2>;
cdc-vdd-mic-bias-supply = <&pm2falcon_bob>;
qcom,cdc-vdd-mic-bias-voltage = <3300000 3300000>;
@@ -64,7 +65,7 @@
qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
clock-names = "wcd_clk";
- clocks = <&clock_audio_lnbb clk_audio_pmi_lnbb_clk>;
+ clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
cdc-vdd-mic-bias-supply = <&pm2falcon_bob>;
qcom,cdc-vdd-mic-bias-voltage = <3300000 3300000>;
@@ -94,3 +95,153 @@
};
};
};
+
+&pm2falcon_3 {
+ pmic_analog_codec: analog-codec@f000 {
+ status = "disabled";
+ compatible = "qcom,pmic-analog-codec";
+ reg = <0xf000 0x200>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x1 0xf0 0x0>,
+ <0x1 0xf0 0x1>,
+ <0x1 0xf0 0x2>,
+ <0x1 0xf0 0x3>,
+ <0x1 0xf0 0x4>,
+ <0x1 0xf0 0x5>,
+ <0x1 0xf0 0x6>,
+ <0x1 0xf0 0x7>;
+ interrupt-names = "spk_cnp_int",
+ "spk_clip_int",
+ "spk_ocp_int",
+ "ins_rem_det1",
+ "but_rel_det",
+ "but_press_det",
+ "ins_rem_det",
+ "mbhc_int";
+
+ cdc-vdda-cp-supply = <&pmfalcon_s4>;
+ qcom,cdc-vdda-cp-voltage = <1900000 2050000>;
+ qcom,cdc-vdda-cp-current = <50000>;
+
+ cdc-vdd-pa-supply = <&pmfalcon_s4>;
+ qcom,cdc-vdd-pa-voltage = <2040000 2040000>;
+ qcom,cdc-vdd-pa-current = <260000>;
+
+ cdc-vdd-mic-bias-supply = <&pm2falcon_l7>;
+ qcom,cdc-vdd-mic-bias-voltage = <3125000 3125000>;
+ qcom,cdc-vdd-mic-bias-current = <5000>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+
+ qcom,cdc-static-supplies = "cdc-vdda-cp",
+ "cdc-vdd-pa";
+
+ qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
+
+ cdc_pdm_gpios: cdc_pdm_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_pdm_gpios_active>;
+ pinctrl-1 = <&cdc_pdm_gpios_sleep>;
+ qcom,lpi-gpios;
+ };
+
+ cdc_comp_gpios: cdc_comp_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_comp_gpios_active>;
+ pinctrl-1 = <&cdc_comp_gpios_sleep>;
+ qcom,lpi-gpios;
+ };
+ /*
+ * Not marking address @ as driver searches this child
+ * with name msm-dig-codec
+ */
+ msm_digital_codec: msm-dig-codec {
+ compatible = "qcom,msm-digital-codec";
+ reg = <0x152c0000 0x0>;
+ cdc_dmic_gpios: cdc_dmic_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_dmic12_gpios_active
+ &cdc_dmic34_gpios_active>;
+ pinctrl-1 = <&cdc_dmic12_gpios_sleep
+ &cdc_dmic34_gpios_sleep>;
+ qcom,lpi-gpios;
+ };
+ };
+ };
+};
+
+&soc {
+ msm_sdw_codec: msm-sdw-codec@152c1000 {
+ status = "disabled";
+ compatible = "qcom,msm-sdw-codec";
+ reg = <0x152c1000 0x0>;
+ interrupts = <0 161 0>;
+ interrupt-names = "swr_master_irq";
+
+ cdc_sdw_gpios: sdw_clk_data_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&sdw_clk_active &sdw_data_active>;
+ pinctrl-1 = <&sdw_clk_sleep &sdw_data_sleep>;
+ };
+
+ wsa_spkr_en1: wsa_spkr_en1_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_sd_n_active>;
+ pinctrl-1 = <&spkr_1_sd_n_sleep>;
+ };
+
+ wsa_spkr_en2: wsa_spkr_en2_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_sd_n_active>;
+ pinctrl-1 = <&spkr_2_sd_n_sleep>;
+ };
+
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_211_en: wsa881x_en@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ };
+
+ wsa881x_212_en: wsa881x_en@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170212>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ };
+
+ wsa881x_213_en: wsa881x_en@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ };
+
+ wsa881x_214_en: wsa881x_en@21170214 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170214>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ };
+ };
+ };
+};
+
+&pmfalcon_gpios {
+ gpio@c200 {
+ status = "ok";
+ qcom,mode = <1>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
+ qcom,out-strength = <2>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
index cb5fce378b6c..93c615639be9 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
@@ -41,6 +41,20 @@
clock-names = "bus_clk", "bus_a_clk";
clocks = <&clock_rpmcc RPM_AGGR2_NOC_CLK>,
<&clock_rpmcc RPM_AGGR2_NOC_A_CLK>;
+ qcom,node-qos-clks {
+ clock-names =
+ "clk-ipa-clk",
+ "clk-sdcc1-ahb-no-rate",
+ "clk-sdcc2-ahb-no-rate",
+ "clk-blsp1-ahb-no-rate",
+ "clk-blsp2-ahb-no-rate";
+ clocks =
+ <&clock_rpmcc RPM_IPA_CLK>,
+ <&clock_gcc GCC_SDCC1_AHB_CLK>,
+ <&clock_gcc GCC_SDCC2_AHB_CLK>,
+ <&clock_gcc GCC_BLSP1_AHB_CLK>,
+ <&clock_gcc GCC_BLSP2_AHB_CLK>;
+ };
};
fab_bimc: fab-bimc {
@@ -85,10 +99,32 @@
qcom,bus-type = <1>;
qcom,qos-off = <4096>;
qcom,base-offset = <20480>;
- qcom,util-fact = <154>;
+ qcom,util-fact = <153>;
clock-names = "bus_clk", "bus_a_clk";
clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
<&clock_rpmcc MMSSNOC_AXI_A_CLK>;
+ clk-camss-ahb-no-rate-supply =
+ <&gdsc_camss_top>;
+ clk-video-ahb-no-rate-supply =
+ <&gdsc_venus>;
+ clk-video-axi-no-rate-supply =
+ <&gdsc_venus>;
+ qcom,node-qos-clks {
+ clock-names =
+ "clk-mmssnoc-axi-no-rate",
+ "clk-noc-cfg-ahb-no-rate",
+ "clk-mnoc-ahb-no-rate",
+ "clk-camss-ahb-no-rate",
+ "clk-video-ahb-no-rate",
+ "clk-video-axi-no-rate";
+ clocks =
+ <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_gcc GCC_MMSS_NOC_CFG_AHB_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_VIDEO_AHB_CLK>,
+ <&clock_mmss MMSS_VIDEO_AXI_CLK>;
+ };
};
fab_snoc: fab-snoc {
@@ -269,12 +305,9 @@
qcom,agg-ports = <2>;
qcom,ap-owned;
qcom,qport = <1>;
- qcom,qos-mode = "fixed";
+ qcom,qos-mode = "bypass";
qcom,connections = <&slv_hmss_l3
&slv_ebi &slv_bimc_snoc>;
- qcom,prio-lvl = <0>;
- qcom,prio-rd = <0>;
- qcom,prio-wr = <0>;
qcom,bus-dev = <&fab_bimc>;
qcom,mas-rpm-id = <ICBID_MASTER_GFX3D>;
};
@@ -303,6 +336,22 @@
qcom,mas-rpm-id = <ICBID_MASTER_SNOC_BIMC>;
};
+ mas_pimem: mas-pimem {
+ cell-id = <MSM_BUS_MASTER_PIMEM>;
+ label = "mas-pimem";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <2>;
+ qcom,ap-owned;
+ qcom,qport = <4>;
+ qcom,qos-mode = "fixed";
+ qcom,connections = <&slv_hmss_l3 &slv_ebi>;
+ qcom,prio-lvl = <0>;
+ qcom,prio-rd = <0>;
+ qcom,prio-wr = <0>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,mas-rpm-id = <ICBID_MASTER_PIMEM>;
+ };
+
mas_snoc_cnoc: mas-snoc-cnoc {
cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
label = "mas-snoc-cnoc";
@@ -432,7 +481,20 @@
qcom,qos-mode = "bypass";
qcom,connections = <&slv_mnoc_bimc>;
qcom,bus-dev = <&fab_mnoc>;
+ qcom,vrail-comp = <50>;
qcom,mas-rpm-id = <ICBID_MASTER_MDP0>;
+ clk-mdss-axi-no-rate-supply =
+ <&gdsc_mdss>;
+ clk-mdss-ahb-no-rate-supply =
+ <&gdsc_mdss>;
+ qcom,node-qos-clks {
+ clock-names =
+ "clk-mdss-ahb-no-rate",
+ "clk-mdss-axi-no-rate";
+ clocks =
+ <&clock_mmss MMSS_MDSS_AHB_CLK>,
+ <&clock_mmss MMSS_MDSS_AXI_CLK>;
+ };
};
mas_mdp_p1: mas-mdp-p1 {
@@ -445,6 +507,7 @@
qcom,qos-mode = "bypass";
qcom,connections = <&slv_mnoc_bimc>;
qcom,bus-dev = <&fab_mnoc>;
+ qcom,vrail-comp = <50>;
qcom,mas-rpm-id = <ICBID_MASTER_MDP1>;
};
@@ -546,6 +609,7 @@
qcom,bus-dev = <&fab_snoc>;
qcom,mas-rpm-id = <ICBID_MASTER_A2NOC_SNOC>;
};
+
/*Internal nodes*/
/*Slaves*/
@@ -1097,6 +1161,9 @@
qcom,bus-dev = <&fab_mnoc>;
qcom,connections = <&mas_mnoc_bimc>;
qcom,slv-rpm-id = <ICBID_SLAVE_MNOC_BIMC>;
+ qcom,enable-only-clk;
+ clock-names = "node_clk";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>;
};
slv_srvc_mnoc:slv-srvc-mnoc {
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-cdp.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-cdp.dtsi
index ff14003877d1..9cf8a200a327 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-cdp.dtsi
@@ -42,4 +42,7 @@
};
&soc {
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-common.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-common.dtsi
index 334158ea285a..50513ceabbeb 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-common.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-common.dtsi
@@ -179,6 +179,110 @@
qcom,vdd-voltage-level = <0 925000 925000>;
vdd-core-voltage-level = <0 1800000 1800000>;
qcom,vbus-valid-override;
+ qcom,qmp-phy-init-seq =
+ /* <reg_offset, value, delay> */
+ <0xac 0x14 0x00
+ 0x34 0x08 0x00
+ 0x174 0x30 0x00
+ 0x3c 0x06 0x00
+ 0xbc 0x00 0x00
+ 0xc0 0x08 0x00
+ 0x194 0x06 0x00
+ 0x19c 0x01 0x00
+ 0x178 0x00 0x00
+ 0xd0 0x82 0x00
+ 0xdc 0x55 0x00
+ 0xe0 0x55 0x00
+ 0xe4 0x03 0x00
+ 0x78 0x0b 0x00
+ 0x84 0x16 0x00
+ 0x90 0x28 0x00
+ 0x108 0x80 0x00
+ 0x10c 0x00 0x00
+ 0x184 0x0a 0x00
+ 0x4c 0x15 0x00
+ 0x50 0x34 0x00
+ 0x54 0x00 0x00
+ 0xc8 0x00 0x00
+ 0x18c 0x00 0x00
+ 0xcc 0x00 0x00
+ 0x128 0x00 0x00
+ 0x0c 0x0a 0x00
+ 0x10 0x01 0x00
+ 0x1c 0x31 0x00
+ 0x20 0x01 0x00
+ 0x14 0x00 0x00
+ 0x18 0x00 0x00
+ 0x24 0xde 0x00
+ 0x28 0x07 0x00
+ 0x48 0x0f 0x00
+ 0x70 0x0f 0x00
+ 0x100 0x80 0x00
+ 0xa8 0x01 0x00
+ 0x430 0x0b 0x00
+ 0x830 0x0b 0x00
+ 0x444 0x00 0x00
+ 0x844 0x00 0x00
+ 0x43c 0x00 0x00
+ 0x83c 0x00 0x00
+ 0x440 0x00 0x00
+ 0x840 0x00 0x00
+ 0x408 0x0a 0x00
+ 0x808 0x0a 0x00
+ 0x414 0x06 0x00
+ 0x814 0x06 0x00
+ 0x434 0x75 0x00
+ 0x834 0x75 0x00
+ 0x4d4 0x02 0x00
+ 0x8d4 0x02 0x00
+ 0x4d8 0x4e 0x00
+ 0x8d8 0x4e 0x00
+ 0x4dc 0x18 0x00
+ 0x8dc 0x18 0x00
+ 0x4f8 0x77 0x00
+ 0x8f8 0x77 0x00
+ 0x4fc 0x80 0x00
+ 0x8fc 0x80 0x00
+ 0x4c0 0x0a 0x00
+ 0x8c0 0x0a 0x00
+ 0x504 0x03 0x00
+ 0x904 0x03 0x00
+ 0x50c 0x16 0x00
+ 0x90c 0x16 0x00
+ 0x500 0x00 0x00
+ 0x900 0x00 0x00
+ 0x260 0x10 0x00
+ 0x660 0x10 0x00
+ 0x2a4 0x12 0x00
+ 0x6a4 0x12 0x00
+ 0x28c 0xc6 0x00
+ 0x68c 0xc6 0x00
+ 0x244 0x00 0x00
+ 0x644 0x00 0x00
+ 0x248 0x00 0x00
+ 0x648 0x00 0x00
+ 0xc0c 0x9f 0x00
+ 0xc24 0x17 0x00
+ 0xc28 0x0f 0x00
+ 0xcc8 0x83 0x00
+ 0xcc4 0x02 0x00
+ 0xccc 0x09 0x00
+ 0xcd0 0xa2 0x00
+ 0xcd4 0x85 0x00
+ 0xc80 0xd1 0x00
+ 0xc84 0x1f 0x00
+ 0xc88 0x47 0x00
+ 0xcb8 0x75 0x00
+ 0xcbc 0x13 0x00
+ 0xcb0 0x86 0x00
+ 0xca0 0x04 0x00
+ 0xc8c 0x44 0x00
+ 0xc70 0xe7 0x00
+ 0xc74 0x03 0x00
+ 0xc78 0x40 0x00
+ 0xc7c 0x00 0x00
+ 0xdd8 0x88 0x00
+ 0xffffffff 0xffffffff 0x00>;
qcom,qmp-phy-reg-offset =
<0xd74 /* USB3_PHY_PCS_STATUS */
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
index 2f1ef974811e..d5e27cc05979 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
@@ -173,7 +173,7 @@
<&funnel_merg_in_funnel_in0>;
};
};
- port@3 {
+ port@2 {
reg = <6>;
funnel_in0_in_funnel_qatb: endpoint {
slave-mode;
@@ -181,7 +181,7 @@
<&funnel_qatb_out_funnel_in0>;
};
};
- port@4 {
+ port@3 {
reg = <7>;
funnel_in0_in_stm: endpoint {
slave-mode;
@@ -215,7 +215,23 @@
<&funnel_merg_in_funnel_in1>;
};
};
- port@5 {
+ port@1 {
+ reg = <2>;
+ funnel_in1_in_tpda_nav: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_nav_out_funnel_in1>;
+ };
+ };
+ port@2 {
+ reg = <5>;
+ funnel_in1_in_tpda_mss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_mss_out_funnel_in1>;
+ };
+ };
+ port@3 {
reg = <6>;
funnel_in1_in_funnel_apss_merg: endpoint {
slave-mode;
@@ -258,6 +274,22 @@
<&funnel_apss_out_funnel_apss_merg>;
};
};
+ port@2 {
+ reg = <1>;
+ funnel_apss_merg_in_tpda_olc: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_olc_out_funnel_apss_merg>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ funnel_apss_merg_in_tpda_apss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_apss_out_funnel_apss_merg>;
+ };
+ };
};
};
@@ -829,6 +861,162 @@
clock-names = "core_clk", "core_a_clk";
};
+ cti_apss: cti@7b80000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7b80000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-apss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_apss_dl: cti@7bc1000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7bc1000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-apss-dl";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_olc: cti@7b91000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7b91000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-olc";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_lpass0: cti@7060000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7060000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-lpass0";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_lpass1: cti@7061000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7061000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-lpass1";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_turing: cti@7068000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7068000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-turing";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_wcss0: cti@71a4000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x71a4000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-wcss0";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_wcss1: cti@71a5000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x71a5000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-wcss1";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_wcss2: cti@71a6000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x71a6000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-wcss2";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_mmss: cti@7188000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7188000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-mmss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_isdb: cti@7121000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7121000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-isdb";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_rpm: cti@7048000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7048000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-rpm";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti_mss: cti@7041000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x7041000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-mss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
funnel_qatb: funnel@6005000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b908>;
@@ -861,6 +1049,14 @@
<&tpda_out_funnel_qatb>;
};
};
+ port@2 {
+ reg = <3>;
+ funnel_qatb_in_funnel_dlct: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_dlct_out_funnel_qatb>;
+ };
+ };
};
};
@@ -898,7 +1094,31 @@
<&funnel_qatb_in_tpda>;
};
};
+ port@1 {
+ reg = <1>;
+ tpda_in_funnel_gpu_dl: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_gpu_dl_out_tpda>;
+ };
+ };
port@2 {
+ reg = <2>;
+ tpda_in_funnel_dlct: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_dlct_out_tpda>;
+ };
+ };
+ port@3 {
+ reg = <4>;
+ tpda_in_tpdm_vsense: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_vsense_out_tpda>;
+ };
+ };
+ port@4 {
reg = <5>;
tpda_in_tpdm_dcc: endpoint {
slave-mode;
@@ -906,6 +1126,110 @@
<&tpdm_dcc_out_tpda>;
};
};
+ port@5 {
+ reg = <6>;
+ tpda_in_tpdm_prng: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_prng_out_tpda>;
+ };
+ };
+ port@6 {
+ reg = <8>;
+ tpda_in_tpdm_qm: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_qm_out_tpda>;
+ };
+ };
+ port@7 {
+ reg = <10>;
+ tpda_in_tpdm_pimem: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_pimem_out_tpda>;
+ };
+ };
+ port@8 {
+ reg = <11>;
+ tpda_in_tpdm: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_out_tpda>;
+ };
+ };
+ };
+ };
+
+ funnel_gpu_dl: funnel@7140000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x7140000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-gpu-dl";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_gpu_dl_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_gpu_dl>;
+ };
+ };
+ port@2 {
+ reg = <0>;
+ funnel_gpu_dl_in_tpdm_gpu: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_gpu_out_funnel_gpu_dl>;
+ };
+ };
+ };
+ };
+
+ tpdm_gpu: tpdm@7111000 {
+ status = "disabled";
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7111000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-gpu";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_gpu_out_funnel_gpu_dl: endpoint {
+ remote-endpoint = <&funnel_gpu_dl_in_tpdm_gpu>;
+ };
+ };
+ };
+
+ tpdm_vsense: tpdm@7038000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7038000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-vsense";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_vsense_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_vsense>;
+ };
};
};
@@ -926,4 +1250,446 @@
};
};
};
+
+ tpdm_prng: tpdm@704c000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x704c000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-prng";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_prng_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_prng>;
+ };
+ };
+ };
+
+ tpdm_qm: tpdm@71d0000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x71d0000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-qm";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_qm_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_qm>;
+ };
+ };
+ };
+
+ tpdm_pimem: tpdm@7050000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7050000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-pimem";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_pimem_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_pimem>;
+ };
+ };
+ };
+
+ tpdm: tpdm@6006000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x6006000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm";
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm>;
+ };
+ };
+ };
+
+ tpda_nav: tpda@7191000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x7191000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-nav";
+
+ qcom,tpda-atid = <68>;
+ qcom,cmb-elem-size = <0 32>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_nav_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_tpda_nav>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ tpda_nav_in_tpdm_nav: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_nav_out_tpda_nav>;
+ };
+ };
+ };
+ };
+
+ tpda_apss: tpda@7bc2000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x7bc2000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-apss";
+
+ qcom,tpda-atid = <66>;
+ qcom,dsb-elem-size = <0 128>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_apss_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_tpda_apss>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ tpda_apss_in_tpdm_apss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_apss_out_tpda_apss>;
+ };
+ };
+ };
+ };
+
+ tpdm_apss: tpdm@7bc0000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7bc0000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-apss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_apss_out_tpda_apss: endpoint {
+ remote-endpoint = <&tpda_apss_in_tpdm_apss>;
+ };
+ };
+ };
+
+ tpda_mss: tpda@7043000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x7043000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-mss";
+
+ qcom,tpda-atid = <67>;
+ qcom,dsb-elem-size = <0 32>;
+ qcom,cmb-elem-size = <0 32>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_mss_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_tpda_mss>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ tpda_mss_in_tpdm_mss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_mss_out_tpda_mss>;
+ };
+ };
+ };
+ };
+
+ tpdm_mss: tpdm@7042000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7042000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-mss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_mss_out_tpda_mss: endpoint {
+ remote-endpoint = <&tpda_mss_in_tpdm_mss>;
+ };
+ };
+ };
+
+ tpdm_nav: tpdm@7190000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7190000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-nav";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_nav_out_tpda_nav: endpoint {
+ remote-endpoint = <&tpda_nav_in_tpdm_nav>;
+ };
+ };
+ };
+
+ tpda_olc: tpda@7b92000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x7b92000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-olc";
+
+ qcom,tpda-atid = <69>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_olc_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_tpda_olc>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ tpda_olc_in_tpdm_olc: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_olc_out_tpda_olc>;
+ };
+ };
+ };
+ };
+
+ tpdm_olc: tpdm@7b90000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7b90000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-olc";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_olc_out_tpda_olc: endpoint {
+ remote-endpoint = <&tpda_olc_in_tpdm_olc>;
+ };
+ };
+ };
+
+ funnel_dlct: funnel@71c3000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x71c3000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-dlct";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_dlct_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_dlct>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ funnel_dlct_out_funnel_qatb: endpoint {
+ remote-endpoint =
+ <&funnel_qatb_in_funnel_dlct>;
+ };
+ };
+ port@2 {
+ reg = <0>;
+ funnel_dlct_in_tpdm_dlct: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_dlct_out_funnel_dlct>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ funnel_dlct_in_funnel_wcss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_wcss_out_funnel_dlct>;
+ };
+ };
+ };
+ };
+
+ tpdm_dlct: tpdm@71c2000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x71c2000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-dlct";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_dlct_out_funnel_dlct: endpoint {
+ remote-endpoint = <&funnel_dlct_in_tpdm_dlct>;
+ };
+ };
+ };
+
+ funnel_wcss: funnel@719e000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x719e000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-wcss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_wcss_out_funnel_dlct: endpoint {
+ remote-endpoint =
+ <&funnel_dlct_in_funnel_wcss>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ funnel_wcss_in_tpda_wcss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_wcss_out_funnel_wcss>;
+ };
+ };
+ };
+ };
+
+ tpda_wcss: tpda@719d000 {
+ status = "disabled";
+ compatible = "qcom,coresight-tpda";
+ reg = <0x719d000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-wcss";
+
+ qcom,tpda-atid = <70>;
+ qcom,dsb-elem-size = <0 32>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_wcss_out_funnel_wcss: endpoint {
+ remote-endpoint =
+ <&funnel_wcss_in_tpda_wcss>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ tpda_wcss_in_tpdm_wcss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_wcss_out_tpda_wcss>;
+ };
+ };
+ };
+ };
+
+ tpdm_wcss: tpdm@719c000 {
+ status = "disabled";
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x719c000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-wcss";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_wcss_out_tpda_wcss: endpoint {
+ remote-endpoint = <&tpda_wcss_in_tpdm_wcss>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-lpi.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-lpi.dtsi
new file mode 100644
index 000000000000..34946c07074b
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmfalcon-lpi.dtsi
@@ -0,0 +1,182 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ lpi_tlmm: lpi_pinctrl@15070000 {
+ compatible = "qcom,lpi-pinctrl";
+ reg = <0x15070000 0x0>;
+ qcom,num-gpios = <32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ lpi_mclk0_active: lpi_mclk0_active {
+ mux {
+ pins = "gpio18";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
+ lpi_mclk0_sleep: lpi_mclk0_sleep {
+ mux {
+ pins = "gpio18";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cdc_pdm_gpios_active: cdc_pdm_gpios_active {
+ mux {
+ pins = "gpio18", "gpio19",
+ "gpio20", "gpio21",
+ "gpio23", "gpio25";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio18", "gpio19",
+ "gpio20", "gpio21",
+ "gpio23", "gpio25";
+ drive-strength = <8>;
+ };
+ };
+
+ cdc_pdm_gpios_sleep: cdc_pdm_gpios_sleep {
+ mux {
+ pins = "gpio18", "gpio19",
+ "gpio20", "gpio21",
+ "gpio23", "gpio25";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio18", "gpio19",
+ "gpio20", "gpio21",
+ "gpio23", "gpio25";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cdc_comp_gpios_active: cdc_pdm_comp_gpios_active {
+ mux {
+ pins = "gpio22", "gpio24";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio22", "gpio24";
+ drive-strength = <8>;
+ };
+ };
+
+ cdc_comp_gpios_sleep: cdc_pdm_comp_gpios_sleep {
+ mux {
+ pins = "gpio22", "gpio24";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio22", "gpio24";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ lpi_cdc_reset_active: lpi_cdc_reset_active {
+ mux {
+ pins = "gpio24";
+ function = "func2";
+ };
+ config {
+ pins = "gpio24";
+ drive-strength = <16>;
+ bias-pull-down;
+ output-high;
+ };
+ };
+
+ lpi_cdc_reset_sleep: lpi_cdc_reset_sleep {
+ mux {
+ pins = "gpio24";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio24";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_dmic12_gpios_active: dmic12_gpios_active {
+ mux {
+ pins = "gpio26", "gpio27";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio26", "gpio27";
+ drive-strength = <8>;
+ };
+ };
+
+ cdc_dmic12_gpios_sleep: dmic12_gpios_sleep {
+ mux {
+ pins = "gpio26", "gpio27";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio26", "gpio27";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cdc_dmic34_gpios_active: dmic34_gpios_active {
+ mux {
+ pins = "gpio28", "gpio29";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio28", "gpio29";
+ drive-strength = <8>;
+ };
+ };
+
+ cdc_dmic34_gpios_sleep: dmic34_gpios_sleep {
+ mux {
+ pins = "gpio28", "gpio29";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio28", "gpio29";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-mtp.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-mtp.dtsi
index ff14003877d1..9cf8a200a327 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-mtp.dtsi
@@ -42,4 +42,7 @@
};
&soc {
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
index 6951cdf96815..8b7672ac396f 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
@@ -568,7 +568,7 @@
mux {
pins = "gpio28", "gpio29",
"gpio30", "gpio31";
- function = "blsp_spi8";
+ function = "blsp_spi8_a";
};
config {
@@ -583,7 +583,7 @@
mux {
pins = "gpio28", "gpio29",
"gpio30", "gpio31";
- function = "blsp_spi8";
+ function = "blsp_spi8_a";
};
config {
@@ -595,6 +595,62 @@
};
};
+ sdw_clk_pin {
+ sdw_clk_sleep: sdw_clk_sleep {
+ mux {
+ pins = "gpio24";
+ function = "sndwire_clk";
+ };
+
+ config {
+ pins = "gpio24";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ sdw_clk_active: sdw_clk_active {
+ mux {
+ pins = "gpio24";
+ function = "sndwire_clk";
+ };
+
+ config {
+ pins = "gpio24";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+ };
+
+ sdw_clk_data {
+ sdw_data_sleep: sdw_data_sleep {
+ mux {
+ pins = "gpio25";
+ function = "sndwire_data";
+ };
+
+ config {
+ pins = "gpio25";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ sdw_data_active: sdw_data_active {
+ mux {
+ pins = "gpio25";
+ function = "sndwire_data";
+ };
+
+ config {
+ pins = "gpio25";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+ };
+
/* WSA speaker reset pins */
spkr_1_sd_n {
spkr_1_sd_n_sleep: spkr_1_sd_n_sleep {
@@ -656,6 +712,86 @@
};
};
+ wcd_gnd_mic_swap {
+ wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
+ mux {
+ pins = "gpio63";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio63";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active {
+ mux {
+ pins = "gpio63";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio63";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ msm_hph_en0 {
+ hph_en0_sleep: hph_en0_sleep {
+ mux {
+ pins = "gpio24";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio24";
+ output-low;
+ };
+ };
+
+ hph_en0_active: hph_en0_active {
+ mux {
+ pins = "gpio24";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio24";
+ output-high;
+ };
+ };
+ };
+
+ msm_hph_en1 {
+ hph_en1_sleep: hph_en1_sleep {
+ mux {
+ pins = "gpio25";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio25";
+ output-low;
+ };
+ };
+
+ hph_en1_active: hph_en1_active {
+ mux {
+ pins = "gpio25";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio25";
+ output-high;
+ };
+ };
+ };
+
/* HS UART CONFIGURATION */
blsp1_uart1_active: blsp1_uart1_active {
mux {
@@ -788,5 +924,46 @@
};
};
};
+
+ ts_mux {
+ ts_active: ts_active {
+ mux {
+ pins = "gpio66", "gpio67";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio66", "gpio67";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ ts_reset_suspend: ts_reset_suspend {
+ mux {
+ pins = "gpio66";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ ts_int_suspend: ts_int_suspend {
+ mux {
+ pins = "gpio67";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio67";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-pm.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-pm.dtsi
index 39c766613b30..6a9fc5bde361 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-pm.dtsi
@@ -340,10 +340,11 @@
qcom,gic-parent = <&intc>;
qcom,gic-map =
<0x02 216>, /* tsens1_tsens_upper_lower_int */
- <0x31 212>, /* usb30_power_event_irq */
<0x34 275>, /* qmp_usb3_lfps_rxterm_irq_cx */
<0x4f 379>, /* qusb2phy_intr */
+ <0x51 379>, /* qusb2phy_intr */
<0x57 358>, /* ee0_apps_hlos_spmi_periph_irq */
+ <0x5b 519>, /* lpass_pmu_tmr_timeout_irq_cx */
<0xff 16>, /* APC[0-7]_qgicQTmrHypPhysIrptReq */
<0xff 17>, /* APC[0-7]_qgicQTmrSecPhysIrptReq */
<0xff 18>, /* APC[0-7]_qgicQTmrNonSecPhysIrptReq */
@@ -708,7 +709,6 @@
<0xff 515>, /* turing_irq_out_vmm[3] */
<0xff 516>, /* lpass_irq_out_apcs[41] */
<0xff 517>, /* lpass_irq_out_apcs[42] */
- <0xff 519>, /* lpass_irq_out_apcs[44] */
<0xff 520>, /* lpass_irq_out_apcs[45] */
<0xff 544>, /* turing_irq_out_apcs[00] */
<0xff 545>, /* turing_irq_out_apcs[01] */
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-wcd.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-wcd.dtsi
index 29f4ccaede9f..006bf0175874 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-wcd.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-wcd.dtsi
@@ -25,6 +25,20 @@
pinctrl-0 = <&spkr_2_sd_n_active>;
pinctrl-1 = <&spkr_2_sd_n_sleep>;
};
+
+ tasha_hph_en0: msm_cdc_pinctrl_hph_en0 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&hph_en0_active>;
+ pinctrl-1 = <&hph_en0_sleep>;
+ };
+
+ tasha_hph_en1: msm_cdc_pinctrl_hph_en1 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&hph_en1_active>;
+ pinctrl-1 = <&hph_en1_sleep>;
+ };
};
tavil_codec {
diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
index ce6c5cf6c9b1..55fc450303c8 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
@@ -15,6 +15,7 @@
#include <dt-bindings/clock/qcom,gpu-msmfalcon.h>
#include <dt-bindings/clock/qcom,mmcc-msmfalcon.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/clock/audio-ext-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
@@ -1001,6 +1002,13 @@
};
};
+ qcom,rmtfs_sharedmem@0 {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0 0x200000>;
+ reg-names = "rmtfs";
+ qcom,client-id = <0x00000001>;
+ };
+
qcom,rmnet-ipa {
compatible = "qcom,rmnet-ipa";
qcom,rmnet-ipa-ssr;
@@ -1025,6 +1033,98 @@
qcom,mpu-enabled;
};
+
+ qcom,msm-adsprpc-mem {
+ compatible = "qcom,msm-adsprpc-mem-region";
+ memory-region = <&adsp_mem>;
+ };
+
+ qcom,msm_fastrpc {
+ compatible = "qcom,msm-fastrpc-adsp";
+ qcom,fastrpc-glink;
+
+ qcom,msm_fastrpc_compute_cb1 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 3>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb2 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 7>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb3 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 8>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb4 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 9>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb5 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 3>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb6 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 4>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb7 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 5>;
+ dma-coherent;
+ };
+
+ qcom,msm_fastrpc_compute_cb8 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 6>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb9 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 7>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb10 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 8>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb11 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 9>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb12 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 10>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb13 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&turing_q6_smmu 11>;
+ dma-coherent;
+ };
+ };
+
+
dcc: dcc@10b3000 {
compatible = "qcom,dcc";
reg = <0x10b3000 0x1000>,
@@ -1187,7 +1287,6 @@
compatible = "qcom,rpm-glink";
qcom,glink-edge = "rpm";
rpm-channel-name = "rpm_requests";
- rpm-standalone; /* TODO: remove this after bring up */
};
qcom,ipc_router {
@@ -1322,7 +1421,7 @@
qcom,pas-id = <18>;
qcom,proxy-timeout-ms = <10000>;
- qcom,smem-id = <423>;
+ qcom,smem-id = <601>;
qcom,sysmon-id = <7>;
qcom,ssctl-instance-id = <0x17>;
qcom,firmware-name = "cdsp";
@@ -1684,6 +1783,8 @@
#include "msm-gdsc-falcon.dtsi"
#include "msmfalcon-gpu.dtsi"
#include "msmfalcon-pm.dtsi"
+#include "msm-audio.dtsi"
+#include "msmfalcon-audio.dtsi"
&gdsc_usb30 {
status = "ok";
diff --git a/arch/arm/boot/dts/qcom/msmtriton-rumi.dts b/arch/arm/boot/dts/qcom/msmtriton-rumi.dts
index 094d1ef6812c..b2ad04bca714 100644
--- a/arch/arm/boot/dts/qcom/msmtriton-rumi.dts
+++ b/arch/arm/boot/dts/qcom/msmtriton-rumi.dts
@@ -20,6 +20,10 @@
model = "Qualcomm Technologies, Inc. MSM TRITON RUMI";
compatible = "qcom,msmtriton-rumi", "qcom,msmtriton", "qcom,rumi";
qcom,board-id = <15 0>;
+
+ chosen {
+ bootargs = "lpm_levels.sleep_disabled=1";
+ };
};
&usb3 {
diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
index 97570cb7f2fc..1d23527d4ecf 100644
--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
+++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
@@ -84,6 +84,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
index 2b17c5199151..6de83a6187d0 100644
--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
@@ -66,6 +66,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
index 7afc7a64eef1..e28f080b1fd5 100644
--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
@@ -80,6 +80,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
index 9fea918f949e..39731a78f087 100644
--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
@@ -79,6 +79,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/configs/msmfalcon-perf_defconfig b/arch/arm/configs/msmfalcon-perf_defconfig
index b47866ae1826..f77eb2da02bf 100644
--- a/arch/arm/configs/msmfalcon-perf_defconfig
+++ b/arch/arm/configs/msmfalcon-perf_defconfig
@@ -408,6 +408,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_ISP1760_HOST_ROLE=y
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_USB_OTG_WAKELOCK=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -479,6 +480,7 @@ CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_MSM_GCC_FALCON=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_COMMON_LOG=y
diff --git a/arch/arm/configs/msmfalcon_defconfig b/arch/arm/configs/msmfalcon_defconfig
index 693588045aed..2f4f032bd3fd 100644
--- a/arch/arm/configs/msmfalcon_defconfig
+++ b/arch/arm/configs/msmfalcon_defconfig
@@ -409,6 +409,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_ISP1760_HOST_ROLE=y
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_USB_OTG_WAKELOCK=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -481,6 +482,7 @@ CONFIG_USB_BAM=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_MSM_GPUCC_FALCON=y
CONFIG_MSM_MMCC_FALCON=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_ARM_SMMU=y
CONFIG_IOMMU_DEBUG=y
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index a96cc16ee60a..5bf87c62a418 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -3,6 +3,7 @@
#ifdef CONFIG_ARM_CPU_TOPOLOGY
+#include <linux/cpufreq.h>
#include <linux/cpumask.h>
struct cputopo_arm {
@@ -24,6 +25,12 @@ void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+#ifdef CONFIG_CPU_FREQ
+#define arch_scale_freq_capacity cpufreq_scale_freq_capacity
+#endif
+#define arch_scale_cpu_capacity scale_cpu_capacity
+extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
+
#else
static inline void init_cpu_topology(void) { }
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 35c9db857ebe..7fb59199c6bb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -496,7 +496,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(to, n, false);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
@@ -511,11 +514,15 @@ static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(from, n, true);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
#else
+ check_object_size(from, n, true);
return arm_copy_to_user(to, from, n);
#endif
}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ebdde5b9744c..74d792dd2977 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -781,7 +781,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
struct resource *res;
kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
@@ -1039,7 +1039,7 @@ static int __init topology_init(void)
return 0;
}
-subsys_initcall(topology_init);
+postcore_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 087acb569b63..5f221acd21ae 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
mm_segment_t fs;
long ret, err, i;
- if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+ if (maxevents <= 0 ||
+ maxevents > (INT_MAX/sizeof(*kbuf)) ||
+ maxevents > (INT_MAX/sizeof(*events)))
return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+ return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
+ if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+ return -EFAULT;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index e683d147816c..d7533f0e227b 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,9 +42,15 @@
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale);
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
+#ifdef CONFIG_CPU_FREQ
+ unsigned long max_freq_scale = cpufreq_scale_max_freq_capacity(cpu);
+
+ return per_cpu(cpu_scale, cpu) * max_freq_scale >> SCHED_CAPACITY_SHIFT;
+#else
return per_cpu(cpu_scale, cpu);
+#endif
}
static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
@@ -343,6 +349,8 @@ out:
return ret;
}
+static const struct sched_group_energy * const cpu_core_energy(int cpu);
+
/*
* Look for a customed capacity of a CPU in the cpu_capacity table during the
* boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
@@ -350,10 +358,14 @@ out:
*/
static void update_cpu_capacity(unsigned int cpu)
{
- if (!cpu_capacity(cpu))
- return;
+ unsigned long capacity = SCHED_CAPACITY_SCALE;
+
+ if (cpu_core_energy(cpu)) {
+ int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;
+ capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
+ }
- set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+ set_capacity_scale(cpu, capacity);
pr_info("CPU%u: update cpu_capacity %lu\n",
cpu, arch_scale_cpu_capacity(NULL, cpu));
@@ -464,17 +476,138 @@ topology_populated:
update_cpu_capacity(cpuid);
}
+/*
+ * ARM TC2 specific energy cost model data. There are no unit requirements for
+ * the data. Data can be normalized to any reference point, but the
+ * normalization must be consistent. That is, one bogo-joule/watt must be the
+ * same quantity for all data, but we don't care what it is.
+ */
+static struct idle_state idle_states_cluster_a7[] = {
+ { .power = 25 }, /* arch_cpu_idle() (active idle) = WFI */
+ { .power = 25 }, /* WFI */
+ { .power = 10 }, /* cluster-sleep-l */
+ };
+
+static struct idle_state idle_states_cluster_a15[] = {
+ { .power = 70 }, /* arch_cpu_idle() (active idle) = WFI */
+ { .power = 70 }, /* WFI */
+ { .power = 25 }, /* cluster-sleep-b */
+ };
+
+static struct capacity_state cap_states_cluster_a7[] = {
+ /* Cluster only power */
+ { .cap = 150, .power = 2967, }, /* 350 MHz */
+ { .cap = 172, .power = 2792, }, /* 400 MHz */
+ { .cap = 215, .power = 2810, }, /* 500 MHz */
+ { .cap = 258, .power = 2815, }, /* 600 MHz */
+ { .cap = 301, .power = 2919, }, /* 700 MHz */
+ { .cap = 344, .power = 2847, }, /* 800 MHz */
+ { .cap = 387, .power = 3917, }, /* 900 MHz */
+ { .cap = 430, .power = 4905, }, /* 1000 MHz */
+ };
+
+static struct capacity_state cap_states_cluster_a15[] = {
+ /* Cluster only power */
+ { .cap = 426, .power = 7920, }, /* 500 MHz */
+ { .cap = 512, .power = 8165, }, /* 600 MHz */
+ { .cap = 597, .power = 8172, }, /* 700 MHz */
+ { .cap = 682, .power = 8195, }, /* 800 MHz */
+ { .cap = 768, .power = 8265, }, /* 900 MHz */
+ { .cap = 853, .power = 8446, }, /* 1000 MHz */
+ { .cap = 938, .power = 11426, }, /* 1100 MHz */
+ { .cap = 1024, .power = 15200, }, /* 1200 MHz */
+ };
+
+static struct sched_group_energy energy_cluster_a7 = {
+ .nr_idle_states = ARRAY_SIZE(idle_states_cluster_a7),
+ .idle_states = idle_states_cluster_a7,
+ .nr_cap_states = ARRAY_SIZE(cap_states_cluster_a7),
+ .cap_states = cap_states_cluster_a7,
+};
+
+static struct sched_group_energy energy_cluster_a15 = {
+ .nr_idle_states = ARRAY_SIZE(idle_states_cluster_a15),
+ .idle_states = idle_states_cluster_a15,
+ .nr_cap_states = ARRAY_SIZE(cap_states_cluster_a15),
+ .cap_states = cap_states_cluster_a15,
+};
+
+static struct idle_state idle_states_core_a7[] = {
+ { .power = 0 }, /* arch_cpu_idle (active idle) = WFI */
+ { .power = 0 }, /* WFI */
+ { .power = 0 }, /* cluster-sleep-l */
+ };
+
+static struct idle_state idle_states_core_a15[] = {
+ { .power = 0 }, /* arch_cpu_idle (active idle) = WFI */
+ { .power = 0 }, /* WFI */
+ { .power = 0 }, /* cluster-sleep-b */
+ };
+
+static struct capacity_state cap_states_core_a7[] = {
+ /* Power per cpu */
+ { .cap = 150, .power = 187, }, /* 350 MHz */
+ { .cap = 172, .power = 275, }, /* 400 MHz */
+ { .cap = 215, .power = 334, }, /* 500 MHz */
+ { .cap = 258, .power = 407, }, /* 600 MHz */
+ { .cap = 301, .power = 447, }, /* 700 MHz */
+ { .cap = 344, .power = 549, }, /* 800 MHz */
+ { .cap = 387, .power = 761, }, /* 900 MHz */
+ { .cap = 430, .power = 1024, }, /* 1000 MHz */
+ };
+
+static struct capacity_state cap_states_core_a15[] = {
+ /* Power per cpu */
+ { .cap = 426, .power = 2021, }, /* 500 MHz */
+ { .cap = 512, .power = 2312, }, /* 600 MHz */
+ { .cap = 597, .power = 2756, }, /* 700 MHz */
+ { .cap = 682, .power = 3125, }, /* 800 MHz */
+ { .cap = 768, .power = 3524, }, /* 900 MHz */
+ { .cap = 853, .power = 3846, }, /* 1000 MHz */
+ { .cap = 938, .power = 5177, }, /* 1100 MHz */
+ { .cap = 1024, .power = 6997, }, /* 1200 MHz */
+ };
+
+static struct sched_group_energy energy_core_a7 = {
+ .nr_idle_states = ARRAY_SIZE(idle_states_core_a7),
+ .idle_states = idle_states_core_a7,
+ .nr_cap_states = ARRAY_SIZE(cap_states_core_a7),
+ .cap_states = cap_states_core_a7,
+};
+
+static struct sched_group_energy energy_core_a15 = {
+ .nr_idle_states = ARRAY_SIZE(idle_states_core_a15),
+ .idle_states = idle_states_core_a15,
+ .nr_cap_states = ARRAY_SIZE(cap_states_core_a15),
+ .cap_states = cap_states_core_a15,
+};
+
+/* sd energy functions */
+static inline
+const struct sched_group_energy * const cpu_cluster_energy(int cpu)
+{
+ return cpu_topology[cpu].cluster_id ? &energy_cluster_a7 :
+ &energy_cluster_a15;
+}
+
+static inline
+const struct sched_group_energy * const cpu_core_energy(int cpu)
+{
+ return cpu_topology[cpu].cluster_id ? &energy_core_a7 :
+ &energy_core_a15;
+}
+
static inline int cpu_corepower_flags(void)
{
- return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+ return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN | \
+ SD_SHARE_CAP_STATES;
}
static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
- { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+ { cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
#endif
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
{ NULL, },
};
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8b60fde5ce48..be2ab6d3b91f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -120,6 +120,8 @@ SECTIONS
#ifdef CONFIG_DEBUG_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
+ _etext = .; /* End of text section */
+
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
@@ -150,8 +152,6 @@ SECTIONS
NOTES
- _etext = .; /* End of text and rodata section */
-
#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 723e3925dc84..48836eba4ab7 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2160,12 +2160,16 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
int err;
+ int s1_bypass = 0;
err = __arm_iommu_attach_device(dev, mapping);
if (err)
return err;
- set_dma_ops(dev, &iommu_ops);
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (!s1_bypass)
+ set_dma_ops(dev, &iommu_ops);
return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
@@ -2199,8 +2203,21 @@ static void __arm_iommu_detach_device(struct device *dev)
*/
void arm_iommu_detach_device(struct device *dev)
{
+ struct dma_iommu_mapping *mapping;
+ int s1_bypass = 0;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
__arm_iommu_detach_device(dev);
- set_dma_ops(dev, NULL);
+
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (!s1_bypass)
+ set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fd7b4dd3bb99..221b11bb50e3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -572,7 +572,7 @@ static void __init build_mem_type_table(void)
* in the Short-descriptor translation table format descriptors.
*/
if (cpu_arch == CPU_ARCH_ARMv7 &&
- (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+ (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
user_pmd_table |= PMD_PXNTABLE;
}
#endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 65fbf6633e28..8eeb297d7b6f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -51,6 +51,7 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
@@ -455,6 +456,15 @@ config CAVIUM_ERRATUM_22375
If unsure, say Y.
+config CAVIUM_ERRATUM_23144
+ bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+ depends on NUMA
+ default y
+ help
+ ITS SYNC command hang for cross node io and collections/cpu mapping.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_23154
bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
default y
@@ -465,6 +475,17 @@ config CAVIUM_ERRATUM_23154
If unsure, say Y.
+config CAVIUM_ERRATUM_27456
+ bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
+ default y
+ help
+ On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
+ instructions may cause the icache to become corrupted if it
+ contains data for a non-current ASID. The fix is to
+ invalidate the icache when changing the mm context.
+
+ If unsure, say Y.
+
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 4c2298924cc3..101632379b8b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -16,7 +16,7 @@ OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie
+LDFLAGS_vmlinux += -pie -Bsymbolic
endif
KBUILD_DEFCONFIG := defconfig
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index cc093a482aa4..e0ee2b00d573 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -262,6 +262,8 @@
#io-channel-cells = <1>;
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
status = "disabled";
};
@@ -517,7 +519,7 @@
#address-cells = <0>;
reg = <0x0 0xffb71000 0x0 0x1000>,
- <0x0 0xffb72000 0x0 0x1000>,
+ <0x0 0xffb72000 0x0 0x2000>,
<0x0 0xffb74000 0x0 0x2000>,
<0x0 0xffb76000 0x0 0x2000>;
interrupts = <GIC_PPI 9
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index 4226060cb6fc..08dccf8e5022 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -307,6 +307,7 @@ CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
# CONFIG_ACPI_I2C_OPREGION is not set
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index 720dc8ba3be4..4620e74de5bc 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -295,6 +295,7 @@ CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
CONFIG_I2C_MSM_V2=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 85ce3e119ebc..31d4aac67ee2 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -29,6 +29,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
@@ -247,6 +248,8 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
@@ -295,6 +298,7 @@ CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
CONFIG_SLIMBUS_MSM_NGD=y
@@ -315,10 +319,7 @@ CONFIG_POWER_RESET_SYSCON=y
CONFIG_MSM_PM=y
CONFIG_APSS_CORE_EA=y
CONFIG_MSM_APM=y
-CONFIG_QPNP_SMBCHARGER=y
CONFIG_QPNP_FG_GEN3=y
-CONFIG_SMB135X_CHARGER=y
-CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_BATTERY_BCL=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 8d5e16bbe4f9..0680cb0408db 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -29,6 +29,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
@@ -249,6 +250,8 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
@@ -298,6 +301,7 @@ CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
CONFIG_SLIMBUS_MSM_NGD=y
@@ -318,10 +322,7 @@ CONFIG_POWER_RESET_SYSCON=y
CONFIG_MSM_PM=y
CONFIG_APSS_CORE_EA=y
CONFIG_MSM_APM=y
-CONFIG_QPNP_SMBCHARGER=y
CONFIG_QPNP_FG_GEN3=y
-CONFIG_SMB135X_CHARGER=y
-CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_BATTERY_BCL=y
diff --git a/arch/arm64/configs/msmfalcon-perf_defconfig b/arch/arm64/configs/msmfalcon-perf_defconfig
index 10c988472268..0e2ecff7b8f6 100644
--- a/arch/arm64/configs/msmfalcon-perf_defconfig
+++ b/arch/arm64/configs/msmfalcon-perf_defconfig
@@ -434,6 +434,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_ISP1760_HOST_ROLE=y
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_USB_OTG_WAKELOCK=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -499,6 +500,7 @@ CONFIG_USB_BAM=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_MSM_GPUCC_FALCON=y
CONFIG_MSM_MMCC_FALCON=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
diff --git a/arch/arm64/configs/msmfalcon_defconfig b/arch/arm64/configs/msmfalcon_defconfig
index 702952ceafa0..13c8aa86823a 100644
--- a/arch/arm64/configs/msmfalcon_defconfig
+++ b/arch/arm64/configs/msmfalcon_defconfig
@@ -436,6 +436,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_ISP1760_HOST_ROLE=y
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_USB_OTG_WAKELOCK=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -509,6 +510,7 @@ CONFIG_USB_BAM=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_MSM_GPUCC_FALCON=y
CONFIG_MSM_MMCC_FALCON=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 44e8b3d76fb7..fb1013f9bb86 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,6 +35,8 @@
#define ARM64_ALT_PAN_NOT_UAO 10
#define ARM64_NCAPS 11
+#define ARM64_WORKAROUND_CAVIUM_27456 12
+
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 83d48a599f69..7875c886ad24 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -140,6 +140,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 5e6857b6bdc4..2d960f8588b0 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,8 +107,6 @@
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
-#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
-
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_PS_MASK (7 << 16)
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..06ff7fd9e81f 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
#define __ASM_MODULE_H
#include <asm-generic/module.h>
+#include <asm/memory.h>
#define MODULE_ARCH_VERMAGIC "aarch64"
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
Elf64_Sym *sym);
#ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
+#endif
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index a307eb6e7fa8..7f94755089e2 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -117,6 +117,8 @@ struct pt_regs {
};
u64 orig_x0;
u64 syscallno;
+ u64 orig_addr_limit;
+ u64 unused; // maintain 16 byte alignment
};
#define arch_has_single_step() (1)
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 51b1c57c4443..7d35ea7b5b95 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -23,6 +23,15 @@ void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
unsigned long arch_get_cpu_efficiency(int cpu);
+struct sched_domain;
+#ifdef CONFIG_CPU_FREQ
+#define arch_scale_freq_capacity cpufreq_scale_freq_capacity
+extern unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
+extern unsigned long cpufreq_scale_max_freq_capacity(int cpu);
+#endif
+#define arch_scale_cpu_capacity scale_cpu_capacity
+extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74572af..c3d445b42351 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -269,24 +269,39 @@ do { \
-EFAULT; \
})
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ check_object_size(to, n, false);
+ return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ check_object_size(from, n, true);
+ return __arch_copy_to_user(to, from, n);
+}
+
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
+ if (access_ok(VERIFY_READ, from, n)) {
+ check_object_size(to, n, false);
+ n = __arch_copy_from_user(to, from, n);
+ } else /* security hole - plug it */
memset(to, 0, n);
return n;
}
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ check_object_size(from, n, true);
+ n = __arch_copy_to_user(to, from, n);
+ }
return n;
}
diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h
index 22d6d8885854..4cf0c17787a8 100644
--- a/arch/arm64/include/uapi/asm/auxvec.h
+++ b/arch/arm64/include/uapi/asm/auxvec.h
@@ -19,4 +19,6 @@
/* vDSO location */
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 8c501789c4bb..65919bfe6cf5 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -34,8 +34,8 @@ EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__copy_in_user);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 25de8b244961..087cf9a65359 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -58,6 +58,7 @@ int main(void)
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+ DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1555520bbb74..b3f2d13d8ff1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -94,6 +94,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+ {
+ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+ .desc = "Cavium erratum 27456",
+ .capability = ARM64_WORKAROUND_CAVIUM_27456,
+ MIDR_RANGE(MIDR_THUNDERX, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 1),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index c1492ba1f6d1..e51f27ac13fd 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -152,7 +152,6 @@ static int debug_monitors_init(void)
/* Clear the OS lock. */
on_each_cpu(clear_os_lock, NULL, 1);
isb();
- local_dbg_enable();
/* Register hotplug handler. */
__register_cpu_notifier(&os_lock_nb);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b67e70c34888..9bcc0ad84917 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,6 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@@ -97,7 +98,13 @@
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
- .endif
+ get_thread_info tsk
+ /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ ldr x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [sp, #S_ORIG_ADDR_LIMIT]
+ mov x20, #TASK_SIZE_64
+ str x20, [tsk, #TI_ADDR_LIMIT]
+ .endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
@@ -128,6 +135,12 @@
.endm
.macro kernel_exit, el
+ .if \el != 0
+ /* Restore the task's original addr_limit. */
+ ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+ str x20, [tsk, #TI_ADDR_LIMIT]
+ .endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 491ad4124615..9890d04a96cb 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -717,40 +717,25 @@ __primary_switch:
* Iterate over each entry in the relocation table, and apply the
* relocations in place.
*/
- ldr w8, =__dynsym_offset // offset to symbol table
ldr w9, =__rela_offset // offset to reloc table
ldr w10, =__rela_size // size of reloc table
mov_q x11, KIMAGE_VADDR // default virtual offset
add x11, x11, x23 // actual virtual offset
- add x8, x8, x11 // __va(.dynsym)
add x9, x9, x11 // __va(.rela)
add x10, x9, x10 // __va(.rela) + sizeof(.rela)
0: cmp x9, x10
- b.hs 2f
+ b.hs 1f
ldp x11, x12, [x9], #24
ldr x13, [x9, #-8]
cmp w12, #R_AARCH64_RELATIVE
- b.ne 1f
+ b.ne 0b
add x13, x13, x23 // relocate
str x13, [x11, x23]
b 0b
-1: cmp w12, #R_AARCH64_ABS64
- b.ne 0b
- add x12, x12, x12, lsl #1 // symtab offset: 24x top word
- add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
- ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
- ldr x15, [x12, #8] // Elf64_Sym::st_value
- cmp w14, #-0xf // SHN_ABS (0xfff1) ?
- add x14, x15, x23 // relocate
- csel x15, x14, x15, ne
- add x15, x13, x15
- str x15, [x11, x23]
- b 0b
-
-2:
+1:
#endif
ldr x8, =__primary_switched
br x8
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d1e01e6498bb..326c53b7fe0b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -214,7 +214,7 @@ static void __init request_standard_resources(void)
struct resource *res;
kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b2f5631c3785..a3a6b2ea9b4d 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -191,7 +191,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- local_dbg_enable();
local_irq_enable();
local_async_enable();
@@ -346,8 +345,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
- cpuinfo_store_boot_cpu();
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ cpuinfo_store_boot_cpu();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 349b131baec3..c0ee84020784 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -20,6 +20,8 @@
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/sched_energy.h>
#include <asm/cputype.h>
#include <asm/topology.h>
@@ -35,7 +37,7 @@
* rebalance_domains for all idle cores and the cpu_power can be updated
* during this sequence.
*/
-static DEFINE_PER_CPU(unsigned long, cpu_scale);
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
@@ -47,6 +49,22 @@ static void set_power_scale(unsigned int cpu, unsigned long power)
per_cpu(cpu_scale, cpu) = power;
}
+unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+#ifdef CONFIG_CPU_FREQ
+ unsigned long max_freq_scale = cpufreq_scale_max_freq_capacity(cpu);
+
+ return per_cpu(cpu_scale, cpu) * max_freq_scale >> SCHED_CAPACITY_SHIFT;
+#else
+ return per_cpu(cpu_scale, cpu);
+#endif
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
@@ -371,11 +389,67 @@ static void update_cpu_power(unsigned int cpu)
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
+/* sd energy functions */
+static inline
+const struct sched_group_energy * const cpu_cluster_energy(int cpu)
+{
+ struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
+
+ if (!sge) {
+ pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
+ return NULL;
+ }
+
+ return sge;
+}
+
+static inline
+const struct sched_group_energy * const cpu_core_energy(int cpu)
+{
+ struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];
+
+ if (!sge) {
+ pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
+ return NULL;
+ }
+
+ return sge;
+}
+
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_sibling;
}
+static inline int cpu_corepower_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN | \
+ SD_SHARE_CAP_STATES;
+}
+
+static struct sched_domain_topology_level arm64_topology[] = {
+#ifdef CONFIG_SCHED_MC
+ { cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
+#endif
+ { cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
+static void update_cpu_capacity(unsigned int cpu)
+{
+ unsigned long capacity = SCHED_CAPACITY_SCALE;
+
+ if (sched_energy_aware && cpu_core_energy(cpu)) {
+ int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;
+ capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
+ }
+
+ set_capacity_scale(cpu, capacity);
+
+ pr_info("CPU%d: update cpu_capacity %lu\n",
+ cpu, arch_scale_cpu_capacity(NULL, cpu));
+}
+
static void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -438,6 +512,7 @@ void store_cpu_topology(unsigned int cpuid)
topology_populated:
update_siblings_masks(cpuid);
update_cpu_power(cpuid);
+ update_cpu_capacity(cpuid);
}
static void __init reset_cpu_topology(void)
@@ -479,10 +554,12 @@ void __init init_cpu_topology(void)
if (of_have_populated_dt() && parse_dt_topology()) {
reset_cpu_topology();
} else {
+ set_sched_topology(arm64_topology);
for_each_possible_cpu(cpu)
update_siblings_masks(cpu);
}
reset_cpu_power();
parse_dt_cpu_power();
+ init_sched_energy_costs();
}
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 8b4623eeb62d..76ff67d44fb5 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -94,6 +94,7 @@ SECTIONS
*(.discard)
*(.discard.*)
*(.interp .dynamic)
+ *(.dynsym .dynstr .hash)
}
. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -120,12 +121,13 @@ SECTIONS
}
. = ALIGN(SEGMENT_ALIGN);
- RO_DATA(PAGE_SIZE) /* everything from this point to */
- EXCEPTION_TABLE(8) /* _etext will be marked RO NX */
+ _etext = .; /* End of text section */
+
+ RO_DATA(PAGE_SIZE) /* everything from this point to */
+ EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
NOTES
. = ALIGN(SEGMENT_ALIGN);
- _etext = .; /* End of text and rodata section */
__init_begin = .;
INIT_TEXT_SECTION(8)
@@ -159,19 +161,9 @@ SECTIONS
.rela : ALIGN(8) {
*(.rela .rela*)
}
- .dynsym : ALIGN(8) {
- *(.dynsym)
- }
- .dynstr : {
- *(.dynstr)
- }
- .hash : {
- *(.hash)
- }
- __rela_offset = ADDR(.rela) - KIMAGE_VADDR;
+ __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
__rela_size = SIZEOF(.rela);
- __dynsym_offset = ADDR(.dynsym) - KIMAGE_VADDR;
. = ALIGN(SEGMENT_ALIGN);
__init_end = .;
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 178ba2248a98..84c338f017b2 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@ __do_hyp_init:
mrs x4, tcr_el1
ldr x5, =TCR_EL2_MASK
and x4, x4, x5
- ldr x5, =TCR_EL2_FLAGS
+ mov x5, #TCR_EL2_RES1
orr x4, x4, x5
#ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,15 +85,18 @@ __do_hyp_init:
ldr_l x5, idmap_t0sz
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
#endif
- msr tcr_el2, x4
-
- ldr x4, =VTCR_EL2_FLAGS
/*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
- * VTCR_EL2.
+ * TCR_EL2 and VTCR_EL2.
*/
mrs x5, ID_AA64MMFR0_EL1
bfi x4, x5, #16, #3
+
+ msr tcr_el2, x4
+
+ ldr x4, =VTCR_EL2_FLAGS
+ bfi x4, x5, #16, #3
+
msr vtcr_el2, x4
mrs x4, mair_el1
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 17e8306dca29..0b90497d4424 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -66,7 +66,7 @@
.endm
end .req x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
add end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x0, #0 // Nothing to copy
ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 21faae60f988..7a7efe255034 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -65,7 +65,7 @@
.endm
end .req x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
add end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 4a49093dddd7..9b9b13ba603d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -387,8 +387,8 @@ void __init mem_init(void)
MLM(MODULES_VADDR, MODULES_END),
MLG(VMALLOC_START, VMALLOC_END),
MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_text, __start_rodata),
- MLK_ROUNDUP(__start_rodata, _etext),
+ MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(__start_rodata, __init_begin),
MLK_ROUNDUP(_sdata, _edata),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
MLG(VMEMMAP_START,
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a8a6f91343cc..c3a3deb76ae2 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -392,14 +392,14 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
unsigned long kernel_start = __pa(_text);
- unsigned long kernel_end = __pa(_etext);
+ unsigned long kernel_end = __pa(__init_begin);
/*
* Take care not to create a writable alias for the
* read-only text and rodata sections of the kernel image.
*/
- /* No overlap with the kernel text */
+ /* No overlap with the kernel text/rodata */
if (end < kernel_start || start >= kernel_end) {
__create_pgd_mapping(pgd, start, __phys_to_virt(start),
end - start, PAGE_KERNEL,
@@ -408,7 +408,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
}
/*
- * This block overlaps the kernel text mapping.
+ * This block overlaps the kernel text/rodata mappings.
* Map the portion(s) which don't overlap.
*/
if (start < kernel_start)
@@ -423,7 +423,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
early_pgtable_alloc);
/*
- * Map the linear alias of the [_text, _etext) interval as
+ * Map the linear alias of the [_text, __init_begin) interval as
* read-only/non-executable. This makes the contents of the
* region accessible to subsystems such as hibernate, but
* protects it from inadvertent modification or execution.
@@ -453,14 +453,14 @@ void mark_rodata_ro(void)
{
unsigned long section_size;
- section_size = (unsigned long)__start_rodata - (unsigned long)_text;
+ section_size = (unsigned long)_etext - (unsigned long)_text;
create_mapping_late(__pa(_text), (unsigned long)_text,
section_size, PAGE_KERNEL_ROX);
/*
- * mark .rodata as read only. Use _etext rather than __end_rodata to
- * cover NOTES and EXCEPTION_TABLE.
+ * mark .rodata as read only. Use __init_begin rather than __end_rodata
+ * to cover NOTES and EXCEPTION_TABLE.
*/
- section_size = (unsigned long)_etext - (unsigned long)__start_rodata;
+ section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
}
@@ -503,8 +503,8 @@ static void __init map_kernel(pgd_t *pgd)
{
static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
- map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
- map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
+ map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+ map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
&vmlinux_init);
map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
@@ -785,9 +785,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
/*
* Check whether the physical FDT address is set and meets the minimum
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
- * at least 8 bytes so that we can always access the size field of the
- * FDT header after mapping the first chunk, double check here if that
- * is indeed the case.
+ * at least 8 bytes so that we can always access the magic and size
+ * fields of the FDT header after mapping the first chunk, double check
+ * here if that is indeed the case.
*/
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
@@ -815,7 +815,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
- if (fdt_check_header(dt_virt) != 0)
+ if (fdt_magic(dt_virt) != FDT_MAGIC)
return NULL;
*size = fdt_totalsize(dt_virt);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index dc22de0ce413..ec968a076919 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -25,6 +25,8 @@
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
#include "proc-macros.S"
@@ -183,7 +185,17 @@ ENTRY(cpu_do_switch_mm)
bfi x0, x1, #48, #16 // set the ASID
msr ttbr0_el1, x0 // set TTBR0
isb
+alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
ret
+ nop
+ nop
+ nop
+alternative_else
+ ic iallu
+ dsb nsh
+ isb
+ ret
+alternative_endif
ENDPROC(cpu_do_switch_mm)
.pushsection ".idmap.text", "ax"
@@ -228,6 +240,8 @@ ENTRY(__cpu_setup)
msr cpacr_el1, x0 // Enable FP/ASIMD
mov x0, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x0 // access to the DCC from EL0
+ isb // Unmask debug exceptions now,
+ enable_dbg // since this is per-cpu
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
/*
* Memory region attributes for LPAE:
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index eb0249e37981..2c86a4ef6742 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -53,6 +53,7 @@ config IA64
select MODULES_USE_ELF_RELA
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 4f3fb6ccbf21..3d6b840c5c99 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
+ if (!__builtin_constant_p(count))
+ check_object_size(from, count, true);
+
return __copy_user(to, (__force void __user *) from, count);
}
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
+ if (!__builtin_constant_p(count))
+ check_object_size(to, count, false);
+
return __copy_user((__force void __user *) to, from, count);
}
@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
const void *__cu_from = (from); \
long __cu_len = (n); \
\
- if (__access_ok(__cu_to, __cu_len, get_fs())) \
- __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ if (__access_ok(__cu_to, __cu_len, get_fs())) { \
+ if (!__builtin_constant_p(n)) \
+ check_object_size(__cu_from, __cu_len, true); \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ } \
__cu_len; \
})
@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
- if (__access_ok(__cu_from, __cu_len, get_fs())) \
+ if (__access_ok(__cu_from, __cu_len, get_fs())) { \
+ if (!__builtin_constant_p(n)) \
+ check_object_size(__cu_to, __cu_len, false); \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ } \
__cu_len; \
})
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index a62581815624..88fa25fae8bd 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -61,7 +61,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" CMPT %0, #HI(0x02000000)\n" \
" BNZ 1b\n" \
: "=&d" (temp), "=&da" (result) \
- : "da" (&v->counter), "bd" (i) \
+ : "da" (&v->counter), "br" (i) \
: "cc"); \
\
smp_mb(); \
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
index 0154e2807ebb..2369ad394876 100644
--- a/arch/metag/include/asm/cmpxchg_lnkget.h
+++ b/arch/metag/include/asm/cmpxchg_lnkget.h
@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
" DCACHE [%2], %0\n"
#endif
"2:\n"
- : "=&d" (temp), "=&da" (retval)
+ : "=&d" (temp), "=&d" (retval)
: "da" (m), "bd" (old), "da" (new)
: "cc"
);
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 1f910563fdf6..d76275da54cb 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u64 notrace r4k_read_sched_clock(void)
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
return read_c0_count();
}
@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+#ifndef CONFIG_CPU_FREQ
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
return 0;
}
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 5a69eb48d0a8..ee93d5fe61d7 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -344,7 +344,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
- PTR sys_keyctl /* 6245 */
+ PTR compat_sys_keyctl /* 6245 */
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e4b6d7c97822..b77052ec6fb2 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -500,7 +500,7 @@ EXPORT(sys32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
- PTR sys_keyctl
+ PTR compat_sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index dc10c77b7500..d6476d11212e 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1629,8 +1629,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
- kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+ __func__, va, vcpu, read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
+ }
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
int index;
@@ -1665,14 +1671,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
run, vcpu);
preempt_enable();
goto dont_update_pc;
- } else {
- /*
- * We fault an entry from the guest tlb to the
- * shadow host TLB
- */
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
- NULL,
- NULL);
+ }
+ /*
+ * We fault an entry from the guest tlb to the
+ * shadow host TLB
+ */
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
}
}
} else {
@@ -2633,8 +2644,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
- NULL);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ }
}
}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index aed0ac2a4972..7a7ed9ca01bb 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -276,7 +276,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
}
gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if (gfn >= kvm->arch.guest_pmap_npages) {
+ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
gfn, badvaddr);
kvm_mips_dump_host_tlbs();
@@ -361,25 +361,39 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
pfn_t pfn0, pfn1;
-
- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
- pfn0 = 0;
- pfn1 = 0;
- } else {
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT];
- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT];
+ gfn_t gfn0, gfn1;
+ long tlb_lo[2];
+
+ tlb_lo[0] = tlb->tlb_lo0;
+ tlb_lo[1] = tlb->tlb_lo1;
+
+ /*
+ * The commpage address must not be mapped to anything else if the guest
+ * TLB contains entries nearby, or commpage accesses will break.
+ */
+ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+ VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+ if (gfn0 >= kvm->arch.guest_pmap_npages ||
+ gfn1 >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+ __func__, gfn0, gfn1, tlb->tlb_hi);
+ kvm_mips_dump_guest_tlbs(vcpu);
+ return -1;
}
+ if (kvm_mips_map_page(kvm, gfn0) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(kvm, gfn1) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[gfn0];
+ pfn1 = kvm->arch.guest_pmap[gfn1];
+
if (hpa0)
*hpa0 = pfn0 << PAGE_SHIFT;
@@ -391,9 +405,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
kvm_mips_get_kernel_asid(vcpu) :
kvm_mips_get_user_asid(vcpu));
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+ (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+ (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo0, tlb->tlb_lo1);
@@ -794,10 +808,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
local_irq_restore(flags);
return KVM_INVALID_INST;
}
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.
- guest_tlb[index],
- NULL, NULL);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.guest_tlb[index],
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, index, vcpu,
+ read_c0_entryhi());
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
inst = *(opc);
}
local_irq_restore(flags);
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index a2631a52ca99..444802e78554 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -13,8 +13,8 @@
#define SMBUS_PCI_REG64 0x64
#define SMBUS_PCI_REGB4 0xb4
-#define HPET_MIN_CYCLES 64
-#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+#define HPET_MIN_CYCLES 16
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
static DEFINE_SPINLOCK(hpet_lock);
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
static int hpet_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- unsigned int cnt;
- int res;
+ u32 cnt;
+ s32 res;
cnt = hpet_read(HPET_COUNTER);
- cnt += delta;
+ cnt += (u32) delta;
hpet_write(HPET_T0_CMP, cnt);
- res = (int)(cnt - hpet_read(HPET_COUNTER));
+ res = (s32)(cnt - hpet_read(HPET_COUNTER));
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
cd = &per_cpu(hpet_clockevent_device, cpu);
cd->name = "hpet";
- cd->rating = 320;
+ cd->rating = 100;
cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
cd->set_state_shutdown = hpet_set_state_shutdown;
cd->set_state_periodic = hpet_set_state_periodic;
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index b4a837893562..5abe51cad899 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -65,7 +65,7 @@ static struct insn insn_table[] = {
#ifndef CONFIG_CPU_MIPSR6
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
#else
- { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
+ { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
#endif
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index c0ae62520d15..274d5bc6ecce 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -97,10 +97,10 @@
#define ENOTCONN 235 /* Transport endpoint is not connected */
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
-#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
-#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index db49e0d796b1..ec7b8f1e4822 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -160,6 +160,7 @@ config PPC
select EDAC_ATOMIC_SCRUB
select ARCH_HAS_DMA_SET_COHERENT_MASK
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_HARDENED_USERCOPY
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 9f8402b35115..27e588f6c72e 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -164,6 +164,7 @@ struct coprocessor_request_block {
#define ICSWX_INITIATED (0x8)
#define ICSWX_BUSY (0x4)
#define ICSWX_REJECTED (0x2)
+#define ICSWX_XERS0 (0x1) /* undefined or set from XERSO. */
static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
{
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 2a8ebae0936b..b39a69370057 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -325,10 +325,15 @@ static inline unsigned long copy_from_user(void *to,
{
unsigned long over;
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(VERIFY_READ, from, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
+ if (!__builtin_constant_p(n - over))
+ check_object_size(to, n - over, false);
return __copy_tofrom_user((__force void __user *)to, from,
n - over) + over;
}
@@ -340,10 +345,15 @@ static inline unsigned long copy_to_user(void __user *to,
{
unsigned long over;
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
+ }
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n - over, true);
return __copy_tofrom_user(to, (__force void __user *)from,
n - over) + over;
}
@@ -387,6 +397,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
if (ret == 0)
return 0;
}
+
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
+
return __copy_tofrom_user((__force void __user *)to, from, n);
}
@@ -413,6 +427,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
if (ret == 0)
return 0;
}
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
+
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index b34e8a54f7db..98949b0df00a 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -677,7 +677,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
/* Check if the request is finished successfully */
if (active_flag) {
rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
- if (rc <= 0)
+ if (rc < 0)
return rc;
if (rc & active_flag)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index bf8f34a58670..b7019b559ddb 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)
- /* We need to setup MSR for VSX register save instructions. Here we
- * also clear the MSR RI since when we do the treclaim, we won't have a
- * valid kernel pointer for a while. We clear RI here as it avoids
- * adding another mtmsr closer to the treclaim. This makes the region
- * maked as non-recoverable wider than it needs to be but it saves on
- * inserting another mtmsrd later.
- */
+ /* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
ori r15, r15, MSR_FP
- li r16, MSR_RI
+ li r16, 0
ori r16, r16, MSR_EE /* IRQs hard off */
andc r15, r15, r16
oris r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
- /* The moment we treclaim, ALL of our GPRs will switch
+ /* Clear MSR RI since we are about to change r1, EE is already off. */
+ li r4, 0
+ mtmsrd r4, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ *
+ * The moment we treclaim, ALL of our GPRs will switch
* to user register state. (FPRs, CCR etc. also!)
* Use an sprg and a tm_scratch in the PACA to shuffle.
*/
@@ -197,6 +201,11 @@ dont_backup_fp:
/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+
+ /* Reset MSR RI so we can take SLB faults again */
+ li r11, MSR_RI
+ mtmsrd r11, 1
+
mfspr r11, SPRN_PPR
HMT_MEDIUM
@@ -397,11 +406,6 @@ restore_gprs:
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)
- /* Clear the MSR RI since we are about to change R1. EE is already off
- */
- li r4, 0
- mtmsrd r4, 1
-
REST_GPR(0, r7) /* GPR0 */
REST_2GPRS(2, r7) /* GPR2-3 */
REST_GPR(4, r7) /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
ld r6, _CCR(r7)
mtcr r6
- REST_GPR(1, r7) /* GPR1 */
- REST_GPR(5, r7) /* GPR5-7 */
REST_GPR(6, r7)
- ld r7, GPR7(r7)
+
+ /*
+ * Store r1 and r5 on the stack so that we can access them
+ * after we clear MSR RI.
+ */
+
+ REST_GPR(5, r7)
+ std r5, -8(r1)
+ ld r5, GPR1(r7)
+ std r5, -16(r1)
+
+ REST_GPR(7, r7)
+
+ /* Clear MSR RI since we are about to change r1. EE is already off */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ */
+
+ ld r5, -8(r1)
+ ld r1, -16(r1)
/* Commit register state as checkpointed state: */
TRECHKPT
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 463af88c95a2..974f73df00bb 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
- b skip_tm
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
-
- /* Turn on TM/FP/VSX/VMX so we can restore them. */
- mfmsr r5
- li r6, MSR_TM >> 32
- sldi r6, r6, 32
- or r5, r5, r6
- ori r5, r5, MSR_FP
- oris r5, r5, (MSR_VEC | MSR_VSX)@h
- mtmsrd r5
-
- /*
- * The user may change these outside of a transaction, so they must
- * always be context switched.
- */
- ld r5, VCPU_TFHAR(r4)
- ld r6, VCPU_TFIAR(r4)
- ld r7, VCPU_TEXASR(r4)
- mtspr SPRN_TFHAR, r5
- mtspr SPRN_TFIAR, r6
- mtspr SPRN_TEXASR, r7
-
- ld r5, VCPU_MSR(r4)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beq skip_tm /* TM not active in guest */
-
- /* Make sure the failure summary is set, otherwise we'll program check
- * when we trechkpt. It's possible that this might have been not set
- * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
- * host.
- */
- oris r7, r7, (TEXASR_FS)@h
- mtspr SPRN_TEXASR, r7
-
- /*
- * We need to load up the checkpointed state for the guest.
- * We need to do this early as it will blow away any GPRs, VSRs and
- * some SPRs.
- */
-
- mr r31, r4
- addi r3, r31, VCPU_FPRS_TM
- bl load_fp_state
- addi r3, r31, VCPU_VRS_TM
- bl load_vr_state
- mr r4, r31
- lwz r7, VCPU_VRSAVE_TM(r4)
- mtspr SPRN_VRSAVE, r7
-
- ld r5, VCPU_LR_TM(r4)
- lwz r6, VCPU_CR_TM(r4)
- ld r7, VCPU_CTR_TM(r4)
- ld r8, VCPU_AMR_TM(r4)
- ld r9, VCPU_TAR_TM(r4)
- mtlr r5
- mtcr r6
- mtctr r7
- mtspr SPRN_AMR, r8
- mtspr SPRN_TAR, r9
-
- /*
- * Load up PPR and DSCR values but don't put them in the actual SPRs
- * till the last moment to avoid running with userspace PPR and DSCR for
- * too long.
- */
- ld r29, VCPU_DSCR_TM(r4)
- ld r30, VCPU_PPR_TM(r4)
-
- std r2, PACATMSCRATCH(r13) /* Save TOC */
-
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
- /* Load GPRs r0-r28 */
- reg = 0
- .rept 29
- ld reg, VCPU_GPRS_TM(reg)(r31)
- reg = reg + 1
- .endr
-
- mtspr SPRN_DSCR, r29
- mtspr SPRN_PPR, r30
-
- /* Load final GPRs */
- ld 29, VCPU_GPRS_TM(29)(r31)
- ld 30, VCPU_GPRS_TM(30)(r31)
- ld 31, VCPU_GPRS_TM(31)(r31)
-
- /* TM checkpointed state is now setup. All GPRs are now volatile. */
- TRECHKPT
-
- /* Now let's get back the state we need. */
- HMT_MEDIUM
- GET_PACA(r13)
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
- ld r4, HSTATE_KVM_VCPU(r13)
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATMSCRATCH(r13)
-
- /* Set the MSR RI since we have our registers back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-skip_tm:
+ bl kvmppc_restore_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
/* Load guest PMU registers */
@@ -841,12 +737,6 @@ BEGIN_FTR_SECTION
/* Skip next section on POWER7 */
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
/* Load up POWER8-specific registers */
ld r5, VCPU_IAMR(r4)
lwz r6, VCPU_PSPB(r4)
@@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
- b 2f
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
- /* Turn on TM. */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
- ld r5, VCPU_MSR(r9)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beq 1f /* TM not active in guest. */
-
- li r3, TM_CAUSE_KVM_RESCHED
-
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
- /* All GPRs are volatile at this point. */
- TRECLAIM(R3)
-
- /* Temporarily store r13 and r9 so we have some regs to play with */
- SET_SCRATCH0(r13)
- GET_PACA(r13)
- std r9, PACATMSCRATCH(r13)
- ld r9, HSTATE_KVM_VCPU(r13)
-
- /* Get a few more GPRs free. */
- std r29, VCPU_GPRS_TM(29)(r9)
- std r30, VCPU_GPRS_TM(30)(r9)
- std r31, VCPU_GPRS_TM(31)(r9)
-
- /* Save away PPR and DSCR soon so don't run with user values. */
- mfspr r31, SPRN_PPR
- HMT_MEDIUM
- mfspr r30, SPRN_DSCR
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-
- /* Save all but r9, r13 & r29-r31 */
- reg = 0
- .rept 29
- .if (reg != 9) && (reg != 13)
- std reg, VCPU_GPRS_TM(reg)(r9)
- .endif
- reg = reg + 1
- .endr
- /* ... now save r13 */
- GET_SCRATCH0(r4)
- std r4, VCPU_GPRS_TM(13)(r9)
- /* ... and save r9 */
- ld r4, PACATMSCRATCH(r13)
- std r4, VCPU_GPRS_TM(9)(r9)
-
- /* Reload stack pointer and TOC. */
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
-
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-
- /* Save away checkpinted SPRs. */
- std r31, VCPU_PPR_TM(r9)
- std r30, VCPU_DSCR_TM(r9)
- mflr r5
- mfcr r6
- mfctr r7
- mfspr r8, SPRN_AMR
- mfspr r10, SPRN_TAR
- std r5, VCPU_LR_TM(r9)
- stw r6, VCPU_CR_TM(r9)
- std r7, VCPU_CTR_TM(r9)
- std r8, VCPU_AMR_TM(r9)
- std r10, VCPU_TAR_TM(r9)
-
- /* Restore r12 as trap number. */
- lwz r12, VCPU_TRAP(r9)
-
- /* Save FP/VSX. */
- addi r3, r9, VCPU_FPRS_TM
- bl store_fp_state
- addi r3, r9, VCPU_VRS_TM
- bl store_vr_state
- mfspr r6, SPRN_VRSAVE
- stw r6, VCPU_VRSAVE_TM(r9)
-1:
- /*
- * We need to save these SPRs after the treclaim so that the software
- * error code is recorded correctly in the TEXASR. Also the user may
- * change these outside of a transaction, so they must always be
- * context switched.
- */
- mfspr r5, SPRN_TFHAR
- mfspr r6, SPRN_TFIAR
- mfspr r7, SPRN_TEXASR
- std r5, VCPU_TFHAR(r9)
- std r6, VCPU_TFIAR(r9)
- std r7, VCPU_TEXASR(r9)
-2:
+ bl kvmppc_save_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
/* Increment yield count if they have a VPA */
@@ -2245,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
/* save FP state */
bl kvmppc_save_fp
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ ld r9, HSTATE_KVM_VCPU(r13)
+ bl kvmppc_save_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+
/*
* Set DEC to the smaller of DEC and HDEC, so that we wake
* no later than the end of our timeslice (HDEC interrupts
@@ -2321,6 +2120,12 @@ kvm_end_cede:
bl kvmhv_accumulate_time
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ bl kvmppc_restore_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+
/* load up FP state */
bl kvmppc_load_fp
@@ -2629,6 +2434,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mr r4,r31
blr
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Save transactional state and TM-related registers.
+ * Called with r9 pointing to the vcpu struct.
+ * This can modify all checkpointed registers, but
+ * restores r1, r2 and r9 (vcpu pointer) before exit.
+ */
+kvmppc_save_tm:
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+
+ /* Turn on TM. */
+ mfmsr r8
+ li r0, 1
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+ ld r5, VCPU_MSR(r9)
+ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ beq 1f /* TM not active in guest. */
+
+ std r1, HSTATE_HOST_R1(r13)
+ li r3, TM_CAUSE_KVM_RESCHED
+
+ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /* All GPRs are volatile at this point. */
+ TRECLAIM(R3)
+
+ /* Temporarily store r13 and r9 so we have some regs to play with */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9, PACATMSCRATCH(r13)
+ ld r9, HSTATE_KVM_VCPU(r13)
+
+ /* Get a few more GPRs free. */
+ std r29, VCPU_GPRS_TM(29)(r9)
+ std r30, VCPU_GPRS_TM(30)(r9)
+ std r31, VCPU_GPRS_TM(31)(r9)
+
+ /* Save away PPR and DSCR soon so don't run with user values. */
+ mfspr r31, SPRN_PPR
+ HMT_MEDIUM
+ mfspr r30, SPRN_DSCR
+ ld r29, HSTATE_DSCR(r13)
+ mtspr SPRN_DSCR, r29
+
+ /* Save all but r9, r13 & r29-r31 */
+ reg = 0
+ .rept 29
+ .if (reg != 9) && (reg != 13)
+ std reg, VCPU_GPRS_TM(reg)(r9)
+ .endif
+ reg = reg + 1
+ .endr
+ /* ... now save r13 */
+ GET_SCRATCH0(r4)
+ std r4, VCPU_GPRS_TM(13)(r9)
+ /* ... and save r9 */
+ ld r4, PACATMSCRATCH(r13)
+ std r4, VCPU_GPRS_TM(9)(r9)
+
+ /* Reload stack pointer and TOC. */
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATOC(r13)
+
+ /* Set MSR RI now we have r1 and r13 back. */
+ li r5, MSR_RI
+ mtmsrd r5, 1
+
+ /* Save away checkpinted SPRs. */
+ std r31, VCPU_PPR_TM(r9)
+ std r30, VCPU_DSCR_TM(r9)
+ mflr r5
+ mfcr r6
+ mfctr r7
+ mfspr r8, SPRN_AMR
+ mfspr r10, SPRN_TAR
+ std r5, VCPU_LR_TM(r9)
+ stw r6, VCPU_CR_TM(r9)
+ std r7, VCPU_CTR_TM(r9)
+ std r8, VCPU_AMR_TM(r9)
+ std r10, VCPU_TAR_TM(r9)
+
+ /* Restore r12 as trap number. */
+ lwz r12, VCPU_TRAP(r9)
+
+ /* Save FP/VSX. */
+ addi r3, r9, VCPU_FPRS_TM
+ bl store_fp_state
+ addi r3, r9, VCPU_VRS_TM
+ bl store_vr_state
+ mfspr r6, SPRN_VRSAVE
+ stw r6, VCPU_VRSAVE_TM(r9)
+1:
+ /*
+ * We need to save these SPRs after the treclaim so that the software
+ * error code is recorded correctly in the TEXASR. Also the user may
+ * change these outside of a transaction, so they must always be
+ * context switched.
+ */
+ mfspr r5, SPRN_TFHAR
+ mfspr r6, SPRN_TFIAR
+ mfspr r7, SPRN_TEXASR
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+ std r7, VCPU_TEXASR(r9)
+
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+
+/*
+ * Restore transactional state and TM-related registers.
+ * Called with r4 pointing to the vcpu struct.
+ * This potentially modifies all checkpointed registers.
+ * It restores r1, r2, r4 from the PACA.
+ */
+kvmppc_restore_tm:
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+
+ /* Turn on TM/FP/VSX/VMX so we can restore them. */
+ mfmsr r5
+ li r6, MSR_TM >> 32
+ sldi r6, r6, 32
+ or r5, r5, r6
+ ori r5, r5, MSR_FP
+ oris r5, r5, (MSR_VEC | MSR_VSX)@h
+ mtmsrd r5
+
+ /*
+ * The user may change these outside of a transaction, so they must
+ * always be context switched.
+ */
+ ld r5, VCPU_TFHAR(r4)
+ ld r6, VCPU_TFIAR(r4)
+ ld r7, VCPU_TEXASR(r4)
+ mtspr SPRN_TFHAR, r5
+ mtspr SPRN_TFIAR, r6
+ mtspr SPRN_TEXASR, r7
+
+ ld r5, VCPU_MSR(r4)
+ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ beqlr /* TM not active in guest */
+ std r1, HSTATE_HOST_R1(r13)
+
+ /* Make sure the failure summary is set, otherwise we'll program check
+ * when we trechkpt. It's possible that this might have been not set
+ * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+ * host.
+ */
+ oris r7, r7, (TEXASR_FS)@h
+ mtspr SPRN_TEXASR, r7
+
+ /*
+ * We need to load up the checkpointed state for the guest.
+ * We need to do this early as it will blow away any GPRs, VSRs and
+ * some SPRs.
+ */
+
+ mr r31, r4
+ addi r3, r31, VCPU_FPRS_TM
+ bl load_fp_state
+ addi r3, r31, VCPU_VRS_TM
+ bl load_vr_state
+ mr r4, r31
+ lwz r7, VCPU_VRSAVE_TM(r4)
+ mtspr SPRN_VRSAVE, r7
+
+ ld r5, VCPU_LR_TM(r4)
+ lwz r6, VCPU_CR_TM(r4)
+ ld r7, VCPU_CTR_TM(r4)
+ ld r8, VCPU_AMR_TM(r4)
+ ld r9, VCPU_TAR_TM(r4)
+ mtlr r5
+ mtcr r6
+ mtctr r7
+ mtspr SPRN_AMR, r8
+ mtspr SPRN_TAR, r9
+
+ /*
+ * Load up PPR and DSCR values but don't put them in the actual SPRs
+ * till the last moment to avoid running with userspace PPR and DSCR for
+ * too long.
+ */
+ ld r29, VCPU_DSCR_TM(r4)
+ ld r30, VCPU_PPR_TM(r4)
+
+ std r2, PACATMSCRATCH(r13) /* Save TOC */
+
+ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /* Load GPRs r0-r28 */
+ reg = 0
+ .rept 29
+ ld reg, VCPU_GPRS_TM(reg)(r31)
+ reg = reg + 1
+ .endr
+
+ mtspr SPRN_DSCR, r29
+ mtspr SPRN_PPR, r30
+
+ /* Load final GPRs */
+ ld 29, VCPU_GPRS_TM(29)(r31)
+ ld 30, VCPU_GPRS_TM(30)(r31)
+ ld 31, VCPU_GPRS_TM(31)(r31)
+
+ /* TM checkpointed state is now setup. All GPRs are now volatile. */
+ TRECHKPT
+
+ /* Now let's get back the state we need. */
+ HMT_MEDIUM
+ GET_PACA(r13)
+ ld r29, HSTATE_DSCR(r13)
+ mtspr SPRN_DSCR, r29
+ ld r4, HSTATE_KVM_VCPU(r13)
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATMSCRATCH(r13)
+
+ /* Set the MSR RI since we have our registers back. */
+ li r5, MSR_RI
+ mtmsrd r5, 1
+
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+#endif
+
/*
* We come here if we get any exception or interrupt while we are
* executing host real mode code while in guest MMU context.
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3a55f493c7da..60530fd93d6d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -117,6 +117,7 @@ config S390
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_EARLY_PFN_TO_NID
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index b8045b97f4fb..d750cc0dfe30 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -669,11 +669,13 @@ static const struct file_operations prng_tdes_fops = {
static struct miscdevice prng_sha512_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
+ .mode = 0644,
.fops = &prng_sha512_fops,
};
static struct miscdevice prng_tdes_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
+ .mode = 0644,
.fops = &prng_tdes_fops,
};
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 1aac41e83ea1..92df3eb8d14e 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -23,6 +23,8 @@ enum zpci_ioat_dtype {
#define ZPCI_IOTA_FS_2G 2
#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+#define ZPCI_TABLE_SIZE_RT (1UL << 42)
+
#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index b1f0a90f933b..42570d8fb265 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2070,13 +2070,6 @@ void s390_reset_system(void (*fn_pre)(void),
S390_lowcore.program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
- /*
- * Clear subchannel ID and number to signal new kernel that no CCW or
- * SCSI IPL has been done (for kexec and kdump)
- */
- S390_lowcore.subchannel_id = 0;
- S390_lowcore.subchannel_nr = 0;
-
/* Store status at absolute zero */
store_status();
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index ae4de559e3a0..6986c20166f0 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ check_object_size(to, n, false);
if (static_branch_likely(&have_mvcos))
return copy_from_user_mvcos(to, from, n);
return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ check_object_size(from, n, true);
if (static_branch_likely(&have_mvcos))
return copy_to_user_mvcos(to, from, n);
return copy_to_user_mvcs(to, from, n);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 19442395f413..f2f6720a3331 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -701,8 +701,7 @@ static int zpci_restore(struct device *dev)
goto out;
zpci_map_resources(pdev);
- zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
- zdev->start_dma + zdev->iommu_size - 1,
+ zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
out:
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index d348f2c09a1e..3a40f718baef 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -458,7 +458,19 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
goto out_clean;
}
- zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
+ /*
+ * Restrict the iommu bitmap size to the minimum of the following:
+ * - main memory size
+ * - 3-level pagetable address limit minus start_dma offset
+ * - DMA address range allowed by the hardware (clp query pci fn)
+ *
+ * Also set zdev->end_dma to the actual end address of the usable
+ * range, instead of the theoretical maximum as reported by hardware.
+ */
+ zdev->iommu_size = min3((u64) high_memory,
+ ZPCI_TABLE_SIZE_RT - zdev->start_dma,
+ zdev->end_dma - zdev->start_dma + 1);
+ zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
@@ -466,10 +478,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
goto out_reg;
}
- rc = zpci_register_ioat(zdev,
- 0,
- zdev->start_dma + PAGE_OFFSET,
- zdev->start_dma + zdev->iommu_size - 1,
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
goto out_reg;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 56442d2d7bbc..3736be630113 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
select ARCH_HAS_SG_CHAIN
+ select HAVE_ARCH_HARDENED_USERCOPY
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 64ee103dc29d..4cfb77913cd2 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -313,22 +313,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (n && __access_ok((unsigned long) to, n))
+ if (n && __access_ok((unsigned long) to, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
- else
+ } else
return n;
}
static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
}
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (n && __access_ok((unsigned long) from, n))
+ if (n && __access_ok((unsigned long) from, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n);
- else
+ } else
return n;
}
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index ea6e9a20f3ff..6069e9040388 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -250,8 +250,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_from_user(to, from, size);
+ unsigned long ret;
+ if (!__builtin_constant_p(size))
+ check_object_size(to, size, false);
+
+ ret = ___copy_from_user(to, from, size);
if (unlikely(ret))
ret = copy_from_user_fixup(to, from, size);
@@ -267,8 +271,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
- unsigned long ret = ___copy_to_user(to, from, size);
+ unsigned long ret;
+ if (!__builtin_constant_p(size))
+ check_object_size(from, size, true);
+ ret = ___copy_to_user(to, from, size);
if (unlikely(ret))
ret = copy_to_user_fixup(to, from, size);
return ret;
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 1dd5bd8a8c59..133055311dce 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -81,7 +81,7 @@
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
+ .exit.text : { EXIT_TEXT }
.exit.data : { *(.exit.data) }
.preinit_array : {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 737967e2cf28..b893a99d9b52 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -80,6 +80,7 @@ config X86
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_AOUT if X86_32
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
@@ -89,7 +90,7 @@ config X86
select HAVE_ARCH_SOFT_DIRTY if X86_64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_BPF_JIT if X86_64
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index f17705e1332c..e62f4401e792 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -294,7 +294,7 @@
# 285 sys_setaltroot
286 i386 add_key sys_add_key
287 i386 request_key sys_request_key
-288 i386 keyctl sys_keyctl
+288 i386 keyctl sys_keyctl compat_sys_keyctl
289 i386 ioprio_set sys_ioprio_set
290 i386 ioprio_get sys_ioprio_get
291 i386 inotify_init sys_inotify_init
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index b94f6f64e23d..dbff1456d215 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -24,6 +24,7 @@
#define _ASM_X86_MTRR_H
#include <uapi/asm/mtrr.h>
+#include <asm/pat.h>
/*
@@ -83,9 +84,12 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
+static inline void mtrr_bp_init(void)
+{
+ pat_disable("MTRRs disabled, skipping PAT initialization too.");
+}
#define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index ca6c228d5e62..0b1ff4c1c14e 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -5,8 +5,8 @@
#include <asm/pgtable_types.h>
bool pat_enabled(void);
+void pat_disable(const char *reason);
extern void pat_init(void);
-void pat_init_cache_modes(u64);
extern int reserve_memtype(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7a6bed5c08bc..baad72e4c100 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -76,6 +76,8 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
u8 ret_flags;
version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
offset = pvclock_get_nsec_offset(src);
ret = src->system_time + offset;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index c7b551028740..0c977fc124a7 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -177,6 +177,50 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ * 1 if within a frame
+ * -1 if placed across a frame boundary (or outside stack)
+ * 0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+ const void *frame = NULL;
+ const void *oldframe;
+
+ oldframe = __builtin_frame_address(1);
+ if (oldframe)
+ frame = __builtin_frame_address(2);
+ /*
+ * low ----------------------------------------------> high
+ * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+ * ^----------------^
+ * allow copies only within here
+ */
+ while (stack <= frame && frame < stackend) {
+ /*
+ * If obj + len extends past the last frame, this
+ * check won't pass and the next frame will be 0,
+ * causing us to bail out and correctly report
+ * the copy as invalid.
+ */
+ if (obj + len <= frame)
+ return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+ oldframe = frame;
+ frame = *(const void * const *)frame;
+ }
+ return -1;
+#else
+ return 0;
+#endif
+}
+
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6df2029405a3..3142218e546f 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -86,7 +86,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
static inline void __native_flush_tlb(void)
{
+ /*
+ * If current->mm == NULL then we borrow a mm which may change during a
+ * task switch and therefore we must not be preempted while we write CR3
+ * back:
+ */
+ preempt_disable();
native_write_cr3(native_read_cr3());
+ preempt_enable();
}
static inline void __native_flush_tlb_global_irq_disabled(void)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 09b1b0ab94b7..dbe64f27280e 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -134,6 +134,9 @@ extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
+#define __uaccess_begin() stac()
+#define __uaccess_end() clac()
+
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -193,10 +196,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
- "3: " ASM_CLAC "\n" \
+ "3:" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
@@ -207,10 +210,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
: "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
- "3: " ASM_CLAC "\n" \
+ "3:" \
_ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE_EX(2b, 3b) \
: : "A" (x), "r" (addr))
@@ -304,6 +307,10 @@ do { \
} \
} while (0)
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
@@ -358,9 +365,9 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: mov"itype" %2,%"rtype"1\n" \
- "2: " ASM_CLAC "\n" \
+ "2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -370,6 +377,10 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
@@ -400,7 +411,9 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
+ __uaccess_begin(); \
__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+ __uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})
@@ -408,7 +421,9 @@ do { \
({ \
int __gu_err; \
unsigned long __gu_val; \
+ __uaccess_begin(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ __uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
})
@@ -423,9 +438,9 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: mov"itype" %"rtype"1,%2\n" \
- "2: " ASM_CLAC "\n" \
+ "2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
@@ -445,11 +460,11 @@ struct __large_struct { unsigned long buf[100]; };
*/
#define uaccess_try do { \
current_thread_info()->uaccess_err = 0; \
- stac(); \
+ __uaccess_begin(); \
barrier();
#define uaccess_catch(err) \
- clac(); \
+ __uaccess_end(); \
(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)
@@ -547,12 +562,13 @@ extern void __cmpxchg_wrong_size(void)
__typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
+ __uaccess_begin(); \
switch (size) { \
case 1: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -566,9 +582,9 @@ extern void __cmpxchg_wrong_size(void)
} \
case 2: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -582,9 +598,9 @@ extern void __cmpxchg_wrong_size(void)
} \
case 4: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -601,9 +617,9 @@ extern void __cmpxchg_wrong_size(void)
if (!IS_ENABLED(CONFIG_X86_64)) \
__cmpxchg_wrong_size(); \
\
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -618,6 +634,7 @@ extern void __cmpxchg_wrong_size(void)
default: \
__cmpxchg_wrong_size(); \
} \
+ __uaccess_end(); \
*__uval = __old; \
__ret; \
})
@@ -689,7 +706,7 @@ __copy_from_user_overflow(int size, unsigned long count)
#endif
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
@@ -714,9 +731,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
* case, and do only runtime checking for non-constant sizes.
*/
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_from_user_overflow();
else
__copy_from_user_overflow(sz, n);
@@ -724,7 +742,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
return n;
}
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
@@ -732,9 +750,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
might_fault();
/* See the comment in copy_from_user() above. */
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_to_user_overflow();
else
__copy_to_user_overflow(sz, n);
@@ -745,5 +764,30 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
#undef __copy_from_user_overflow
#undef __copy_to_user_overflow
+/*
+ * The "unsafe" user accesses aren't really "unsafe", but the naming
+ * is a big fat warning: you have to not only do the access_ok()
+ * checking before using them, but you have to surround them with the
+ * user_access_begin/end() pair.
+ */
+#define user_access_begin() __uaccess_begin()
+#define user_access_end() __uaccess_end()
+
+#define unsafe_put_user(x, ptr, err_label) \
+do { \
+ int __pu_err; \
+ __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
+ if (unlikely(__pu_err)) goto err_label; \
+} while (0)
+
+#define unsafe_get_user(x, ptr, err_label) \
+do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (unlikely(__gu_err)) goto err_label; \
+} while (0)
+
#endif /* _ASM_X86_UACCESS_H */
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb5204dcd..7d3bdd1ed697 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,38 +33,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
* the specified block with access_ok() before calling this function.
* The caller should also make sure he pins the user space address
* so that we don't result in page fault and sleep.
- *
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
*/
-
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
- switch (n) {
- case 1:
- __put_user_size(*(u8 *)from, (u8 __user *)to,
- 1, ret, 1);
- return ret;
- case 2:
- __put_user_size(*(u16 *)from, (u16 __user *)to,
- 2, ret, 2);
- return ret;
- case 4:
- __put_user_size(*(u32 *)from, (u32 __user *)to,
- 4, ret, 4);
- return ret;
- case 8:
- __put_user_size(*(u64 *)from, (u64 __user *)to,
- 8, ret, 8);
- return ret;
- }
- }
+ check_object_size(from, n, true);
return __copy_to_user_ll(to, from, n);
}
@@ -93,26 +66,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- /* Avoid zeroing the tail if the copy fails..
- * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
- * but as the zeroing behaviour is only significant when n is not
- * constant, that shouldn't be a problem.
- */
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
- switch (n) {
- case 1:
- __get_user_size(*(u8 *)to, from, 1, ret, 1);
- return ret;
- case 2:
- __get_user_size(*(u16 *)to, from, 2, ret, 2);
- return ret;
- case 4:
- __get_user_size(*(u32 *)to, from, 4, ret, 4);
- return ret;
- }
- }
return __copy_from_user_ll_nozero(to, from, n);
}
@@ -143,18 +96,25 @@ static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
+ check_object_size(to, n, false);
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -170,13 +130,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index f2f9b39b274a..2957c8237c28 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -53,38 +53,53 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
+ check_object_size(dst, size, false);
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ case 1:
+ __uaccess_begin();
+ __get_user_asm(*(u8 *)dst, (u8 __user *)src,
ret, "b", "b", "=q", 1);
+ __uaccess_end();
return ret;
- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+ case 2:
+ __uaccess_begin();
+ __get_user_asm(*(u16 *)dst, (u16 __user *)src,
ret, "w", "w", "=r", 2);
+ __uaccess_end();
return ret;
- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+ case 4:
+ __uaccess_begin();
+ __get_user_asm(*(u32 *)dst, (u32 __user *)src,
ret, "l", "k", "=r", 4);
+ __uaccess_end();
return ret;
- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ case 8:
+ __uaccess_begin();
+ __get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 8);
+ __uaccess_end();
return ret;
case 10:
+ __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 10);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u16 *)(8 + (char *)dst),
- (u16 __user *)(8 + (char __user *)src),
- ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+ (u16 __user *)(8 + (char __user *)src),
+ ret, "w", "w", "=r", 2);
+ __uaccess_end();
return ret;
case 16:
+ __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u64 *)(8 + (char *)dst),
- (u64 __user *)(8 + (char __user *)src),
- ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+ (u64 __user *)(8 + (char __user *)src),
+ ret, "q", "", "=r", 8);
+ __uaccess_end();
return ret;
default:
return copy_user_generic(dst, (__force void *)src, size);
@@ -103,38 +118,55 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
+ check_object_size(src, size, true);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ case 1:
+ __uaccess_begin();
+ __put_user_asm(*(u8 *)src, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
+ __uaccess_end();
return ret;
- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+ case 2:
+ __uaccess_begin();
+ __put_user_asm(*(u16 *)src, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
+ __uaccess_end();
return ret;
- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+ case 4:
+ __uaccess_begin();
+ __put_user_asm(*(u32 *)src, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
+ __uaccess_end();
return ret;
- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ case 8:
+ __uaccess_begin();
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 8);
+ __uaccess_end();
return ret;
case 10:
+ __uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 10);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ }
+ __uaccess_end();
return ret;
case 16:
+ __uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 16);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
- ret, "q", "", "er", 8);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ }
+ __uaccess_end();
return ret;
default:
return copy_user_generic((__force void *)dst, src, size);
@@ -160,39 +192,47 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
switch (size) {
case 1: {
u8 tmp;
+ __uaccess_begin();
__get_user_asm(tmp, (u8 __user *)src,
ret, "b", "b", "=q", 1);
if (likely(!ret))
__put_user_asm(tmp, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
+ __uaccess_end();
return ret;
}
case 2: {
u16 tmp;
+ __uaccess_begin();
__get_user_asm(tmp, (u16 __user *)src,
ret, "w", "w", "=r", 2);
if (likely(!ret))
__put_user_asm(tmp, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
+ __uaccess_end();
return ret;
}
case 4: {
u32 tmp;
+ __uaccess_begin();
__get_user_asm(tmp, (u32 __user *)src,
ret, "l", "k", "=r", 4);
if (likely(!ret))
__put_user_asm(tmp, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
+ __uaccess_end();
return ret;
}
case 8: {
u64 tmp;
+ __uaccess_begin();
__get_user_asm(tmp, (u64 __user *)src,
ret, "q", "", "=r", 8);
if (likely(!ret))
__put_user_asm(tmp, (u64 __user *)dst,
ret, "q", "", "er", 8);
+ __uaccess_end();
return ret;
}
default:
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2f69e3b184f6..a3e1f8497f8c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1587,6 +1587,9 @@ void __init enable_IR_x2apic(void)
unsigned long flags;
int ret, ir_stat;
+ if (skip_ioapic_setup)
+ return;
+
ir_stat = irq_remapping_prepare();
if (ir_stat < 0 && !x2apic_supported())
return;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 20e242ea1bc4..cfc4a966e2b9 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static unsigned char hv_get_nmi_reason(void)
+{
+ return 0;
+}
+
static void __init ms_hyperv_init_platform(void)
{
/*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
mark_tsc_unstable("running on Hyper-V");
+
+ /*
+ * Generation 2 instances don't support reading the NMI status from
+ * 0x61 port.
+ */
+ if (efi_enabled(EFI_BOOT))
+ x86_platform.get_nmi_reason = hv_get_nmi_reason;
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 3b533cf37c74..b5624fafa44a 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -444,11 +444,24 @@ static void __init print_mtrr_state(void)
pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}
+/* PAT setup for BP. We need to go through sync steps here */
+void __init mtrr_bp_pat_init(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ prepare_set();
+
+ pat_init();
+
+ post_set();
+ local_irq_restore(flags);
+}
+
/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{
struct mtrr_var_range *vrs;
- unsigned long flags;
unsigned lo, dummy;
unsigned int i;
@@ -481,15 +494,6 @@ bool __init get_mtrr_state(void)
mtrr_state_set = 1;
- /* PAT setup for BP. We need to go through sync steps here */
- local_irq_save(flags);
- prepare_set();
-
- pat_init();
-
- post_set();
- local_irq_restore(flags);
-
return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index f891b4750f04..fa77ac8291f0 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -752,6 +752,9 @@ void __init mtrr_bp_init(void)
/* BIOS may override */
__mtrr_enabled = get_mtrr_state();
+ if (mtrr_enabled())
+ mtrr_bp_pat_init();
+
if (mtrr_cleanup(phys_addr)) {
changed_by_mtrr_cleanup = 1;
mtrr_if->set_all();
@@ -759,8 +762,16 @@ void __init mtrr_bp_init(void)
}
}
- if (!mtrr_enabled())
+ if (!mtrr_enabled()) {
pr_info("MTRR: Disabled\n");
+
+ /*
+ * PAT initialization relies on MTRR's rendezvous handler.
+ * Skip PAT init until the handler can initialize both
+ * features independently.
+ */
+ pat_disable("MTRRs disabled, skipping PAT initialization too.");
+ }
}
void mtrr_ap_init(void)
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 951884dcc433..6c7ced07d16d 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -52,6 +52,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
bool get_mtrr_state(void);
+void mtrr_bp_pat_init(void);
extern void set_mtrr_ops(const struct mtrr_ops *ops);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index a316ca96f1b6..fc704ed587e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -211,6 +211,20 @@ static void __put_rmid(u32 rmid)
list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
}
+static void cqm_cleanup(void)
+{
+ int i;
+
+ if (!cqm_rmid_ptrs)
+ return;
+
+ for (i = 0; i < cqm_max_rmid; i++)
+ kfree(cqm_rmid_ptrs[i]);
+
+ kfree(cqm_rmid_ptrs);
+ cqm_rmid_ptrs = NULL;
+}
+
static int intel_cqm_setup_rmid_cache(void)
{
struct cqm_rmid_entry *entry;
@@ -218,7 +232,7 @@ static int intel_cqm_setup_rmid_cache(void)
int r = 0;
nr_rmids = cqm_max_rmid + 1;
- cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
+ cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
nr_rmids, GFP_KERNEL);
if (!cqm_rmid_ptrs)
return -ENOMEM;
@@ -249,11 +263,9 @@ static int intel_cqm_setup_rmid_cache(void)
mutex_unlock(&cache_mutex);
return 0;
-fail:
- while (r--)
- kfree(cqm_rmid_ptrs[r]);
- kfree(cqm_rmid_ptrs);
+fail:
+ cqm_cleanup();
return -ENOMEM;
}
@@ -281,9 +293,13 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
/*
* Events that target same task are placed into the same cache group.
+ * Mark it as a multi event group, so that we update ->count
+ * for every event rather than just the group leader later.
*/
- if (a->hw.target == b->hw.target)
+ if (a->hw.target == b->hw.target) {
+ b->hw.is_group_event = true;
return true;
+ }
/*
* Are we an inherited event?
@@ -849,6 +865,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
bool conflict = false;
u32 rmid;
+ event->hw.is_group_event = false;
list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
rmid = iter->hw.cqm_rmid;
@@ -940,7 +957,9 @@ static u64 intel_cqm_event_count(struct perf_event *event)
return __perf_event_count(event);
/*
- * Only the group leader gets to report values. This stops us
+ * Only the group leader gets to report values except in case of
+ * multiple events in the same group, we still need to read the
+	 * other events. This stops us
* reporting duplicate values to userspace, and gives us a clear
* rule for which task gets to report the values.
*
@@ -948,7 +967,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
* specific packages - we forfeit that ability when we create
* task events.
*/
- if (!cqm_group_leader(event))
+ if (!cqm_group_leader(event) && !event->hw.is_group_event)
return 0;
/*
@@ -1315,7 +1334,7 @@ static const struct x86_cpu_id intel_cqm_match[] = {
static int __init intel_cqm_init(void)
{
- char *str, scale[20];
+ char *str = NULL, scale[20];
int i, cpu, ret;
if (!x86_match_cpu(intel_cqm_match))
@@ -1375,16 +1394,25 @@ static int __init intel_cqm_init(void)
cqm_pick_event_reader(i);
}
- __perf_cpu_notifier(intel_cqm_cpu_notifier);
-
ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
- if (ret)
+ if (ret) {
pr_err("Intel CQM perf registration failed: %d\n", ret);
- else
- pr_info("Intel CQM monitoring enabled\n");
+ goto out;
+ }
+
+ pr_info("Intel CQM monitoring enabled\n");
+ /*
+ * Register the hot cpu notifier once we are sure cqm
+ * is enabled to avoid notifier leak.
+ */
+ __perf_cpu_notifier(intel_cqm_cpu_notifier);
out:
cpu_notifier_register_done();
+ if (ret) {
+ kfree(str);
+ cqm_cleanup();
+ }
return ret;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 7abb2b88572e..1e7de3cefc9c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -1110,6 +1110,13 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
void *at;
u64 pebs_status;
+ /*
+ * fmt0 does not have a status bitfield (does not use
+ * perf_record_nhm format)
+ */
+ if (x86_pmu.intel_cap.pebs_format < 1)
+ return base;
+
if (base == NULL)
return NULL;
@@ -1195,7 +1202,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
if (!event->attr.precise_ip)
return;
- n = (top - at) / x86_pmu.pebs_record_size;
+ n = top - at;
if (n <= 0)
return;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index db9a675e751b..9fdf1d330727 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -11,7 +11,11 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
@@ -21,6 +25,9 @@
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg) pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
static void __init fix_hypertransport_config(int num, int slot, int func)
{
@@ -76,6 +83,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
/*
+ * Only applies to Nvidia root ports (bus 0) and not to
+ * Nvidia graphics cards with PCI ports on secondary buses.
+ */
+ if (num)
+ return;
+
+ /*
* All timer overrides on Nvidia are
* wrong unless HPET is enabled.
* Unfortunately that's not true on many Asus boards.
@@ -589,6 +603,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
#endif
}
+#define BCM4331_MMIO_SIZE 16384
+#define BCM4331_PM_CAP 0x40
+#define bcma_aread32(reg) ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val) iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+ void __iomem *mmio;
+ u16 pmcsr;
+ u64 addr;
+ int i;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+ return;
+
+ /* Card may have been put into PCI_D3hot by grub quirk */
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+ mdelay(10);
+
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ dev_err("Cannot power up Apple AirPort card\n");
+ return;
+ }
+ }
+
+ addr = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+ addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+ addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+ if (!mmio) {
+ dev_err("Cannot iomap Apple AirPort card\n");
+ return;
+ }
+
+ pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
+ for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+ udelay(10);
+
+ bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(1);
+
+ bcma_awrite32(BCMA_RESET_CTL, 0);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(10);
+
+ early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
@@ -602,12 +671,6 @@ struct chipset {
void (*f)(int num, int slot, int func);
};
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -637,9 +700,13 @@ static struct chipset early_qrk[] __initdata = {
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_BROADCOM, 0x4331,
+ PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
};
+static void __init early_pci_scan_bus(int bus);
+
/**
* check_dev_quirk - apply early quirks to a given PCI device
* @num: bus number
@@ -648,7 +715,7 @@ static struct chipset early_qrk[] __initdata = {
*
* Check the vendor & device ID against the early quirks table.
*
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
* poke at this device again.
*/
static int __init check_dev_quirk(int num, int slot, int func)
@@ -657,6 +724,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
u16 vendor;
u16 device;
u8 type;
+ u8 sec;
int i;
class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
@@ -684,25 +752,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
type = read_pci_config_byte(num, slot, func,
PCI_HEADER_TYPE);
+
+ if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+ sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+ if (sec > num)
+ early_pci_scan_bus(sec);
+ }
+
if (!(type & 0x80))
return -1;
return 0;
}
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
{
int slot, func;
- if (!early_pci_allowed())
- return;
-
/* Poor man's PCI discovery */
- /* Only scan the root bus */
for (slot = 0; slot < 32; slot++)
for (func = 0; func < 8; func++) {
/* Only probe function 0 on single fn devices */
- if (check_dev_quirk(0, slot, func))
+ if (check_dev_quirk(bus, slot, func))
break;
}
}
+
+void __init early_quirks(void)
+{
+ if (!early_pci_allowed())
+ return;
+
+ early_pci_scan_bus(0);
+}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2f355d229a58..bf0ce75735b0 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -66,6 +66,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
do {
version = __pvclock_read_cycles(src, &ret, &flags);
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
return flags & valid_flags;
@@ -80,6 +82,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
do {
version = __pvclock_read_cycles(src, &ret, &flags);
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index bf4db6eaec8f..c6aace2bbe08 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
*cursor &= 0xfe;
}
/*
- * Similar treatment for VEX3 prefix.
- * TODO: add XOP/EVEX treatment when insn decoder supports them
+ * Similar treatment for VEX3/EVEX prefix.
+ * TODO: add XOP treatment when insn decoder supports them
*/
- if (insn->vex_prefix.nbytes == 3) {
+ if (insn->vex_prefix.nbytes >= 3) {
/*
* vex2: c5 rvvvvLpp (has no b bit)
* vex3/xop: c4/8f rxbmmmmm wvvvvLpp
* evex: 62 rxbR00mm wvvvv1pp zllBVaaa
- * (evex will need setting of both b and x since
- * in non-sib encoding evex.x is 4th bit of MODRM.rm)
- * Setting VEX3.b (setting because it has inverted meaning):
+ * Setting VEX3.b (setting because it has inverted meaning).
+ * Setting EVEX.x since (in non-SIB encoding) EVEX.x
+ * is the 4th bit of MODRM.rm, and needs the same treatment.
+ * For VEX3-encoded insns, VEX3.x value has no effect in
+ * non-SIB encoding, the change is superfluous but harmless.
*/
cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
- *cursor |= 0x20;
+ *cursor |= 0x60;
}
/*
@@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
reg = MODRM_REG(insn); /* Fetch modrm.reg */
reg2 = 0xff; /* Fetch vex.vvvv */
- if (insn->vex_prefix.nbytes == 2)
- reg2 = insn->vex_prefix.bytes[1];
- else if (insn->vex_prefix.nbytes == 3)
+ if (insn->vex_prefix.nbytes)
reg2 = insn->vex_prefix.bytes[2];
/*
- * TODO: add XOP, EXEV vvvv reading.
+ * TODO: add XOP vvvv reading.
*
* vex.vvvv field is in bits 6-3, bits are inverted.
* But in 32-bit mode, high-order bit may be ignored.
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index c146f3c262c3..0149ac59c273 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter)
iter->fixed = false;
iter->start_max = iter->start;
+ iter->range = NULL;
iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
__mtrr_lookup_var_next(iter);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 41e7943004fe..4589b6feeb7b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8124,6 +8124,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
+ exit_reason != EXIT_REASON_PML_FULL &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
@@ -8736,6 +8737,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
put_cpu();
}
+/*
+ * Ensure that the current vmcs of the logical processor is the
+ * vmcs01 of the vcpu before calling free_nested().
+ */
+static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = vcpu_load(vcpu);
+ BUG_ON(r);
+ vmx_load_vmcs01(vcpu);
+ free_nested(vmx);
+ vcpu_put(vcpu);
+}
+
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8744,8 +8761,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
vmx_destroy_pml_buffer(vmx);
free_vpid(vmx->vpid);
leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- free_nested(vmx);
+ vmx_free_vcpu_nested(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 72bb52f93c3d..d2dc0438d654 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -94,18 +94,6 @@ static unsigned long mmap_base(unsigned long rnd)
}
/*
- * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
- * does, but not when emulating X86_32
- */
-static unsigned long mmap_legacy_base(unsigned long rnd)
-{
- if (mmap_is_ia32())
- return TASK_UNMAPPED_BASE;
- else
- return TASK_UNMAPPED_BASE + rnd;
-}
-
-/*
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
@@ -116,7 +104,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
if (current->flags & PF_RANDOMIZE)
random_factor = arch_mmap_rnd();
- mm->mmap_legacy_base = mmap_legacy_base(random_factor);
+ mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor;
if (mmap_is_legacy()) {
mm->mmap_base = mm->mmap_legacy_base;
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 188e3e07eeeb..6ad687d104ca 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -39,11 +39,22 @@
static bool boot_cpu_done;
static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
+static void init_cache_modes(void);
-static inline void pat_disable(const char *reason)
+void pat_disable(const char *reason)
{
+ if (!__pat_enabled)
+ return;
+
+ if (boot_cpu_done) {
+ WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
+ return;
+ }
+
__pat_enabled = 0;
pr_info("x86/PAT: %s\n", reason);
+
+ init_cache_modes();
}
static int __init nopat(char *str)
@@ -180,7 +191,7 @@ static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
* configuration.
* Using lower indices is preferred, so we start with highest index.
*/
-void pat_init_cache_modes(u64 pat)
+static void __init_cache_modes(u64 pat)
{
enum page_cache_mode cache;
char pat_msg[33];
@@ -201,14 +212,11 @@ static void pat_bsp_init(u64 pat)
{
u64 tmp_pat;
- if (!cpu_has_pat) {
+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
pat_disable("PAT not supported by CPU.");
return;
}
- if (!pat_enabled())
- goto done;
-
rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
if (!tmp_pat) {
pat_disable("PAT MSR is 0, disabled.");
@@ -217,16 +225,12 @@ static void pat_bsp_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
-done:
- pat_init_cache_modes(pat);
+ __init_cache_modes(pat);
}
static void pat_ap_init(u64 pat)
{
- if (!pat_enabled())
- return;
-
- if (!cpu_has_pat) {
+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
/*
* If this happens we are on a secondary CPU, but switched to
* PAT on the boot CPU. We have no way to undo PAT.
@@ -237,18 +241,32 @@ static void pat_ap_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-void pat_init(void)
+static void init_cache_modes(void)
{
- u64 pat;
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ u64 pat = 0;
+ static int init_cm_done;
- if (!pat_enabled()) {
+ if (init_cm_done)
+ return;
+
+ if (boot_cpu_has(X86_FEATURE_PAT)) {
+ /*
+ * CPU supports PAT. Set PAT table to be consistent with
+ * PAT MSR. This case supports "nopat" boot option, and
+ * virtual machine environments which support PAT without
+ * MTRRs. In specific, Xen has unique setup to PAT MSR.
+ *
+ * If PAT MSR returns 0, it is considered invalid and emulates
+ * as No PAT.
+ */
+ rdmsrl(MSR_IA32_CR_PAT, pat);
+ }
+
+ if (!pat) {
/*
* No PAT. Emulate the PAT table that corresponds to the two
- * cache bits, PWT (Write Through) and PCD (Cache Disable). This
- * setup is the same as the BIOS default setup when the system
- * has PAT but the "nopat" boot option has been specified. This
- * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
+ * cache bits, PWT (Write Through) and PCD (Cache Disable).
+ * This setup is also the same as the BIOS default setup.
*
* PTE encoding:
*
@@ -265,10 +283,36 @@ void pat_init(void)
*/
pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+ }
+
+ __init_cache_modes(pat);
+
+ init_cm_done = 1;
+}
+
+/**
+ * pat_init - Initialize PAT MSR and PAT table
+ *
+ * This function initializes PAT MSR and PAT table with an OS-defined value
+ * to enable additional cache attributes, WC and WT.
+ *
+ * This function must be called on all CPUs using the specific sequence of
+ * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
+ * procedure for PAT.
+ */
+void pat_init(void)
+{
+ u64 pat;
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ if (!pat_enabled()) {
+ init_cache_modes();
+ return;
+ }
- } else if ((c->x86_vendor == X86_VENDOR_INTEL) &&
- (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
- ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
+ if ((c->x86_vendor == X86_VENDOR_INTEL) &&
+ (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
+ ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
/*
* PAT support with the lower four entries. Intel Pentium 2,
* 3, M, and 4 are affected by PAT errata, which makes the
@@ -733,25 +777,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (file->f_flags & O_DSYNC)
pcm = _PAGE_CACHE_MODE_UC_MINUS;
-#ifdef CONFIG_X86_32
- /*
- * On the PPro and successors, the MTRRs are used to set
- * memory types for physical addresses outside main memory,
- * so blindly setting UC or PWT on those pages is wrong.
- * For Pentiums and earlier, the surround logic should disable
- * caching for the high addresses through the KEN pin, but
- * we maintain the tradition of paranoia in this code.
- */
- if (!pat_enabled() &&
- !(boot_cpu_has(X86_FEATURE_MTRR) ||
- boot_cpu_has(X86_FEATURE_K6_MTRR) ||
- boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
- boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
- (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
- pcm = _PAGE_CACHE_MODE_UC;
- }
-#endif
-
*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
cachemode2protval(pcm));
return 1;
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 8b93e634af84..ae97f24a4371 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -37,6 +37,7 @@
/* Quirks for the listed devices */
#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_MRFL_HSU 0x1191
/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
@@ -225,13 +226,20 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
/* Special treatment for IRQ0 */
if (dev->irq == 0) {
/*
+ * Skip HS UART common registers device since it has
+ * IRQ0 assigned and not used by the kernel.
+ */
+ if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU)
+ return -EBUSY;
+ /*
* TNG has IRQ0 assigned to eMMC controller. But there
* are also other devices with bogus PCI configuration
* that have IRQ0 assigned. This check ensures that
- * eMMC gets it.
+ * eMMC gets it. The rest of devices still could be
+ * enabled without interrupt line being allocated.
*/
if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
- return -EBUSY;
+ return 0;
}
break;
default:
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index beab8c706ac9..ffa41591bff9 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -74,7 +74,6 @@
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
-#include <asm/pat.h>
#include <asm/cpu.h>
#ifdef CONFIG_ACPI
@@ -1519,7 +1518,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
{
struct physdev_set_iopl set_iopl;
unsigned long initrd_start = 0;
- u64 pat;
int rc;
if (!xen_start_info)
@@ -1627,13 +1625,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_start_info->nr_pages);
xen_reserve_special_pages();
- /*
- * Modify the cache mode translation tables to match Xen's PAT
- * configuration.
- */
- rdmsrl(MSR_IA32_CR_PAT, pat);
- pat_init_cache_modes(pat);
-
/* keep using Xen gdt for now; no urgent need to change it */
#ifdef CONFIG_X86_32
diff --git a/backported-features b/backported-features
new file mode 100644
index 000000000000..b680ed49292f
--- /dev/null
+++ b/backported-features
@@ -0,0 +1,14 @@
+ LSK backported features
+
+1, The kaslr and kaslr-pax_usercopy branches are based on LSK directly.
+ v4.4/topic/mm-kaslr
+ v4.4/topic/mm-kaslr-pax_usercopy
+
+2, Coresight and openCSD are used to implement the 'perf' tool for the Juno board.
+ origin/v4.4/topic/coresight
+ origin/v4.4/topic/perf-opencsd-4.4-github
+
+3, OPTEE is based on LSK mainline, but isn't included in mainline.
+
+Feature introduction:
+https://wiki.linaro.org/lsk/features
diff --git a/block/bio.c b/block/bio.c
index b57f7818709d..b9829b6504c8 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -585,6 +585,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
bio->bi_dio_inode = bio_src->bi_dio_inode;
+
+ bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -690,6 +692,8 @@ integrity_clone:
}
}
+ bio_clone_blkcg_association(bio, bio_src);
+
return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);
@@ -2015,6 +2019,17 @@ void bio_disassociate_task(struct bio *bio)
}
}
+/**
+ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * @dst: destination bio
+ * @src: source bio
+ */
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+{
+ if (src->bi_css)
+ WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+}
+
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
diff --git a/block/blk-core.c b/block/blk-core.c
index 5101e6a7bd48..450da06fa27e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -40,6 +40,8 @@
#include "blk.h"
#include "blk-mq.h"
+#include <linux/math64.h>
+
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -515,7 +517,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
void blk_set_queue_dying(struct request_queue *q)
{
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
if (q->mq_ops)
blk_mq_wake_waiters(q);
@@ -3577,3 +3581,85 @@ int __init blk_dev_init(void)
return 0;
}
+
+/*
+ * Blk IO latency support. We want this to be as cheap as possible, so doing
+ * this lockless (and avoiding atomics), a few off by a few errors in this
+ * code is not harmful, and we don't want to do anything that is
+ * perf-impactful.
+ * TODO : If necessary, we can make the histograms per-cpu and aggregate
+ * them when printing them out.
+ */
+void
+blk_zero_latency_hist(struct io_latency_state *s)
+{
+ memset(s->latency_y_axis_read, 0,
+ sizeof(s->latency_y_axis_read));
+ memset(s->latency_y_axis_write, 0,
+ sizeof(s->latency_y_axis_write));
+ s->latency_reads_elems = 0;
+ s->latency_writes_elems = 0;
+}
+EXPORT_SYMBOL(blk_zero_latency_hist);
+
+ssize_t
+blk_latency_hist_show(struct io_latency_state *s, char *buf)
+{
+ int i;
+ int bytes_written = 0;
+ u_int64_t num_elem, elem;
+ int pct;
+
+ num_elem = s->latency_reads_elems;
+ if (num_elem > 0) {
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "IO svc_time Read Latency Histogram (n = %llu):\n",
+ num_elem);
+ for (i = 0;
+ i < ARRAY_SIZE(latency_x_axis_us);
+ i++) {
+ elem = s->latency_y_axis_read[i];
+ pct = div64_u64(elem * 100, num_elem);
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "\t< %5lluus%15llu%15d%%\n",
+ latency_x_axis_us[i],
+ elem, pct);
+ }
+ /* Last element in y-axis table is overflow */
+ elem = s->latency_y_axis_read[i];
+ pct = div64_u64(elem * 100, num_elem);
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "\t> %5dms%15llu%15d%%\n", 10,
+ elem, pct);
+ }
+ num_elem = s->latency_writes_elems;
+ if (num_elem > 0) {
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "IO svc_time Write Latency Histogram (n = %llu):\n",
+ num_elem);
+ for (i = 0;
+ i < ARRAY_SIZE(latency_x_axis_us);
+ i++) {
+ elem = s->latency_y_axis_write[i];
+ pct = div64_u64(elem * 100, num_elem);
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "\t< %5lluus%15llu%15d%%\n",
+ latency_x_axis_us[i],
+ elem, pct);
+ }
+ /* Last element in y-axis table is overflow */
+ elem = s->latency_y_axis_write[i];
+ pct = div64_u64(elem * 100, num_elem);
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "\t> %5dms%15llu%15d%%\n", 10,
+ elem, pct);
+ }
+ return bytes_written;
+}
+EXPORT_SYMBOL(blk_latency_hist_show);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2b27747b46d3..7e79974b8b4d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -94,9 +94,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ unsigned bvecs = 0;
bio_for_each_segment(bv, bio, iter) {
/*
+ * With arbitrary bio size, the incoming bio may be very
+ * big. We have to split the bio into small bios so that
+ * each holds at most BIO_MAX_PAGES bvecs because
+ * bio_clone() can fail to allocate big bvecs.
+ *
+ * It should have been better to apply the limit per
+ * request queue in which bio_clone() is involved,
+ * instead of globally. The biggest blocker is the
+ * bio_clone() in bio bounce.
+ *
+ * If bio is splitted by this reason, we should have
+ * allowed to continue bios merging, but don't do
+ * that now for making the change simple.
+ *
+ * TODO: deal with bio bounce's bio_clone() gracefully
+ * and convert the global limit into per-queue limit.
+ */
+ if (bvecs++ >= BIO_MAX_PAGES)
+ goto split;
+
+ /*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8398e18d4139..10d1318c1128 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -601,8 +601,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* If a request wasn't started before the queue was
* marked dying, kill it here or it'll go unnoticed.
*/
- if (unlikely(blk_queue_dying(rq->q)))
- blk_mq_complete_request(rq, -EIO);
+ if (unlikely(blk_queue_dying(rq->q))) {
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ }
return;
}
if (rq->cmd_flags & REQ_NO_TIMEOUT)
diff --git a/block/genhd.c b/block/genhd.c
index 82bc52cad1c1..fad9db981675 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -612,7 +612,7 @@ void add_disk(struct gendisk *disk)
/* Register BDI before referencing it from bdev */
bdi = &disk->queue->backing_dev_info;
- bdi_register_dev(bdi, disk_devt(disk));
+ bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index bec329b3de8d..d9ea5f9c0574 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ CRYPTO_ALG_TYPE_AHASH_MASK |
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (IS_ERR(ghash_alg))
return PTR_ERR(ghash_alg);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index ea5815c5e128..bc769c448d4a 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -72,7 +72,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
- if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
+ if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
+ !(walk->offset & (PAGE_SIZE - 1)))
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6730f965b379..0afd1981e350 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -216,8 +216,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
cpc_ptr = per_cpu(cpc_desc_ptr, i);
- if (!cpc_ptr)
- continue;
+ if (!cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
pdomain = &(cpc_ptr->domain_info);
cpumask_set_cpu(i, pr->shared_cpu_map);
@@ -239,8 +241,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -270,8 +274,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -502,9 +508,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
- /* Plug it into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
if (ret)
@@ -517,6 +520,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
+ /* Plug PSD data into this CPUs CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
+
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b420fb46669d..43f20328f830 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -101,6 +101,7 @@ enum ec_command {
#define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */
#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
* when trying to clear the EC */
+#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
+module_param(ec_max_queries, uint, 0644);
+MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
+
static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
@@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work);
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
+static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
@@ -1097,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
* work queue execution.
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- if (!schedule_work(&q->work)) {
+ if (!queue_work(ec_query_wq, &q->work)) {
ec_dbg_evt("Query(0x%02x) overlapped", value);
result = -EBUSY;
}
@@ -1657,15 +1663,41 @@ static struct acpi_driver acpi_ec_driver = {
},
};
+static inline int acpi_ec_query_init(void)
+{
+ if (!ec_query_wq) {
+ ec_query_wq = alloc_workqueue("kec_query", 0,
+ ec_max_queries);
+ if (!ec_query_wq)
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static inline void acpi_ec_query_exit(void)
+{
+ if (ec_query_wq) {
+ destroy_workqueue(ec_query_wq);
+ ec_query_wq = NULL;
+ }
+}
+
int __init acpi_ec_init(void)
{
- int result = 0;
+ int result;
+ /* register workqueue for _Qxx evaluations */
+ result = acpi_ec_query_init();
+ if (result)
+ goto err_exit;
/* Now register the driver for the EC */
result = acpi_bus_register_driver(&acpi_ec_driver);
- if (result < 0)
- return -ENODEV;
+ if (result)
+ goto err_exit;
+err_exit:
+ if (result)
+ acpi_ec_query_exit();
return result;
}
@@ -1675,5 +1707,6 @@ static void __exit acpi_ec_exit(void)
{
acpi_bus_unregister_driver(&acpi_ec_driver);
+ acpi_ec_query_exit();
}
#endif /* 0 */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 11d8209e6e5d..5230e8449d30 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1072,11 +1072,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+ const u32 STATUS_MASK = 0x80000037;
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
- return readl(mmio->addr.base + offset);
+ return readl(mmio->addr.base + offset) & STATUS_MASK;
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 72b6e9ef0ae9..d176e0ece470 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -327,10 +327,18 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
- acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
+ struct acpi_subtable_proc srat_proc[2];
+
+ memset(srat_proc, 0, sizeof(srat_proc));
+ srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
+ srat_proc[0].handler = acpi_parse_processor_affinity;
+ srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
+ srat_proc[1].handler = acpi_parse_x2apic_affinity;
+
+ acpi_table_parse_entries_array(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ srat_proc, ARRAY_SIZE(srat_proc), 0);
+
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 78d5f02a073b..dcb3d6245ca5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1958,7 +1958,7 @@ int __init acpi_scan_init(void)
static struct acpi_probe_entry *ape;
static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
static int __init acpi_match_madt(struct acpi_subtable_header *header,
const unsigned long end)
@@ -1977,7 +1977,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
if (acpi_disabled)
return 0;
- spin_lock(&acpi_probe_lock);
+ mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
@@ -1990,7 +1990,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
count++;
}
}
- spin_unlock(&acpi_probe_lock);
+ mutex_unlock(&acpi_probe_mutex);
return count;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0243d375c6fd..4b3a9e27f1b6 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -555,23 +555,22 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
static int get_status(u32 index, acpi_event_status *status,
acpi_handle *handle)
{
- int result = 0;
+ int result;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
- goto end;
+ return -EINVAL;
if (index < num_gpes) {
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
"Invalid GPE 0x%x", index));
- goto end;
+ return result;
}
result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
-end:
return result;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b79cb10e289e..bd370c98f77d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4138,6 +4138,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ /*
+ * Device times out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 38f156745d53..71df8f2afc6c 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -8,8 +8,6 @@
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
-#define BCMA_CORE_SIZE 0x1000
-
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 79107597a594..c306b483de60 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2056,12 +2056,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
- * supported by this firmware loading method. This check has been
- * put in place to ensure correct forward compatibility options
- * when newer hardware variants come along.
+ /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
+ * and 0x0c (WsP) are supported by this firmware loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
*/
- if (ver->hw_variant != 0x0b) {
+ if (ver->hw_variant != 0x0b && ver->hw_variant != 0x0c) {
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
hdev->name, ver->hw_variant);
kfree_skb(skb);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 4a414a5a3165..b9065506a847 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1234,8 +1234,7 @@ static int intel_probe(struct platform_device *pdev)
idev->pdev = pdev;
- idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(idev->reset)) {
dev_err(&pdev->dev, "Unable to retrieve gpio\n");
return PTR_ERR(idev->reset);
@@ -1247,8 +1246,7 @@ static int intel_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
- host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
- GPIOD_IN);
+ host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
if (IS_ERR(host_wake)) {
dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
goto no_irq;
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index f0cd6cf3967d..7af9d8184f97 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1444,6 +1444,7 @@ void diag_dci_notify_client(int peripheral_mask, int data, int proc)
dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
/* Notify the DCI process that the peripheral DCI Channel is up */
+ mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
if (entry->client_info.token != proc)
@@ -1466,6 +1467,7 @@ void diag_dci_notify_client(int peripheral_mask, int data, int proc)
info.si_int, stat);
}
}
+ mutex_unlock(&driver->dci_mutex);
}
static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
@@ -1938,6 +1940,7 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
reg_entry.cmd_code_hi = header->subsys_cmd_code;
reg_entry.cmd_code_lo = header->subsys_cmd_code;
+ mutex_lock(&driver->cmd_reg_mutex);
temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
if (temp_entry) {
reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
@@ -1949,6 +1952,7 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
reg_entry.cmd_code, reg_entry.subsys_id,
reg_entry.cmd_code_hi);
}
+ mutex_unlock(&driver->cmd_reg_mutex);
return ret;
}
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index da5a477375da..c552f263d7e5 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -356,8 +356,8 @@ int diag_md_init()
ch->tbl[j].buf = NULL;
ch->tbl[j].len = 0;
ch->tbl[j].ctx = 0;
- spin_lock_init(&(ch->lock));
}
+ spin_lock_init(&(ch->lock));
}
return 0;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 35cbe3b6b596..67db49badf21 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -65,11 +65,13 @@
#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
#define DIAG_CON_SENSORS (0x0010) /* Bit mask for Sensors */
#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
+#define DIAG_CON_CDSP (0x0040)
#define DIAG_CON_NONE (0x0000) /* Bit mask for No SS*/
#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
| DIAG_CON_LPASS | DIAG_CON_WCNSS \
- | DIAG_CON_SENSORS | DIAG_CON_WDSP)
+ | DIAG_CON_SENSORS | DIAG_CON_WDSP \
+ | DIAG_CON_CDSP)
#define DIAG_STM_MODEM 0x01
#define DIAG_STM_LPASS 0x02
@@ -77,6 +79,7 @@
#define DIAG_STM_APPS 0x08
#define DIAG_STM_SENSORS 0x10
#define DIAG_STM_WDSP 0x20
+#define DIAG_STM_CDSP 0x40
#define INVALID_PID -1
#define DIAG_CMD_FOUND 1
@@ -202,7 +205,8 @@
#define PERIPHERAL_WCNSS 2
#define PERIPHERAL_SENSORS 3
#define PERIPHERAL_WDSP 4
-#define NUM_PERIPHERALS 5
+#define PERIPHERAL_CDSP 5
+#define NUM_PERIPHERALS 6
#define APPS_DATA (NUM_PERIPHERALS)
/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 9d0955289796..85be81e88696 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -385,6 +385,8 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
ret |= DIAG_CON_SENSORS;
if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
ret |= DIAG_CON_WDSP;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
+ ret |= DIAG_CON_CDSP;
return ret;
}
@@ -1525,6 +1527,8 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask)
ret |= (1 << PERIPHERAL_SENSORS);
if (peripheral_mask & DIAG_CON_WDSP)
ret |= (1 << PERIPHERAL_WDSP);
+ if (peripheral_mask & DIAG_CON_CDSP)
+ ret |= (1 << PERIPHERAL_CDSP);
return ret;
}
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index ce523ac35a51..e761e6ec4e39 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -64,6 +64,13 @@ struct diag_glink_info glink_data[NUM_PERIPHERALS] = {
.edge = "wdsp",
.name = "DIAG_DATA",
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DATA,
+ .edge = "cdsp",
+ .name = "DIAG_DATA",
+ .hdl = NULL
}
};
@@ -102,6 +109,13 @@ struct diag_glink_info glink_cntl[NUM_PERIPHERALS] = {
.edge = "wdsp",
.name = "DIAG_CTRL",
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CNTL,
+ .edge = "cdsp",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
}
};
@@ -140,6 +154,13 @@ struct diag_glink_info glink_dci[NUM_PERIPHERALS] = {
.edge = "wdsp",
.name = "DIAG_DCI_DATA",
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI,
+ .edge = "cdsp",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
}
};
@@ -178,6 +199,13 @@ struct diag_glink_info glink_cmd[NUM_PERIPHERALS] = {
.edge = "wdsp",
.name = "DIAG_CMD",
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CMD,
+ .edge = "cdsp",
+ .name = "DIAG_CMD",
+ .hdl = NULL
}
};
@@ -216,6 +244,13 @@ struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS] = {
.edge = "wdsp",
.name = "DIAG_DCI_CMD",
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI_CMD,
+ .edge = "cdsp",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
}
};
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index cbbab86a9425..b5f360ac5158 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -28,14 +28,16 @@
((x == PERIPHERAL_LPASS) ? DIAG_CON_LPASS : \
((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS : \
((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : \
- ((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : 0))))) \
+ ((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : \
+ ((x == PERIPHERAL_CDSP) ? DIAG_CON_CDSP : 0)))))) \
#define PERIPHERAL_STRING(x) \
((x == PERIPHERAL_MODEM) ? "MODEM" : \
((x == PERIPHERAL_LPASS) ? "LPASS" : \
((x == PERIPHERAL_WCNSS) ? "WCNSS" : \
((x == PERIPHERAL_SENSORS) ? "SENSORS" : \
- ((x == PERIPHERAL_WDSP) ? "WDSP" : "UNKNOWN"))))) \
+ ((x == PERIPHERAL_WDSP) ? "WDSP" : \
+ ((x == PERIPHERAL_CDSP) ? "CDSP" : "UNKNOWN")))))) \
struct diagfwd_buf_t {
unsigned char *data;
diff --git a/drivers/char/diag/diagfwd_smd.c b/drivers/char/diag/diagfwd_smd.c
index f0698f0814d6..51ab58b99fdd 100644
--- a/drivers/char/diag/diagfwd_smd.c
+++ b/drivers/char/diag/diagfwd_smd.c
@@ -54,6 +54,11 @@ struct diag_smd_info smd_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_DATA,
.name = "DIAG_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DATA,
+ .name = "CDSP_DATA"
}
};
@@ -82,6 +87,11 @@ struct diag_smd_info smd_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_CNTL,
.name = "DIAG_CTRL"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CNTL,
+ .name = "CDSP_CNTL"
}
};
@@ -110,6 +120,11 @@ struct diag_smd_info smd_dci[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_DCI,
.name = "DIAG_DCI_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI,
+ .name = "CDSP_DCI"
}
};
@@ -138,6 +153,11 @@ struct diag_smd_info smd_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_CMD,
.name = "DIAG_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CMD,
+ .name = "CDSP_CMD"
}
};
@@ -166,6 +186,11 @@ struct diag_smd_info smd_dci_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_DCI_CMD,
.name = "DIAG_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "CDSP_DCI_CMD"
}
};
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 2f9ec51a17ba..bedb56e745dc 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -41,6 +41,7 @@
#define WCNSS_INST_BASE 128
#define SENSORS_INST_BASE 192
#define WDSP_INST_BASE 256
+#define CDSP_INST_BASE 320
#define INST_ID_CNTL 0
#define INST_ID_CMD 1
@@ -75,6 +76,11 @@ struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_DATA,
.name = "DIAG_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DATA,
+ .name = "CDSP_DATA"
}
};
@@ -103,6 +109,11 @@ struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_CNTL,
.name = "DIAG_CTRL"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CNTL,
+ .name = "CDSP_CNTL"
}
};
@@ -131,6 +142,11 @@ struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_DCI,
.name = "DIAG_DCI_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI,
+ .name = "CDSP_DCI"
}
};
@@ -159,6 +175,11 @@ struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_CMD,
.name = "DIAG_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CMD,
+ .name = "CDSP_CMD"
}
};
@@ -188,7 +209,12 @@ struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WDSP,
.type = TYPE_DCI_CMD,
.name = "DIAG_DCI_CMD"
- }
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "CDSP_DCI_CMD"
+ },
};
static void diag_state_open_socket(void *ctxt);
@@ -741,6 +767,9 @@ static void __diag_socket_init(struct diag_socket_info *info)
case PERIPHERAL_WDSP:
ins_base = WDSP_INST_BASE;
break;
+ case PERIPHERAL_CDSP:
+ ins_base = CDSP_INST_BASE;
+ break;
}
switch (info->type) {
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index aa30af5f0f2b..7845a38b6604 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -118,6 +118,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
struct resource *res;
+ int ret;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@@ -145,7 +146,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ if (ret) {
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
}
#ifdef CONFIG_PM
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b583e5336630..d93dfebae0bb 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -722,15 +722,18 @@ retry:
}
}
-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+ if (nbits < 0)
+ return -EINVAL;
+
/* Cap the value to avoid overflows */
nbits = min(nbits, nbits_max);
- nbits = max(nbits, -nbits_max);
credit_entropy_bits(r, nbits);
+ return 0;
}
/*********************************************************************
@@ -945,6 +948,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
/* award one bit for the contents of the fast pool */
credit_entropy_bits(r, credit + 1);
}
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
@@ -1457,12 +1461,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
+ static int maxwarn = 10;
int ret;
- if (unlikely(nonblocking_pool.initialized == 0))
- printk_once(KERN_NOTICE "random: %s urandom read "
- "with %d bits of entropy available\n",
- current->comm, nonblocking_pool.entropy_total);
+ if (unlikely(nonblocking_pool.initialized == 0) &&
+ maxwarn > 0) {
+ maxwarn--;
+ printk(KERN_NOTICE "random: %s: uninitialized urandom read "
+ "(%zd bytes read, %d bits of entropy available)\n",
+ current->comm, nbytes, nonblocking_pool.entropy_total);
+ }
nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
@@ -1542,8 +1550,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1557,8 +1564,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
size);
if (retval < 0)
return retval;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1868,12 +1874,18 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
{
struct entropy_store *poolp = &input_pool;
- /* Suspend writing if we're above the trickle threshold.
- * We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
- */
- wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+ if (unlikely(nonblocking_pool.initialized == 0))
+ poolp = &nonblocking_pool;
+ else {
+ /* Suspend writing if we're above the trickle
+ * threshold. We'll be woken up again once below
+ * random_write_wakeup_thresh, or when the calling
+ * thread is about to terminate.
+ */
+ wait_event_interruptible(random_write_wait,
+ kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
+ }
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
}
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 27c0da29eca3..10224b01b97c 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -351,7 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* Set new divider */
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
- data &= ~((1 << pclk->param.reg_divider_width) - 1);
+ data &= ~((1 << pclk->param.reg_divider_width) - 1)
+ << pclk->param.reg_divider_shift;
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 7094fb4d40af..9e1036c19760 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -1850,9 +1850,9 @@ static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
/* Enable or disable LLM VOLT DVCS */
regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&pwrcl_clk, val, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&perfcl_clk, val, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
/* Wait for the writes to complete */
clk_osm_mb(&perfcl_clk, OSM_BASE);
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c b/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
index 029f779979c7..c60c4864442f 100644
--- a/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
@@ -494,7 +494,7 @@ static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate)
_W(pll, SYSCLK_EN_SEL, 0x37);
_W(pll, SYS_CLK_CTRL, 0x2);
_W(pll, CLK_ENABLE1, 0xE);
- _W(pll, PLL_IVCO, 0xF);
+ _W(pll, PLL_IVCO, 0x7);
_W(pll, VCO_TUNE_CTRL, 0x0);
_W(pll, SVS_MODE_CLK_SEL, cfg.svs_mode_clk_sel);
_W(pll, CLK_SELECT, 0x30);
@@ -536,10 +536,10 @@ static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate)
_W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
_W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x0);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x3);
_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), 0x0);
_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), 0x0);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x0);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x3);
_W(phy, PHY_MODE, cfg.phy_mode);
@@ -627,8 +627,6 @@ static int hdmi_8998_pll_enable(struct clk *c)
_W(phy, PHY_CFG, 0x59);
udelay(100);
- _W(phy, PHY_CLOCK, 0x6);
-
/* Ensure all registers are flushed to hardware */
wmb();
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index cffaf46d732f..096e16db02fe 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -248,6 +248,73 @@ const struct clk_ops clk_branch2_ops = {
};
EXPORT_SYMBOL_GPL(clk_branch2_ops);
+static int clk_branch2_hw_ctl_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * Make sure the branch clock has CLK_SET_RATE_PARENT flag,
+ * and the RCG has FORCE_ENABLE_RCGR flag set.
+ */
+ if (!(hw->init->flags & CLK_SET_RATE_PARENT)) {
+ pr_err("set rate would not get propagated to parent\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long clk_branch2_hw_ctl_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate;
+}
+
+static int clk_branch2_hw_ctl_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *clkp;
+
+ clkp = __clk_get_hw(clk_get_parent(hw->clk));
+
+ req->best_parent_hw = clkp;
+ req->best_parent_rate = clk_round_rate(clkp->clk, req->rate);
+
+ return 0;
+}
+
+static int clk_branch2_hw_ctl_enable(struct clk_hw *hw)
+{
+ struct clk_hw *parent = __clk_get_hw(clk_get_parent(hw->clk));
+
+ /* The parent branch clock should have been prepared prior to this. */
+ if (!parent || (parent && !clk_hw_is_prepared(parent)))
+ return -EINVAL;
+
+ return clk_enable_regmap(hw);
+}
+
+static void clk_branch2_hw_ctl_disable(struct clk_hw *hw)
+{
+ struct clk_hw *parent = __clk_get_hw(clk_get_parent(hw->clk));
+
+ if (!parent)
+ return;
+
+ clk_disable_regmap(hw);
+}
+
+const struct clk_ops clk_branch2_hw_ctl_ops = {
+ .enable = clk_branch2_hw_ctl_enable,
+ .disable = clk_branch2_hw_ctl_disable,
+ .is_enabled = clk_is_enabled_regmap,
+ .set_rate = clk_branch2_hw_ctl_set_rate,
+ .recalc_rate = clk_branch2_hw_ctl_recalc_rate,
+ .determine_rate = clk_branch2_hw_ctl_determine_rate,
+ .set_flags = clk_branch_set_flags,
+ .list_registers = clk_branch2_list_registers,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_hw_ctl_ops);
+
static int clk_gate_toggle(struct clk_hw *hw, bool en)
{
struct clk_gate2 *gt = to_clk_gate2(hw);
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 8a934cf8bed1..b67ac1dfbbf9 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -62,6 +62,7 @@ extern const struct clk_ops clk_branch_ops;
extern const struct clk_ops clk_branch2_ops;
extern const struct clk_ops clk_gate2_ops;
extern const struct clk_ops clk_branch_simple_ops;
+extern const struct clk_ops clk_branch2_hw_ctl_ops;
#define to_clk_branch(_hw) \
container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 020bd351bbd8..accdac9fb964 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -171,10 +171,12 @@ struct clk_rcg2 {
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
unsigned long current_freq;
+ u32 new_index;
+ u32 curr_index;
struct clk_regmap clkr;
-#define FORCE_ENABLE_RCGR BIT(0)
u8 flags;
+#define FORCE_ENABLE_RCGR BIT(0)
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 4104a238c088..653722f9c4b0 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -89,30 +89,6 @@ static int clk_rcg_set_force_enable(struct clk_hw *hw)
return ret;
}
-static int clk_rcg2_enable(struct clk_hw *hw)
-{
- int ret = 0;
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- if (rcg->flags & FORCE_ENABLE_RCGR)
- ret = clk_rcg_set_force_enable(hw);
-
- return ret;
-}
-
-static void clk_rcg2_disable(struct clk_hw *hw)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- if (rcg->flags & FORCE_ENABLE_RCGR) {
- /* force disable RCG - clear CMD_ROOT_EN bit */
- regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + CMD_REG, CMD_ROOT_EN, 0);
- /* Add a delay to disable the RCG */
- udelay(100);
- }
-}
-
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -381,16 +357,178 @@ static long clk_rcg2_list_rate(struct clk_hw *hw, unsigned n,
return (rcg->freq_tbl + n)->freq;
}
+static int prepare_enable_rcg_srcs(struct clk_hw *hw, struct clk *curr,
+ struct clk *new)
+{
+ int rc = 0;
+
+ rc = clk_prepare(curr);
+ if (rc)
+ return rc;
+
+ if (clk_hw_is_prepared(hw)) {
+ rc = clk_prepare(new);
+ if (rc)
+ goto err_new_src_prepare;
+ }
+
+ rc = clk_prepare(new);
+ if (rc)
+ goto err_new_src_prepare2;
+
+ rc = clk_enable(curr);
+ if (rc)
+ goto err_curr_src_enable;
+
+ if (__clk_get_enable_count(hw->clk)) {
+ rc = clk_enable(new);
+ if (rc)
+ goto err_new_src_enable;
+ }
+
+ rc = clk_enable(new);
+ if (rc)
+ goto err_new_src_enable2;
+
+ return rc;
+
+err_new_src_enable2:
+ if (__clk_get_enable_count(hw->clk))
+ clk_disable(new);
+err_new_src_enable:
+ clk_disable(curr);
+err_curr_src_enable:
+ clk_unprepare(new);
+err_new_src_prepare2:
+ if (clk_hw_is_prepared(hw))
+ clk_unprepare(new);
+err_new_src_prepare:
+ clk_unprepare(curr);
+
+ return rc;
+}
+
+static void disable_unprepare_rcg_srcs(struct clk_hw *hw, struct clk *curr,
+ struct clk *new)
+{
+ clk_disable(new);
+
+ clk_disable(curr);
+
+ if (__clk_get_enable_count(hw->clk))
+ clk_disable(new);
+
+ clk_unprepare(new);
+ clk_unprepare(curr);
+
+ if (clk_hw_is_prepared(hw))
+ clk_unprepare(new);
+}
+
+static struct freq_tbl cxo_f = {
+ .freq = 19200000,
+ .src = 0,
+ .pre_div = 1,
+ .m = 0,
+ .n = 0,
+};
+
+static int clk_enable_disable_prepare_unprepare(struct clk_hw *hw, int cindex,
+ int nindex, bool enable)
+{
+ struct clk_hw *new_p, *curr_p;
+
+ curr_p = clk_hw_get_parent_by_index(hw, cindex);
+ new_p = clk_hw_get_parent_by_index(hw, nindex);
+
+ if (enable)
+ return prepare_enable_rcg_srcs(hw, curr_p->clk, new_p->clk);
+
+ disable_unprepare_rcg_srcs(hw, curr_p->clk, new_p->clk);
+ return 0;
+}
+
+static int clk_rcg2_enable(struct clk_hw *hw)
+{
+ int ret = 0;
+ const struct freq_tbl *f;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->flags & FORCE_ENABLE_RCGR) {
+ if (!rcg->current_freq)
+ rcg->current_freq = cxo_f.freq;
+
+ if (rcg->current_freq == cxo_f.freq)
+ rcg->curr_index = 0;
+ else {
+ f = qcom_find_freq(rcg->freq_tbl, rcg->current_freq);
+ rcg->curr_index = qcom_find_src_index(hw,
+ rcg->parent_map, f->src);
+ }
+
+ ret = clk_enable_disable_prepare_unprepare(hw, rcg->curr_index,
+ rcg->new_index, true);
+ if (ret) {
+ pr_err("Failed to prepare_enable new and current sources\n");
+ return ret;
+ }
+
+ clk_rcg_set_force_enable(hw);
+
+ clk_enable_disable_prepare_unprepare(hw, rcg->curr_index,
+ rcg->new_index, false);
+ }
+
+ return ret;
+}
+
+static void clk_rcg2_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->flags & FORCE_ENABLE_RCGR) {
+ /* force disable RCG - clear CMD_ROOT_EN bit */
+ regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + CMD_REG, CMD_ROOT_EN, 0);
+ /* Add a delay to disable the RCG */
+ udelay(100);
+ }
+}
+
+
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f;
+ int ret = 0;
+
+ /* Current frequency */
+ if (rcg->flags & FORCE_ENABLE_RCGR)
+ rcg->current_freq = clk_get_rate(hw->clk);
f = qcom_find_freq(rcg->freq_tbl, rate);
if (!f)
return -EINVAL;
- return clk_rcg2_configure(rcg, f);
+ /* New parent index */
+ if (rcg->flags & FORCE_ENABLE_RCGR) {
+ rcg->new_index = qcom_find_src_index(hw,
+ rcg->parent_map, f->src);
+ ret = clk_rcg2_enable(hw);
+ if (ret) {
+ pr_err("Failed to enable rcg\n");
+ return ret;
+ }
+ }
+
+ ret = clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ if (rcg->flags & FORCE_ENABLE_RCGR)
+ clk_rcg2_disable(hw);
+
+ return ret;
}
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/qcom/mmcc-msmfalcon.c b/drivers/clk/qcom/mmcc-msmfalcon.c
index 1d874f6db464..ef4c8c264078 100644
--- a/drivers/clk/qcom/mmcc-msmfalcon.c
+++ b/drivers/clk/qcom/mmcc-msmfalcon.c
@@ -112,8 +112,8 @@ static const struct parent_map mmcc_parent_map_1[] = {
static const char * const mmcc_parent_names_1[] = {
"xo",
- "dsi0_phy_pll_out_byteclk",
- "dsi1_phy_pll_out_byteclk",
+ "dsi0pll_byte_clk_mux",
+ "dsi1pll_byte_clk_mux",
"core_bi_pll_test_se",
};
@@ -240,8 +240,8 @@ static const struct parent_map mmcc_parent_map_8[] = {
static const char * const mmcc_parent_names_8[] = {
"xo",
- "dsi0_phy_pll_out_dsiclk",
- "dsi1_phy_pll_out_dsiclk",
+ "dsi0pll_pixel_clk_mux",
+ "dsi1pll_pixel_clk_mux",
"core_bi_pll_test_se",
};
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 2685644826a0..33c20c6b45af 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -153,6 +153,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
return NULL;
init.name = name;
+ init.flags = 0;
init.num_parents = num_parents;
init.parent_names = parent_names;
init.ops = &rockchip_mmc_clk_ops;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 4c5c65e1e5d0..3b2f46bacd77 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -112,6 +112,14 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
loading your cpufreq low-level hardware driver, using the
'interactive' governor for latency-sensitive workloads.
+config CPU_FREQ_DEFAULT_GOV_SCHED
+ bool "sched"
+ select CPU_FREQ_GOV_SCHED
+ help
+ Use the CPUfreq governor 'sched' as default. This scales
+ cpu frequency using CPU utilization estimates from the
+ scheduler.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -218,6 +226,19 @@ config CPU_BOOST
If in doubt, say N.
+config CPU_FREQ_GOV_SCHED
+ bool "'sched' cpufreq governor"
+ depends on CPU_FREQ
+ depends on SMP
+ select CPU_FREQ_GOV_COMMON
+ help
+ 'sched' - this governor scales cpu frequency from the
+ scheduler as a function of cpu capacity utilization. It does
+ not evaluate utilization on a periodic basis (as ondemand
+ does) but instead is event-driven by the scheduler.
+
+ If in doubt, say N.
+
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d45cf584d23b..118dbf1cef44 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,6 +29,9 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
+#ifdef CONFIG_SMP
+#include <linux/sched.h>
+#endif
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);
@@ -164,6 +167,12 @@ bool have_governor_per_policy(void)
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
+bool cpufreq_driver_is_slow(void)
+{
+ return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
+
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
@@ -357,6 +366,50 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
#endif
}
+/*********************************************************************
+ * FREQUENCY INVARIANT CPU CAPACITY *
+ *********************************************************************/
+
+static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
+
+static void
+scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
+{
+ unsigned long cur = freqs ? freqs->new : policy->cur;
+ unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
+ struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
+ int cpu;
+
+ pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
+ cpumask_pr_args(policy->cpus), cur, policy->max, scale);
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(freq_scale, cpu) = scale;
+
+ if (freqs)
+ return;
+
+ scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
+
+ pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
+ cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
+ scale);
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(max_freq_scale, cpu) = scale;
+}
+
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(freq_scale, cpu);
+}
+
+unsigned long cpufreq_scale_max_freq_capacity(int cpu)
+{
+ return per_cpu(max_freq_scale, cpu);
+}
+
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
@@ -433,6 +486,9 @@ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs)
{
+#ifdef CONFIG_SMP
+ int cpu;
+#endif
/*
* Catch double invocations of _begin() which lead to self-deadlock.
@@ -460,6 +516,12 @@ wait:
spin_unlock(&policy->transition_lock);
+ scale_freq_capacity(policy, freqs);
+#ifdef CONFIG_SMP
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_capacity(capacity_curr_of(cpu), cpu);
+#endif
+
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
@@ -2140,6 +2202,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_NOTIFY, new_policy);
+ scale_freq_capacity(new_policy, NULL);
+
policy->min = new_policy->min;
policy->max = new_policy->max;
trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4dbf1db16aca..9cc8abd3d116 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX(userspace_mutex);
@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
+ unsigned int *setspeed = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
+ *setspeed = freq;
+
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
@@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cur);
}
+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed;
+
+ setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+ if (!setspeed)
+ return -ENOMEM;
+
+ policy->governor_data = setspeed;
+ return 0;
+}
+
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
+ unsigned int *setspeed = policy->governor_data;
unsigned int cpu = policy->cpu;
int rc = 0;
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ return cpufreq_userspace_policy_init(policy);
+
+ if (!setspeed)
+ return -EINVAL;
+
switch (event) {
+ case CPUFREQ_GOV_POLICY_EXIT:
+ mutex_lock(&userspace_mutex);
+ policy->governor_data = NULL;
+ kfree(setspeed);
+ mutex_unlock(&userspace_mutex);
+ break;
case CPUFREQ_GOV_START:
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", cpu);
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 1;
+ *setspeed = policy->cur;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 0;
+ *setspeed = 0;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
- cpu, policy->min, policy->max,
- policy->cur);
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ cpu, policy->min, policy->max, policy->cur, *setspeed);
- if (policy->max < policy->cur)
+ if (policy->max < *setspeed)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
+ else if (policy->min > *setspeed)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed,
+ CPUFREQ_RELATION_L);
mutex_unlock(&userspace_mutex);
break;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f53b02a6bc05..6e80e4298274 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -662,7 +662,7 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
- tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
err = rdmsrl_safe(tdp_msr, &tdp_ratio);
if (err)
goto skip_tar;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f437bc4431b..71ecc7924b58 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -192,7 +192,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
/* Take note of the planned idle state. */
- sched_idle_set_state(target_state);
+ sched_idle_set_state(target_state, index);
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
@@ -205,7 +205,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
- sched_idle_set_state(NULL);
+ sched_idle_set_state(NULL, -1);
if (broadcast) {
if (WARN_ON_ONCE(!irqs_disabled()))
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index de033cc37a15..81801605d6e7 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1132,11 +1132,11 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
goto failed_set_mode;
}
- us = us + 1;
+ us = (us + 1) * 1000;
clear_predict_history();
clear_cl_predict_history();
- do_div(us, USEC_PER_SEC/SCLK_HZ);
+ do_div(us, NSEC_PER_SEC/SCLK_HZ);
msm_mpm_enter_sleep(us, from_idle, cpumask);
}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea8189f4b021..b3044219772c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ if (!ctx->authsize)
+ return 0;
+
/* NULL encryption / decryption */
if (!ctx->enckeylen)
return aead_null_set_sh_desc(aead);
@@ -553,7 +556,10 @@ skip_enc:
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (alg->caam.geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -562,6 +568,14 @@ skip_enc:
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
+ if (alg->caam.geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -614,7 +628,7 @@ skip_enc:
keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
+ desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +659,13 @@ copy_iv:
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -697,7 +711,7 @@ copy_iv:
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
@@ -2147,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
- if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+ if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
@@ -2534,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
return ret;
}
-static int aead_givdecrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- unsigned int ivsize = crypto_aead_ivsize(aead);
-
- if (req->cryptlen < ivsize)
- return -EINVAL;
-
- req->cryptlen -= ivsize;
- req->assoclen += ivsize;
-
- return aead_decrypt(req);
-}
-
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -3207,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3253,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3299,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3345,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3391,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3437,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3483,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3531,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3579,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3627,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3675,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3723,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3769,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3815,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3861,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3907,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3953,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3999,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -4048,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -4099,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -4150,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -4201,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -4252,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -4303,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 49106ea42887..99d5e11db194 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1873,6 +1873,7 @@ caam_hash_alloc(struct caam_hash_template *template,
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 9ef51fafdbff..6e105e87b8ff 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
(unsigned int)ccw,
(unsigned int)be32_to_cpu(crb->ccw));
+ /*
+ * NX842 coprocessor sets 3rd bit in CR register with XER[S0].
+ * XER[S0] is the integer summary overflow bit which is nothing
+ * to do NX. Since this bit can be set with other return values,
+ * mask this bit.
+ */
+ ret &= ~ICSWX_XERS0;
+
switch (ret) {
case ICSWX_INITIATED:
ret = wait_for_csb(wmem, csb);
@@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
pr_err_ratelimited("ICSWX rejected\n");
ret = -EPROTO;
break;
- default:
- pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
- ret = -EPROTO;
- break;
}
if (!ret)
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 0794f1cc0018..42f0f229f7f7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev,
((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
i < msc->triplets;
i++) {
- if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
+ if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
dev_err(dev, "unknown function code/mode "
"combo: %d/%d (ignored)\n", msc->fc,
msc->mode);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59e4c3af15ed..367b6661ee04 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1262,8 +1262,8 @@ static struct crypto_alg qat_algs[] = { {
.setkey = qat_alg_ablkcipher_xts_setkey,
.decrypt = qat_alg_ablkcipher_decrypt,
.encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
},
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index f3801b983f42..3f8bb9a40df1 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = {
.cra_init = p8_aes_cbc_init,
.cra_exit = p8_aes_cbc_exit,
.cra_blkcipher = {
- .ivsize = 0,
+ .ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = p8_aes_cbc_setkey,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 404a1b69a3ab..72f138985e18 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = {
.cra_init = p8_aes_ctr_init,
.cra_exit = p8_aes_ctr_exit,
.cra_blkcipher = {
- .ivsize = 0,
+ .ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = p8_aes_ctr_setkey,
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index b9997335f193..b18e67d0e065 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -139,6 +139,26 @@ my $vmr = sub {
" vor $vx,$vy,$vy";
};
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
+my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " or $ra,$ra,$ra";
+ } else {
+ " mtspr $idx,$ra";
+ }
+};
+my $mfspr = sub {
+ my ($f,$rd,$idx) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " li $rd,-1";
+ } else {
+ " mfspr $rd,$idx";
+ }
+};
+
# PowerISA 2.06 stuff
sub vsxmem_op {
my ($f, $vrt, $ra, $rb, $op) = @_;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 02f9aa4ebe05..e44a1bfb0250 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
u32 mbr_dus; /* Destination Microblock Stride Register */
};
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
struct at_xdmac_lld lld;
enum dma_transfer_direction direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
unsigned int xfer_size;
struct list_head descs_list;
struct list_head xfer_node;
-};
+} __aligned(sizeof(u64));
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
@@ -1388,6 +1388,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
+ bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
@@ -1412,7 +1413,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
residue = desc->xfer_size;
/*
* Flush FIFO: only relevant when the transfer is source peripheral
- * synchronized.
+ * synchronized. Flush is needed before reading CUBC because data in
+ * the FIFO are not reported by CUBC. Reporting a residue of the
+ * transfer length while we have data in FIFO can cause issue.
+ * Usecase: atmel USART has a timeout which means I have received
+ * characters but there is no more character received for a while. On
+ * timeout, it requests the residue. If the data are in the DMA FIFO,
+ * we will return a residue of the transfer length. It means no data
+ * received. If an application is waiting for these data, it will hang
+ * since we won't have another USART timeout without receiving new
+ * data.
*/
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1423,34 +1433,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
}
/*
- * When processing the residue, we need to read two registers but we
- * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
- * we stand in the descriptor list and AT_XDMAC_CUBC is used
- * to know how many data are remaining for the current descriptor.
- * Since the dma channel is not paused to not loose data, between the
- * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
- * descriptor.
- * For that reason, after reading AT_XDMAC_CUBC, we check if we are
- * still using the same descriptor by reading a second time
- * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
- * read again AT_XDMAC_CUBC.
+ * The easiest way to compute the residue should be to pause the DMA
+ * but doing this can lead to miss some data as some devices don't
+ * have FIFO.
+ * We need to read several registers because:
+ * - DMA is running therefore a descriptor change is possible while
+ * reading these registers
+ * - When the block transfer is done, the value of the CUBC register
+ * is set to its initial value until the fetch of the next descriptor.
+ * This value will corrupt the residue calculation so we have to skip
+ * it.
+ *
+ * INITD -------- ------------
+ * |____________________|
+ * _______________________ _______________
+ * NDA @desc2 \/ @desc3
+ * _______________________/\_______________
+ * __________ ___________ _______________
+ * CUBC 0 \/ MAX desc1 \/ MAX desc2
+ * __________/\___________/\_______________
+ *
+ * Since descriptors are aligned on 64 bits, we can assume that
+ * the update of NDA and CUBC is atomic.
* Memory barriers are used to ensure the read order of the registers.
- * A max number of retries is set because unlikely it can never ends if
- * we are transferring a lot of data with small buffers.
+ * A max number of retries is set because unlikely it could never ends.
*/
- cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
- rmb();
- cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
- rmb();
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
- if (likely(cur_nda == check_nda))
- break;
-
- cur_nda = check_nda;
+ rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ rmb();
+ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+
+ if ((check_nda == cur_nda) && initd)
+ break;
}
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1459,6 +1478,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
}
/*
+ * Flush FIFO: only relevant when the transfer is source peripheral
+ * synchronized. Another flush is needed here because CUBC is updated
+ * when the controller sends the data write command. It can lead to
+ * report data that are not written in the memory or the device. The
+ * FIFO flush ensures that data are really written.
+ */
+ if ((desc->lld.mbr_cfg & mask) == value) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+ cpu_relax();
+ }
+
+ /*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
* microblock.
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f1bcc2a163b3..b1bc945f008f 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
{
struct usb_dmac_chan *chan = dev;
irqreturn_t ret = IRQ_NONE;
- u32 mask = USB_DMACHCR_TE;
- u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+ u32 mask = 0;
u32 chcr;
+ bool xfer_end = false;
spin_lock(&chan->vc.lock);
chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
- if (chcr & check_bits)
- mask |= USB_DMACHCR_DE | check_bits;
+ if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+ mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+ if (chcr & USB_DMACHCR_DE)
+ xfer_end = true;
+ ret |= IRQ_HANDLED;
+ }
if (chcr & USB_DMACHCR_NULL) {
/* An interruption of TE will happen after we set FTE */
mask |= USB_DMACHCR_NULL;
chcr |= USB_DMACHCR_FTE;
ret |= IRQ_HANDLED;
}
- usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+ if (mask)
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
- if (chcr & check_bits) {
+ if (xfer_end)
usb_dmac_isr_transfer_end(chan);
- ret |= IRQ_HANDLED;
- }
spin_unlock(&chan->vc.lock);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 1b2c2187b347..dc68394da682 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -966,7 +966,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
mci->ue_mc += count;
if (!enable_per_layer_report) {
- mci->ce_noinfo_count += count;
+ mci->ue_noinfo_count += count;
return;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 58aed67b7eba..3c8f19f5ac81 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = {
* possible dynamic channel DIMM Label attribute files
*
*/
-
DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
@@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 5);
+DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 6);
+DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 7);
/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
@@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
&dev_attr_legacy_ch3_dimm_label.attr.attr,
&dev_attr_legacy_ch4_dimm_label.attr.attr,
&dev_attr_legacy_ch5_dimm_label.attr.attr,
+ &dev_attr_legacy_ch6_dimm_label.attr.attr,
+ &dev_attr_legacy_ch7_dimm_label.attr.attr,
NULL
};
@@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 5);
+DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 6);
+DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 7);
/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
@@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
&dev_attr_legacy_ch3_ce_count.attr.attr,
&dev_attr_legacy_ch4_ce_count.attr.attr,
&dev_attr_legacy_ch5_ce_count.attr.attr,
+ &dev_attr_legacy_ch6_ce_count.attr.attr,
+ &dev_attr_legacy_ch7_ce_count.attr.attr,
NULL
};
@@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj,
if (idx >= csrow->nr_channels)
return 0;
+
+ if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
+ WARN_ONCE(1, "idx: %d\n", idx);
+ return 0;
+ }
+
/* Only expose populated DIMMs */
if (!csrow->channels[idx]->dimm->nr_pages)
return 0;
+
return attr->mode;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 438ffcbe6654..008b8babf31e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -50,6 +50,7 @@ config GPIO_DEVRES
config OF_GPIO
def_bool y
depends on OF
+ depends on HAS_IOMEM
config GPIO_ACPI
def_bool y
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 70097472b02c..c50e930d97d3 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -17,7 +17,6 @@
* Moorestown platform Langwell chip.
* Medfield platform Penwell chip.
* Clovertrail platform Cloverview chip.
- * Merrifield platform Tangier chip.
*/
#include <linux/module.h>
@@ -64,10 +63,6 @@ enum GPIO_REG {
/* intel_mid gpio driver data */
struct intel_mid_gpio_ddata {
u16 ngpio; /* number of gpio pins */
- u32 gplr_offset; /* offset of first GPLR register from base */
- u32 flis_base; /* base address of FLIS registers */
- u32 flis_len; /* length of FLIS registers */
- u32 (*get_flis_offset)(int gpio);
u32 chip_irq_type; /* chip interrupt type */
};
@@ -257,15 +252,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
};
-static const struct intel_mid_gpio_ddata gpio_tangier = {
- .ngpio = 192,
- .gplr_offset = 4,
- .flis_base = 0xff0c0000,
- .flis_len = 0x8000,
- .get_flis_offset = NULL,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
static const struct pci_device_id intel_gpio_ids[] = {
{
/* Lincroft */
@@ -292,11 +278,6 @@ static const struct pci_device_id intel_gpio_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
.driver_data = (kernel_ulong_t)&gpio_cloverview_core,
},
- {
- /* Tangier */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
- .driver_data = (kernel_ulong_t)&gpio_tangier,
- },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 2d4892cc70fb..c844d7eccb6c 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
#define MAX_BANK 5
#define BANK_SZ 8
-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
+#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
struct pca953x_chip {
unsigned gpio_start;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 053fc2f465df..ff5566c69f7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -710,9 +710,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9416e0f5c1db..51a9942cdb40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -331,6 +331,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ /* Skip TV/CV support */
+ if ((le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_TV1_SUPPORT) ||
+ (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT))
+ continue;
+
+ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+ con_obj_id, le16_to_cpu(path->usDeviceTag));
+ continue;
+ }
+
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
@@ -566,28 +579,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le16_to_cpu(firmware_info->info.usReferenceClock);
ppll->reference_div = 0;
- if (crev < 2)
- ppll->pll_out_min =
- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
- else
- ppll->pll_out_min =
- le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+ ppll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
ppll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
- ppll->lcd_pll_out_min =
- le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_min == 0)
- ppll->lcd_pll_out_min = ppll->pll_out_min;
- ppll->lcd_pll_out_max =
- le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_max == 0)
- ppll->lcd_pll_out_max = ppll->pll_out_max;
- } else {
+ ppll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_min == 0)
ppll->lcd_pll_out_min = ppll->pll_out_min;
+ ppll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_max == 0)
ppll->lcd_pll_out_max = ppll->pll_out_max;
- }
if (ppll->pll_out_min == 0)
ppll->pll_out_min = 64800;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 5a8fbadbd27b..29adbbe225c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "amdgpu_acpi.h"
@@ -256,6 +257,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 7ef2c13921b4..930083336968 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c961fe093e12..16302f7d59f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1793,7 +1793,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
+
+ /*
+ * Most of the connector probing functions try to acquire runtime pm
+ * refs to ensure that the GPU is powered on when connector polling is
+ * performed. Since we're calling this from a runtime PM callback,
+ * trying to acquire rpm refs will cause us to deadlock.
+ *
+ * Since we're guaranteed to be holding the rpm lock, it's safe to
+ * temporarily disable the rpm helpers so this doesn't deadlock us.
+ */
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth++;
+#endif
drm_helper_hpd_irq_event(dev);
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth--;
+#endif
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 7312d729d300..22a613a95bf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
@@ -269,7 +269,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 9e25edafa721..c77a1ebfc632 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -288,7 +288,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
- int r;
+ int r, ret = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -309,10 +309,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
} else {
/* still not good, but we can live with it */
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+ ret = r;
}
}
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1cbb16e15307..475c38fe9245 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -233,8 +233,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 92b6acadfc52..21aacc1f45c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
-static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
+static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
if (bpc == 0)
return 24;
@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
-/* get the max pix clock supported by the link rate and lane num */
-static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** amdgpu specific DP functions *****/
-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
+static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int max_link_rate = drm_dp_max_link_rate(dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (amdgpu_connector_is_dp12_capable(connector)) {
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ unsigned bpp =
+ amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}
- return drm_dp_max_link_rate(dpcd);
+ return -EINVAL;
}
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
+ int ret;
if (!amdgpu_connector->con_priv)
return;
@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}
@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_lanes, dp_clock;
+ int ret;
if (!amdgpu_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = amdgpu_connector->con_priv;
- dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock, &dp_lanes, &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!amdgpu_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1cd6de575305..542517d4e584 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5f712ceddf08..c568293cb6c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1030,6 +1031,8 @@ static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(handle);
+
return cik_sdma_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 8035d4d6a4f5..653917a3bcc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1955,10 +1955,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
} else { /*pi->caps_vce_pg*/
cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, true);
+ cz_enable_vce_dpm(adev, !gate);
}
-
- return;
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ea87033bfaf6..df17fababbd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -167,6 +167,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
return 0;
default: BUG();
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index e5aec45bf985..1ac29d703c12 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -108,7 +108,6 @@ steal_encoder(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int ret;
/*
* We can only steal an encoder coming from a connector, which means we
@@ -139,9 +138,6 @@ steal_encoder(struct drm_atomic_state *state,
if (IS_ERR(connector_state))
return PTR_ERR(connector_state);
- ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
- if (ret)
- return ret;
connector_state->best_encoder = NULL;
}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 6743ff7dccfa..7f4a6c550319 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
mb();
for (; addr < end; addr += size)
clflushopt(addr);
+ clflushopt(end - 1); /* force serialisation */
mb();
return;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index dc84003f694e..5e4bb4837bae 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5231,6 +5231,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
unsigned long flags;
int ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d5d2c03fd136..8c9ac021608f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -73,6 +73,8 @@
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC (1 << 10)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -99,6 +101,9 @@ static struct edid_quirk {
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+ /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+ { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3820,6 +3825,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info, connector);
+ if (quirks & EDID_QUIRK_FORCE_6BPC)
+ connector->display_info.bpc = 6;
+
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c7de454e8e88..b205224f1a44 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -338,27 +338,32 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
spin_unlock(&file_priv->table_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
- if (ret < 0) {
- drm_gem_object_handle_unreference_unlocked(obj);
- return ret;
- }
+ if (ret < 0)
+ goto err_unref;
+
*handlep = ret;
ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_remove;
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_revoke;
}
return 0;
+
+err_revoke:
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+err_remove:
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, *handlep);
+ spin_unlock(&file_priv->table_lock);
+err_unref:
+ drm_gem_object_handle_unreference_unlocked(obj);
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d3ce4da6a6ad..d400d6773bbb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3313,6 +3313,9 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
}
extern void intel_i2c_reset(struct drm_device *dev);
+/* intel_bios.c */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6ed7d63a0688..201947b4377c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -513,9 +513,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
return ret;
if (r->presumed_offset != offset &&
- __copy_to_user_inatomic(&user_relocs->presumed_offset,
- &r->presumed_offset,
- sizeof(r->presumed_offset))) {
+ __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 86c7500454b4..b37fe0df743e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2747,6 +2747,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
}
if (drm_mm_initialized(&vm->mm)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9ed9f6dde86f..cace154bbdc0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3240,19 +3240,20 @@ enum skl_disp_power_wells {
#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
/*
- * HDMI/DP bits are gen4+
+ * HDMI/DP bits are g4x+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
* Please check the detailed lore in the commit message for for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-/* VLV DP/HDMI bits again match Bspec */
-#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index ce82f9c7df24..d14bdc537587 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1351,3 +1351,42 @@ intel_parse_bios(struct drm_device *dev)
return 0;
}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ /* FIXME maybe deal with port A as well? */
+ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c41bc42b6fa7..a3254c3bcc7c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11952,21 +11952,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
- /* Clamp bpp to default limit on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0) {
- int type = connector->base.connector_type;
- int clamp_bpp = 24;
-
- /* Fall back to 18 bpp when DP sink capability is unknown. */
- if (type == DRM_MODE_CONNECTOR_DisplayPort ||
- type == DRM_MODE_CONNECTOR_eDP)
- clamp_bpp = 18;
-
- if (bpp > clamp_bpp) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
- bpp, clamp_bpp);
- pipe_config->pipe_bpp = clamp_bpp;
- }
+ /* Clamp bpp to 8 on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0 && bpp > 24) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+ bpp);
+ pipe_config->pipe_bpp = 24;
}
}
@@ -14170,6 +14160,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
+ bool has_edp, has_port;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@@ -14178,27 +14170,37 @@ static void intel_setup_outputs(struct drm_device *dev)
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI
+ * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+ * and VBT for the presence of the port. Additionally we can't
+ * trust the port type the VBT declares as we've seen at least
+ * HDMI ports that the VBT claim are DP or eDP.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);
- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
- /* eDP not supported on port D, so don't check VBT */
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED)
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
+ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
intel_dsi_init(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8e1d6d74c203..ebbd23407a80 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4592,20 +4592,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
u32 bit;
switch (port->port) {
case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
MISSING_CASE(port->port);
@@ -4657,8 +4657,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_VALLEYVIEW(dev_priv))
- return vlv_digital_port_connected(dev_priv, port);
+ else if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
@@ -6113,8 +6113,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return true;
}
-void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ int output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@@ -6124,7 +6125,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
@@ -6179,15 +6180,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
- return;
+ return true;
err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}
void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c5f11e0c5d5b..67f72a7ee7cb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1195,7 +1195,7 @@ void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);
/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4b8ed9f2dabc..dff69fef47e0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2030,6 +2030,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 6dc13c02c28e..e362a30776fa 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- DRM_ERROR("No ACPI video bus found\n");
+ DRM_DEBUG_KMS("No ACPI video bus found\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 62284e45d531..1e851e037c29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1789,16 +1789,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+ /*
+ * We treat the cursor plane as always-on for the purposes of watermark
+ * calculation. Until we have two-stage watermark programming merged,
+ * this is necessary to avoid flickering.
+ */
+ int cpp = 4;
+ int width = pstate->visible ? pstate->base.crtc_w : 64;
- if (!cstate->base.active || !pstate->visible)
+ if (!cstate->base.active)
return 0;
return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->dst),
- bpp,
- mem_value);
+ width, cpp, mem_value);
}
/* Only for WM_LP. */
@@ -4522,7 +4526,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6d7cd3fe21e7..1847f83b1e33 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return submit;
}
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_from_user_inatomic(to, from, n);
+ return -EFAULT;
+}
+
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
@@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
int ret = 0;
spin_lock(&file->table_lock);
+ pagefault_disable();
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
@@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret) {
- ret = -EFAULT;
- goto out_unlock;
+ ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+ if (unlikely(ret)) {
+ pagefault_enable();
+ spin_unlock(&file->table_lock);
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret)
+ goto out;
+ spin_lock(&file->table_lock);
+ pagefault_disable();
}
if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- submit->nr_bos = i;
+ pagefault_enable();
spin_unlock(&file->table_lock);
+out:
+ submit->nr_bos = i;
+
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1d3ee5179ab8..d236fc7c425b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -308,7 +308,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
- /* remove conflicting drivers (vesafb, efifb etc) */
+ /* We need to check that the chipset is supported before booting
+ * fbdev off the hardware, as there's no way to put it back.
+ */
+ ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ if (ret)
+ return ret;
+
+ nvkm_device_del(&device);
+
+ /* Remove conflicting drivers (vesafb, efifb etc). */
aper = alloc_apertures(3);
if (!aper)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 8f715feadf56..f90568327468 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | image->width);
+ OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
- dsize = ALIGN(image->width * image->height, 32) >> 5;
+ dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a4e259a00430..c8e096533f60 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
- dwords = ALIGN(image->width * image->height, 32) >> 5;
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index f28315e865a5..22d32578dafd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
- dwords = ALIGN(image->width * image->height, 32) >> 5;
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 69de8c6259fe..f1e15a4d4f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 2207dac23981..300f5ed5de0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index bd73b4069069..44ee72e04df9 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -302,77 +302,31 @@ static int convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
-/* get the max pix clock supported by the link rate and lane num */
-static int dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** radeon specific DP functions *****/
-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE])
-{
- int max_link_rate;
-
- if (radeon_connector_is_dp12_capable(connector))
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
- else
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
-
- return max_link_rate;
-}
-
-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 dpcd[DP_DPCD_SIZE],
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (radeon_connector_is_dp12_capable(connector)) {
- max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}
- return radeon_dp_get_max_link_rate(connector, dpcd);
+ return -EINVAL;
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -491,6 +445,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
+ int ret;
if (!radeon_connector->con_priv)
return;
@@ -498,10 +453,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}
@@ -510,7 +469,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_clock, dp_lanes;
+ int ret;
if ((mode->clock > 340000) &&
(!radeon_connector_is_dp12_capable(connector)))
@@ -520,8 +480,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
- dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dp_lanes,
+ &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 0b04b9282f56..d4ac8c837314 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index de9a2ffcf5f7..0c5b3eeff82d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;
- if (crev < 2)
+ if ((frev < 2) && (crev < 2))
p1pll->pll_out_min =
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
else
@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
+ if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a283..69ce95571136 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "radeon_acpi.h"
@@ -255,6 +256,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9cfc1c3e1965..30f00748ed37 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2058,7 +2058,6 @@ radeon_add_atom_connector(struct drm_device *dev,
RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2308,8 +2307,10 @@ radeon_add_atom_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2385,7 +2386,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2470,10 +2470,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 744f5c49c664..6dd39bdedb97 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -525,11 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
{
struct radeon_connector_atom_dig *dig_connector;
-
dig_connector = mst_enc->connector->con_priv;
dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
- dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
- dig_connector->dpcd);
+ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
dig_connector->dp_lane_count, dig_connector->dp_clock);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index bba112628b47..7a0666ac4e23 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -757,8 +757,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 *dpcd);
+extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 *dpcd,
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f342aad79cc6..35310336dd0a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 6ecbab466c7c..bd1e432d8c7d 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -303,6 +303,27 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
if ((pages == NULL) || pages_len < (*page_size >> PAGE_SHIFT))
return -EINVAL;
+ /* If the pool is not configured get pages from the system */
+ if (!kgsl_num_pools) {
+ gfp_t gfp_mask = kgsl_gfp_mask(order);
+
+ page = alloc_pages(gfp_mask, order);
+ if (page == NULL) {
+ /* Retry with lower order pages */
+ if (order > 0) {
+ size_t size = PAGE_SIZE << --order;
+ *page_size = kgsl_get_page_size(size,
+ ilog2(size));
+ *align = ilog2(*page_size);
+ return -EAGAIN;
+
+ } else
+ return -ENOMEM;
+ }
+ _kgsl_pool_zero_page(page, order);
+ goto done;
+ }
+
pool = _kgsl_get_pool_from_order(order);
if (pool == NULL)
return -EINVAL;
@@ -338,6 +359,7 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
_kgsl_pool_zero_page(page, order);
}
+done:
for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
p = nth_page(page, j);
pages[pcount] = p;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 283b72c22db4..6e2a0e3f2645 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -613,25 +613,6 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
}
EXPORT_SYMBOL(kgsl_cache_range_op);
-#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
-static inline int get_page_size(size_t size, unsigned int align)
-{
- if (align >= ilog2(SZ_1M) && size >= SZ_1M)
- return SZ_1M;
- else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
- return SZ_64K;
- else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
- return SZ_8K;
- else
- return PAGE_SIZE;
-}
-#else
-static inline int get_page_size(size_t size, unsigned int align)
-{
- return PAGE_SIZE;
-}
-#endif
-
int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
uint64_t size)
@@ -648,7 +629,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
- page_size = get_page_size(size, align);
+ page_size = kgsl_get_page_size(size, align);
/*
* The alignment cannot be less than the intended page size - it can be
@@ -719,7 +700,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
memdesc->page_count += page_count;
/* Get the needed page size for the next iteration */
- page_size = get_page_size(len, align);
+ page_size = kgsl_get_page_size(len, align);
}
/* Call to the hypervisor to lock any secure buffer allocations */
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index c1c2afa68756..7db8ce0413c2 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -363,4 +363,30 @@ static inline void kgsl_free_sgt(struct sg_table *sgt)
}
}
+/**
+ * kgsl_get_page_size() - Get supported pagesize
+ * @size: Size of the page
+ * @align: Desired alignment of the size
+ *
+ * Return supported pagesize
+ */
+#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
+static inline int kgsl_get_page_size(size_t size, unsigned int align)
+{
+ if (align >= ilog2(SZ_1M) && size >= SZ_1M)
+ return SZ_1M;
+ else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
+ return SZ_64K;
+ else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
+ return SZ_8K;
+ else
+ return PAGE_SIZE;
+}
+#else
+static inline int kgsl_get_page_size(size_t size, unsigned int align)
+{
+ return PAGE_SIZE;
+}
+#endif
+
#endif /* __KGSL_SHAREDMEM_H */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 774cd2210566..21febbb0d84e 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1418,8 +1418,10 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
}
ret = hid_hw_output_report(hdev, buf, 1);
- if (ret < 0)
- hid_err(hdev, "can't set operational mode: step 3\n");
+ if (ret < 0) {
+ hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+ ret = 0;
+ }
out:
kfree(buf);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 022176189346..54dc4ce09f35 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -51,10 +51,26 @@ struct uhid_device {
u32 report_id;
u32 report_type;
struct uhid_event report_buf;
+ struct work_struct worker;
};
static struct miscdevice uhid_misc;
+static void uhid_device_add_worker(struct work_struct *work)
+{
+ struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
+ int ret;
+
+ ret = hid_add_device(uhid->hid);
+ if (ret) {
+ hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);
+
+ hid_destroy_device(uhid->hid);
+ uhid->hid = NULL;
+ uhid->running = false;
+ }
+}
+
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
__u8 newhead;
@@ -516,18 +532,14 @@ static int uhid_dev_create2(struct uhid_device *uhid,
uhid->hid = hid;
uhid->running = true;
- ret = hid_add_device(hid);
- if (ret) {
- hid_err(hid, "Cannot register HID device\n");
- goto err_hid;
- }
+	/* Adding of a HID device is done through a worker, to allow HID
+	 * drivers which use feature requests during .probe to work;
+	 * otherwise they would block on devlock, which is held by
+	 * uhid_char_write. */
+ schedule_work(&uhid->worker);
return 0;
-err_hid:
- hid_destroy_device(hid);
- uhid->hid = NULL;
- uhid->running = false;
err_free:
kfree(uhid->rd_data);
uhid->rd_data = NULL;
@@ -568,6 +580,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
uhid->running = false;
wake_up_interruptible(&uhid->report_wait);
+ cancel_work_sync(&uhid->worker);
+
hid_destroy_device(uhid->hid);
kfree(uhid->rd_data);
@@ -630,6 +644,7 @@ static int uhid_char_open(struct inode *inode, struct file *file)
init_waitqueue_head(&uhid->waitq);
init_waitqueue_head(&uhid->report_wait);
uhid->running = false;
+ INIT_WORK(&uhid->worker, uhid_device_add_worker);
file->private_data = uhid;
nonseekable_open(inode, file);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 9098f13f2f44..1ef37c727572 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
+#include <linux/interrupt.h>
#include "hyperv_vmbus.h"
@@ -496,8 +497,21 @@ static void reset_channel_cb(void *arg)
static int vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
+ struct tasklet_struct *tasklet;
int ret;
+ /*
+ * process_chn_event(), running in the tasklet, can race
+ * with vmbus_close_internal() in the case of SMP guest, e.g., when
+ * the former is accessing channel->inbound.ring_buffer, the latter
+ * could be freeing the ring_buffer pages.
+ *
+ * To resolve the race, we can serialize them by disabling the
+ * tasklet when the latter is running here.
+ */
+ tasklet = hv_context.event_dpc[channel->target_cpu];
+ tasklet_disable(tasklet);
+
channel->state = CHANNEL_OPEN_STATE;
channel->sc_creation_callback = NULL;
/* Stop callback and cancel the timer asap */
@@ -525,7 +539,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to post the close msg,
* it is perhaps better to leak memory.
*/
- return ret;
+ goto out;
}
/* Tear down the gpadl for the channel's ring buffer */
@@ -538,7 +552,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to teardown gpadl,
* it is perhaps better to leak memory.
*/
- return ret;
+ goto out;
}
}
@@ -549,12 +563,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
free_pages((unsigned long)channel->ringbuffer_pages,
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
- /*
- * If the channel has been rescinded; process device removal.
- */
- if (channel->rescind)
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
+out:
+ tasklet_enable(tasklet);
+
return ret;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 652afd11a9ef..37238dffd947 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -28,6 +28,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/hyperv.h>
#include "hyperv_vmbus.h"
@@ -191,6 +192,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
if (channel == NULL)
return;
+ BUG_ON(!channel->rescind);
+
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -230,9 +233,7 @@ void vmbus_free_channels(void)
list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
- /* if we don't set rescind to true, vmbus_close_internal()
- * won't invoke hv_process_channel_removal().
- */
+ /* hv_process_channel_removal() needs this */
channel->rescind = true;
vmbus_device_unregister(channel->device_obj);
@@ -459,6 +460,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
cpumask_of_node(primary->numa_node));
cur_cpu = -1;
+
+ /*
+ * Normally Hyper-V host doesn't create more subchannels than there
+ * are VCPUs on the node but it is possible when not all present VCPUs
+ * on the node are initialized by guest. Clear the alloced_cpus_in_node
+ * to start over.
+ */
+ if (cpumask_equal(&primary->alloced_cpus_in_node,
+ cpumask_of_node(primary->numa_node)))
+ cpumask_clear(&primary->alloced_cpus_in_node);
+
while (true) {
cur_cpu = cpumask_next(cur_cpu, &available_mask);
if (cur_cpu >= nr_cpu_ids) {
@@ -488,6 +500,40 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
channel->target_vp = hv_context.vp_index[cur_cpu];
}
+static void vmbus_wait_for_unload(void)
+{
+ int cpu = smp_processor_id();
+ void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct vmbus_channel_message_header *hdr;
+ bool unloaded = false;
+
+ while (1) {
+ if (msg->header.message_type == HVMSG_NONE) {
+ mdelay(10);
+ continue;
+ }
+
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+ unloaded = true;
+
+ msg->header.message_type = HVMSG_NONE;
+ /*
+ * header.message_type needs to be written before we do
+ * wrmsrl() below.
+ */
+ mb();
+
+ if (msg->header.message_flags.msg_pending)
+ wrmsrl(HV_X64_MSR_EOM, 0);
+
+ if (unloaded)
+ break;
+ }
+}
+
/*
* vmbus_unload_response - Handler for the unload response.
*/
@@ -513,7 +559,14 @@ void vmbus_initiate_unload(void)
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
- wait_for_completion(&vmbus_connection.unload_event);
+ /*
+ * vmbus_initiate_unload() is also called on crash and the crash can be
+ * happening in an interrupt context, where scheduling is impossible.
+ */
+ if (!in_interrupt())
+ wait_for_completion(&vmbus_connection.unload_event);
+ else
+ vmbus_wait_for_unload();
}
/*
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 6341be8739ae..63194a9a7189 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -293,8 +293,14 @@ void hv_cleanup(void)
* Cleanup the TSC page based CS.
*/
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
- clocksource_change_rating(&hyperv_cs_tsc, 10);
- clocksource_unregister(&hyperv_cs_tsc);
+ /*
+ * Crash can happen in an interrupt context and unregistering
+ * a clocksource is impossible and redundant in this case.
+ */
+ if (!oops_in_progress) {
+ clocksource_change_rating(&hyperv_cs_tsc, 10);
+ clocksource_unregister(&hyperv_cs_tsc);
+ }
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index db4b887b889d..c37a71e13de0 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -51,7 +51,6 @@ static struct {
struct hv_fcopy_hdr *fcopy_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
- void *fcopy_context; /* for the channel callback */
} fcopy_transaction;
static void fcopy_respond_to_host(int error);
@@ -67,6 +66,13 @@ static struct hvutil_transport *hvt;
*/
static int dm_reg_value;
+static void fcopy_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ fcopy_transaction.state = HVUTIL_READY;
+ hv_fcopy_onchannelcallback(channel);
+}
+
static void fcopy_timeout_func(struct work_struct *dummy)
{
/*
@@ -74,13 +80,7 @@ static void fcopy_timeout_func(struct work_struct *dummy)
* process the pending transaction.
*/
fcopy_respond_to_host(HV_E_FAIL);
-
- /* Transaction is finished, reset the state. */
- if (fcopy_transaction.state > HVUTIL_READY)
- fcopy_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}
static int fcopy_handle_handshake(u32 version)
@@ -108,9 +108,7 @@ static int fcopy_handle_handshake(u32 version)
return -EINVAL;
}
pr_debug("FCP: userspace daemon ver. %d registered\n", version);
- fcopy_transaction.state = HVUTIL_READY;
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
return 0;
}
@@ -227,15 +225,8 @@ void hv_fcopy_onchannelcallback(void *context)
int util_fw_version;
int fcopy_srv_version;
- if (fcopy_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- fcopy_transaction.fcopy_context = context;
+ if (fcopy_transaction.state > HVUTIL_READY)
return;
- }
- fcopy_transaction.fcopy_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -275,7 +266,8 @@ void hv_fcopy_onchannelcallback(void *context)
* Send the information to the user-level daemon.
*/
schedule_work(&fcopy_send_work);
- schedule_delayed_work(&fcopy_timeout_work, 5*HZ);
+ schedule_delayed_work(&fcopy_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
@@ -304,9 +296,8 @@ static int fcopy_on_msg(void *msg, int len)
if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
fcopy_respond_to_host(*val);
- fcopy_transaction.state = HVUTIL_READY;
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel,
+ fcopy_poll_wrapper);
}
return 0;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 74c38a9f34a6..2a3420c4ca59 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -66,7 +66,6 @@ static struct {
struct hv_kvp_msg *kvp_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
- void *kvp_context; /* for the channel callback */
} kvp_transaction;
/*
@@ -94,6 +93,13 @@ static struct hvutil_transport *hvt;
*/
#define HV_DRV_VERSION "3.1"
+static void kvp_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ kvp_transaction.state = HVUTIL_READY;
+ hv_kvp_onchannelcallback(channel);
+}
+
static void
kvp_register(int reg_value)
{
@@ -121,12 +127,7 @@ static void kvp_timeout_func(struct work_struct *dummy)
*/
kvp_respond_to_host(NULL, HV_E_FAIL);
- /* Transaction is finished, reset the state. */
- if (kvp_transaction.state > HVUTIL_READY)
- kvp_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(kvp_transaction.kvp_context,
- hv_kvp_onchannelcallback);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
@@ -218,9 +219,7 @@ static int kvp_on_msg(void *msg, int len)
*/
if (cancel_delayed_work_sync(&kvp_timeout_work)) {
kvp_respond_to_host(message, error);
- kvp_transaction.state = HVUTIL_READY;
- hv_poll_channel(kvp_transaction.kvp_context,
- hv_kvp_onchannelcallback);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
return 0;
@@ -596,15 +595,8 @@ void hv_kvp_onchannelcallback(void *context)
int util_fw_version;
int kvp_srv_version;
- if (kvp_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- kvp_transaction.kvp_context = context;
+ if (kvp_transaction.state > HVUTIL_READY)
return;
- }
- kvp_transaction.kvp_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
@@ -668,7 +660,8 @@ void hv_kvp_onchannelcallback(void *context)
* user-mode not responding.
*/
schedule_work(&kvp_sendkey_work);
- schedule_delayed_work(&kvp_timeout_work, 5*HZ);
+ schedule_delayed_work(&kvp_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
return;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 815405f2e777..81882d4848bd 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -53,7 +53,6 @@ static struct {
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
struct hv_vss_msg *msg; /* current message */
- void *vss_context; /* for the channel callback */
} vss_transaction;
@@ -74,6 +73,13 @@ static void vss_timeout_func(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_send_op_work, vss_send_op);
+static void vss_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ vss_transaction.state = HVUTIL_READY;
+ hv_vss_onchannelcallback(channel);
+}
+
/*
* Callback when data is received from user mode.
*/
@@ -86,12 +92,7 @@ static void vss_timeout_func(struct work_struct *dummy)
pr_warn("VSS: timeout waiting for daemon to reply\n");
vss_respond_to_host(HV_E_FAIL);
- /* Transaction is finished, reset the state. */
- if (vss_transaction.state > HVUTIL_READY)
- vss_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(vss_transaction.vss_context,
- hv_vss_onchannelcallback);
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
@@ -138,9 +139,8 @@ static int vss_on_msg(void *msg, int len)
if (cancel_delayed_work_sync(&vss_timeout_work)) {
vss_respond_to_host(vss_msg->error);
/* Transaction is finished, reset the state. */
- vss_transaction.state = HVUTIL_READY;
- hv_poll_channel(vss_transaction.vss_context,
- hv_vss_onchannelcallback);
+ hv_poll_channel(vss_transaction.recv_channel,
+ vss_poll_wrapper);
}
} else {
/* This is a spurious call! */
@@ -238,15 +238,8 @@ void hv_vss_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
- if (vss_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- vss_transaction.vss_context = context;
+ if (vss_transaction.state > HVUTIL_READY)
return;
- }
- vss_transaction.vss_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -338,6 +331,11 @@ static void vss_on_reset(void)
int
hv_vss_init(struct hv_util_service *srv)
{
+ if (vmbus_proto_version < VERSION_WIN8_1) {
+ pr_warn("Integration service 'Backup (volume snapshot)'"
+ " not supported on this host version.\n");
+ return -ENOTSUPP;
+ }
recv_buffer = srv->recv_buffer;
/*
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 6a9d80a5332d..1505ee6e6605 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -204,9 +204,12 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
goto out_unlock;
}
hvt->outmsg = kzalloc(len, GFP_KERNEL);
- memcpy(hvt->outmsg, msg, len);
- hvt->outmsg_len = len;
- wake_up_interruptible(&hvt->outmsg_q);
+ if (hvt->outmsg) {
+ memcpy(hvt->outmsg, msg, len);
+ hvt->outmsg_len = len;
+ wake_up_interruptible(&hvt->outmsg_q);
+ } else
+ ret = -ENOMEM;
out_unlock:
mutex_unlock(&hvt->outmsg_lock);
return ret;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 3782636562a1..12156db2e88e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,11 @@
#include <linux/hyperv.h>
/*
+ * Timeout for services such as KVP and fcopy.
+ */
+#define HV_UTIL_TIMEOUT 30
+
+/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
* is set by CPUID(HVCPUID_VERSION_FEATURES).
*/
@@ -759,11 +764,7 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
if (!channel)
return;
- if (channel->target_cpu != smp_processor_id())
- smp_call_function_single(channel->target_cpu,
- cb, channel, true);
- else
- cb(channel);
+ smp_call_function_single(channel->target_cpu, cb, channel, true);
}
enum hvutil_device_state {
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index f19b6f7a467a..509ed9731630 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -41,6 +41,7 @@
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
+#include <linux/random.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;
@@ -104,6 +105,7 @@ static struct notifier_block hyperv_panic_block = {
};
struct resource *hyperv_mmio;
+DEFINE_SEMAPHORE(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@@ -602,23 +604,11 @@ static int vmbus_remove(struct device *child_device)
{
struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
- u32 relid = dev->channel->offermsg.child_relid;
if (child_device->driver) {
drv = drv_to_hv_drv(child_device->driver);
if (drv->remove)
drv->remove(dev);
- else {
- hv_process_channel_removal(dev->channel, relid);
- pr_err("remove not set for driver %s\n",
- dev_name(child_device));
- }
- } else {
- /*
- * We don't have a driver for this device; deal with the
- * rescind message by removing the channel.
- */
- hv_process_channel_removal(dev->channel, relid);
}
return 0;
@@ -653,7 +643,10 @@ static void vmbus_shutdown(struct device *child_device)
static void vmbus_device_release(struct device *device)
{
struct hv_device *hv_dev = device_to_hv_device(device);
+ struct vmbus_channel *channel = hv_dev->channel;
+ hv_process_channel_removal(channel,
+ channel->offermsg.child_relid);
kfree(hv_dev);
}
@@ -826,6 +819,8 @@ static void vmbus_isr(void)
else
tasklet_schedule(&msg_dpc);
}
+
+ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
@@ -867,7 +862,7 @@ static int vmbus_bus_init(int irq)
on_each_cpu(hv_synic_init, NULL, 1);
ret = vmbus_connect();
if (ret)
- goto err_alloc;
+ goto err_connect;
if (vmbus_proto_version > VERSION_WIN7)
cpu_hotplug_disable();
@@ -885,6 +880,8 @@ static int vmbus_bus_init(int irq)
return 0;
+err_connect:
+ on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
hv_synic_free();
hv_remove_vmbus_irq();
@@ -1144,7 +1141,10 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t range_min, range_max, start, local_min, local_max;
const char *dev_n = dev_name(&device_obj->device);
u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
- int i;
+ int i, retval;
+
+ retval = -ENXIO;
+ down(&hyperv_mmio_lock);
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= max) || (iter->end <= min))
@@ -1181,13 +1181,17 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
for (; start + size - 1 <= local_max; start += align) {
*new = request_mem_region_exclusive(start, size,
dev_n);
- if (*new)
- return 0;
+ if (*new) {
+ retval = 0;
+ goto exit;
+ }
}
}
}
- return -ENXIO;
+exit:
+ up(&hyperv_mmio_lock);
+ return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 17ae2eb26ce2..d5c06f2764f4 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -109,24 +109,24 @@ static int iio_hwmon_probe(struct platform_device *pdev)
switch (type) {
case IIO_VOLTAGE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "in%d_input",
- in_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "in%d_input",
+ in_i++);
break;
case IIO_TEMP:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "temp%d_input",
- temp_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "temp%d_input",
+ temp_i++);
break;
case IIO_CURRENT:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "curr%d_input",
- curr_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "curr%d_input",
+ curr_i++);
break;
case IIO_HUMIDITYRELATIVE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "humidity%d_input",
- humidity_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "humidity%d_input",
+ humidity_i++);
break;
default:
ret = -EINVAL;
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index d8ef5f59edcb..567fa0c360ae 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1896,8 +1896,8 @@ int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
}
for_each_child_of_node(node, child) {
- int channel_num, scaling, post_scaling, hw_settle_time;
- int fast_avg_setup, calib_type = 0, rc;
+ int channel_num, scaling = 0, post_scaling = 0;
+ int fast_avg_setup, calib_type = 0, rc, hw_settle_time = 0;
const char *calibration_param, *channel_name;
channel_name = of_get_property(child,
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 165d3001c301..c6ec5c62b7a9 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -419,6 +419,38 @@ static struct intel_th_subdevice {
},
};
+#ifdef CONFIG_MODULES
+static void __intel_th_request_hub_module(struct work_struct *work)
+{
+ struct intel_th *th = container_of(work, struct intel_th,
+ request_module_work);
+
+ request_module("intel_th_%s", th->hub->name);
+}
+
+static int intel_th_request_hub_module(struct intel_th *th)
+{
+ INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
+ schedule_work(&th->request_module_work);
+
+ return 0;
+}
+
+static void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+ flush_work(&th->request_module_work);
+}
+#else
+static inline int intel_th_request_hub_module(struct intel_th *th)
+{
+ return -EINVAL;
+}
+
+static inline void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+}
+#endif /* CONFIG_MODULES */
+
static int intel_th_populate(struct intel_th *th, struct resource *devres,
unsigned int ndevres, int irq)
{
@@ -488,7 +520,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
th->hub = thdev;
- err = request_module("intel_th_%s", subdev->name);
+ err = intel_th_request_hub_module(th);
if (!err)
req++;
}
@@ -603,6 +635,7 @@ void intel_th_free(struct intel_th *th)
{
int i;
+ intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
if (th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 57fd72b20fae..d03a6cd1c65d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -197,6 +197,9 @@ struct intel_th {
int id;
int major;
+#ifdef CONFIG_MODULES
+ struct work_struct request_module_work;
+#endif /* CONFIG_MODULES */
#ifdef CONFIG_INTEL_TH_DEBUG
struct dentry *dbg;
#endif
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 641e87936064..d57a2f75dccf 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -67,6 +67,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Kaby Lake PCH-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index a0d95ff682ae..2d5ff86398d0 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
msg->outsize = request_len;
msg->insize = response_len;
- result = cros_ec_cmd_xfer(bus->ec, msg);
+ result = cros_ec_cmd_xfer_status(bus->ec, msg);
if (result < 0) {
dev_err(dev, "Error transferring EC i2c message %d\n", result);
goto exit;
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index 8eff62738877..e253598d764c 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -433,7 +433,7 @@ static int efm32_i2c_probe(struct platform_device *pdev)
ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request irq (%d)\n", ret);
- return ret;
+ goto err_disable_clk;
}
ret = i2c_add_adapter(&ddata->adapter);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 27fa0cb09538..85f39cc3e276 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -244,6 +244,13 @@ struct i801_priv {
struct platform_device *mux_pdev;
#endif
struct platform_device *tco_pdev;
+
+ /*
+ * If set to true the host controller registers are reserved for
+ * ACPI AML use. Protected by acpi_lock.
+ */
+ bool acpi_reserved;
+ struct mutex acpi_lock;
};
#define FEATURE_SMBUS_PEC (1 << 0)
@@ -714,9 +721,15 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
{
int hwpec;
int block = 0;
- int ret, xact = 0;
+ int ret = 0, xact = 0;
struct i801_priv *priv = i2c_get_adapdata(adap);
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved) {
+ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
+ }
+
hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
&& size != I2C_SMBUS_QUICK
&& size != I2C_SMBUS_I2C_BLOCK_DATA;
@@ -773,7 +786,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
default:
dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
size);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (hwpec) /* enable/disable hardware PEC */
@@ -796,11 +810,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
if (block)
- return ret;
+ goto out;
if (ret)
- return ret;
+ goto out;
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
- return 0;
+ goto out;
switch (xact & 0x7f) {
case I801_BYTE: /* Result put in SMBHSTDAT0 */
@@ -812,7 +826,10 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
(inb_p(SMBHSTDAT1(priv)) << 8);
break;
}
- return 0;
+
+out:
+ mutex_unlock(&priv->acpi_lock);
+ return ret;
}
@@ -1249,6 +1266,72 @@ static void i801_add_tco(struct i801_priv *priv)
priv->tco_pdev = pdev;
}
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context, void *region_context)
+{
+ struct i801_priv *priv = handler_context;
+ struct pci_dev *pdev = priv->pci_dev;
+ acpi_status status;
+
+ /*
+ * Once BIOS AML code touches the OpRegion we warn and inhibit any
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved) {
+ priv->acpi_reserved = true;
+
+ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+ }
+
+ if ((function & ACPI_IO_MASK) == ACPI_READ)
+ status = acpi_os_read_port(address, (u32 *)value, bits);
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (adev) {
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+ NULL, priv);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ }
+
+ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (!adev)
+ return;
+
+ acpi_remove_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
@@ -1266,6 +1349,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->adapter.dev.parent = &dev->dev;
ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
priv->adapter.retries = 3;
+ mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
switch (dev->device) {
@@ -1328,10 +1412,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
- if (err) {
+ if (i801_acpi_probe(priv))
return -ENODEV;
- }
err = pcim_iomap_regions(dev, 1 << SMBBAR,
dev_driver_string(&dev->dev));
@@ -1340,6 +1422,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"Failed to request SMBus region 0x%lx-0x%Lx\n",
priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
+ i801_acpi_remove(priv);
return err;
}
@@ -1404,6 +1487,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+ i801_acpi_remove(priv);
return err;
}
@@ -1422,6 +1506,7 @@ static void i801_remove(struct pci_dev *dev)
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
+ i801_acpi_remove(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
platform_device_unregister(priv->tco_pdev);
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 5fbd5bd0878f..49fc2c7e560a 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -150,7 +150,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
mux->data.idle_in_use = true;
/* map address from "reg" if exists */
- if (of_address_to_resource(np, 0, &res)) {
+ if (of_address_to_resource(np, 0, &res) == 0) {
mux->data.reg_size = resource_size(&res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(mux->data.reg))
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 146eed70bdf4..ba947df5a8c7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[] = {
{
.enter = NULL }
};
+static struct cpuidle_state knl_cstates[] = {
+ {
+ .name = "C1-KNL",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .name = "C6-KNL",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 120,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .enter = NULL }
+};
/**
* intel_idle
@@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_avn = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_knl = {
+ .state_table = knl_cstates,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x56, idle_cpu_bdw),
ICPU(0x4e, idle_cpu_skl),
ICPU(0x5e, idle_cpu_skl),
+ ICPU(0x57, idle_cpu_knl),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 0e8fda8b9080..ec774917f4a4 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -28,9 +28,8 @@
#define FG_ADC_RR_SKIN_TEMP_MSB 0x51
#define FG_ADC_RR_RR_ADC_CTL 0x52
#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK 0x8
-#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL BIT(3)
+#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL BIT(3)
#define FG_ADC_RR_ADC_LOG 0x53
-#define FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK 0xFE
#define FG_ADC_RR_ADC_LOG_CLR_CTRL BIT(0)
#define FG_ADC_RR_FAKE_BATT_LOW_LSB 0x58
@@ -40,6 +39,7 @@
#define FG_ADC_RR_BATT_ID_CTRL 0x60
#define FG_ADC_RR_BATT_ID_TRIGGER 0x61
+#define FG_ADC_RR_BATT_ID_TRIGGER_CTL BIT(0)
#define FG_ADC_RR_BATT_ID_STS 0x62
#define FG_ADC_RR_BATT_ID_CFG 0x63
#define FG_ADC_RR_BATT_ID_5_LSB 0x66
@@ -182,9 +182,11 @@
#define FG_RR_ADC_COHERENT_CHECK_RETRY 5
#define FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN 16
#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
+#define FG_RR_ADC_STS_CHANNEL_STS 0x2
-#define FG_RR_CONV_CONTINUOUS_TIME_MIN 80000
-#define FG_RR_CONV_CONTINUOUS_TIME_MAX 81000
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000
+#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000
+#define FG_RR_CONV_MAX_RETRY_CNT 50
/*
* The channel number is not a physical index in hardware,
@@ -570,40 +572,157 @@ static const struct rradc_channels rradc_chans[] = {
FG_ADC_RR_AUX_THERM_STS)
};
+static int rradc_enable_continuous_mode(struct rradc_chip *chip)
+{
+ int rc = 0;
+
+ /* Clear channel log */
+ rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+ FG_ADC_RR_ADC_LOG_CLR_CTRL,
+ FG_ADC_RR_ADC_LOG_CLR_CTRL);
+ if (rc < 0) {
+ pr_err("log ctrl update to clear failed:%d\n", rc);
+ return rc;
+ }
+
+ rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+ FG_ADC_RR_ADC_LOG_CLR_CTRL, 0);
+ if (rc < 0) {
+ pr_err("log ctrl update to not clear failed:%d\n", rc);
+ return rc;
+ }
+
+ /* Switch to continuous mode */
+ rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+ FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK,
+ FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL);
+ if (rc < 0) {
+ pr_err("Update to continuous mode failed:%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int rradc_disable_continuous_mode(struct rradc_chip *chip)
+{
+ int rc = 0;
+
+ /* Switch to non continuous mode */
+ rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+ FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK, 0);
+ if (rc < 0) {
+ pr_err("Update to non-continuous mode failed:%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u8 *buf, u16 status)
+{
+ int rc = 0, retry_cnt = 0, mask = 0;
+
+ switch (prop->channel) {
+ case RR_ADC_BATT_ID:
+ /* BATT_ID STS bit does not get set initially */
+ mask = FG_RR_ADC_STS_CHANNEL_STS;
+ break;
+ default:
+ mask = FG_RR_ADC_STS_CHANNEL_READING_MASK;
+ break;
+ }
+
+ while (((buf[0] & mask) != mask) &&
+ (retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
+ pr_debug("%s is not ready; nothing to read:0x%x\n",
+ rradc_chans[prop->channel].datasheet_name, buf[0]);
+ usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
+ FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+ retry_cnt++;
+ rc = rradc_read(chip, status, buf, 1);
+ if (rc < 0) {
+ pr_err("status read failed:%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (retry_cnt >= FG_RR_CONV_MAX_RETRY_CNT)
+ rc = -ENODATA;
+
+ return rc;
+}
+
+static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u8 *buf)
+{
+ int rc = 0;
+ u16 status = 0;
+
+ rc = rradc_enable_continuous_mode(chip);
+ if (rc < 0) {
+ pr_err("Failed to switch to continuous mode\n");
+ return rc;
+ }
+
+ status = rradc_chans[prop->channel].sts;
+ rc = rradc_read(chip, status, buf, 1);
+ if (rc < 0) {
+ pr_err("status read failed:%d\n", rc);
+ return rc;
+ }
+
+ rc = rradc_check_status_ready_with_retry(chip, prop,
+ buf, status);
+ if (rc < 0) {
+ pr_err("Status read failed:%d\n", rc);
+ return rc;
+ }
+
+ rc = rradc_disable_continuous_mode(chip);
+ if (rc < 0) {
+ pr_err("Failed to switch to non continuous mode\n");
+ return rc;
+ }
+
+ return rc;
+}
+
static int rradc_do_conversion(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 *data)
{
- int rc = 0, bytes_to_read = 0, retry = 0;
+ int rc = 0, bytes_to_read = 0;
u8 buf[6];
u16 offset = 0, batt_id_5 = 0, batt_id_15 = 0, batt_id_150 = 0;
u16 status = 0;
mutex_lock(&chip->lock);
- if ((prop->channel != RR_ADC_BATT_ID) &&
- (prop->channel != RR_ADC_CHG_HOT_TEMP) &&
- (prop->channel != RR_ADC_CHG_TOO_HOT_TEMP) &&
- (prop->channel != RR_ADC_SKIN_HOT_TEMP) &&
- (prop->channel != RR_ADC_SKIN_TOO_HOT_TEMP) &&
- (prop->channel != RR_ADC_USBIN_V)) {
- /* BATT_ID STS bit does not get set initially */
- status = rradc_chans[prop->channel].sts;
- rc = rradc_read(chip, status, buf, 1);
+ switch (prop->channel) {
+ case RR_ADC_BATT_ID:
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL);
if (rc < 0) {
- pr_err("status read failed:%d\n", rc);
+ pr_err("BATT_ID trigger set failed:%d\n", rc);
goto fail;
}
- buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
- if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
- pr_debug("%s is not ready; nothing to read\n",
- rradc_chans[prop->channel].datasheet_name);
- rc = -ENODATA;
+ rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+ if (rc < 0) {
+ pr_err("Error reading in continuous mode:%d\n", rc);
goto fail;
}
- }
- if (prop->channel == RR_ADC_USBIN_V) {
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
+ if (rc < 0) {
+ pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+ goto fail;
+ }
+ break;
+ case RR_ADC_USBIN_V:
/* Force conversion every cycle */
rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK,
@@ -613,31 +732,27 @@ static int rradc_do_conversion(struct rradc_chip *chip,
goto fail;
}
- /* Clear channel log */
- rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
- FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK,
- FG_ADC_RR_ADC_LOG_CLR_CTRL);
+ rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
if (rc < 0) {
- pr_err("log ctrl update to clear failed:%d\n", rc);
+ pr_err("Error reading in continuous mode:%d\n", rc);
goto fail;
}
- rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
- FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK, 0);
- if (rc < 0) {
- pr_err("log ctrl update to not clear failed:%d\n", rc);
- goto fail;
- }
-
- /* Switch to continuous mode */
- rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
- FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK,
- FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL);
+ /* Restore usb_in trigger */
+ rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
+ FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
if (rc < 0) {
- pr_err("Update to continuous mode failed:%d\n", rc);
+ pr_err("Restore every cycle update failed:%d\n", rc);
goto fail;
}
-
+ break;
+ case RR_ADC_CHG_HOT_TEMP:
+ case RR_ADC_CHG_TOO_HOT_TEMP:
+ case RR_ADC_SKIN_HOT_TEMP:
+ case RR_ADC_SKIN_TOO_HOT_TEMP:
+ pr_debug("Read only the data registers\n");
+ break;
+ default:
status = rradc_chans[prop->channel].sts;
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
@@ -645,41 +760,14 @@ static int rradc_do_conversion(struct rradc_chip *chip,
goto fail;
}
- buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
- while ((buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) &&
- (retry < 2)) {
- pr_debug("%s is not ready; nothing to read\n",
- rradc_chans[prop->channel].datasheet_name);
- usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN,
- FG_RR_CONV_CONTINUOUS_TIME_MAX);
- retry++;
- rc = rradc_read(chip, status, buf, 1);
- if (rc < 0) {
- pr_err("status read failed:%d\n", rc);
- goto fail;
- }
- }
-
- /* Switch to non continuous mode */
- rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
- FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK, 0);
- if (rc < 0) {
- pr_err("Update to continuous mode failed:%d\n", rc);
- goto fail;
- }
-
- /* Restore usb_in trigger */
- rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
- FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
+ rc = rradc_check_status_ready_with_retry(chip, prop,
+ buf, status);
if (rc < 0) {
- pr_err("Restore every cycle update failed:%d\n", rc);
- goto fail;
- }
-
- if (retry >= 2) {
+ pr_debug("Status read failed:%d\n", rc);
rc = -ENODATA;
goto fail;
}
+ break;
}
offset = rradc_chans[prop->channel].lsb;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 0f6f63b20263..7afd226a3321 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -107,6 +107,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
size_t datum_size;
size_t to_wait;
int ret;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
else
to_wait = min_t(size_t, n / datum_size, rb->watermark);
+ add_wait_queue(&rb->pollq, &wait);
do {
- ret = wait_event_interruptible(rb->pollq,
- iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
- if (ret)
- return ret;
+ if (!indio_dev->info) {
+ ret = -ENODEV;
+ break;
+ }
- if (!indio_dev->info)
- return -ENODEV;
+ if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
+ }
ret = rb->access->read_first_n(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
} while (ret == 0);
+ remove_wait_queue(&rb->pollq, &wait);
return ret;
}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 5fb089e91353..fb43a242847b 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -634,6 +634,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
+ dev_kfree_skb(skb);
return -ENOMEM;
}
nlh->nlmsg_type = NLMSG_DONE;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a95a32ba596e..d3b7ecd106f7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -534,7 +534,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
if (!data) {
- kfree_skb(skb);
+ nlmsg_free(skb);
return -EMSGSIZE;
}
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 870e56b6b25f..05179f47bbde 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -526,7 +526,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
spin_unlock(&tun_qp->tx_lock);
if (ret)
- goto out;
+ goto end;
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -595,9 +595,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
wr.wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
- if (ret)
- ib_destroy_ah(ah);
+ if (!ret)
+ return 0;
+ out:
+ spin_lock(&tun_qp->tx_lock);
+ tun_qp->tx_ix_tail++;
+ spin_unlock(&tun_qp->tx_lock);
+ tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+ ib_destroy_ah(ah);
return ret;
}
@@ -1278,9 +1284,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+ if (!ret)
+ return 0;
+
+ spin_lock(&sqp->tx_lock);
+ sqp->tx_ix_tail++;
+ spin_unlock(&sqp->tx_lock);
+ sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- if (ret)
- ib_destroy_ah(ah);
+ ib_destroy_ah(ah);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 13eaaf45288f..ea1e2ddaddf5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -357,7 +357,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
- sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
@@ -1162,8 +1162,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
udata, 0, &qp, gfp);
- if (err)
+ if (err) {
+ kfree(qp);
return ERR_PTR(err);
+ }
qp->ibqp.qp_num = qp->mqp.qpn;
qp->xrcdn = xrcdn;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 92ddae101ecc..8184267c7901 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -763,7 +763,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (attr->flags)
return ERR_PTR(-EINVAL);
- if (entries < 0)
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
entries = roundup_pow_of_two(entries + 1);
@@ -1094,11 +1095,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -ENOSYS;
}
- if (entries < 1)
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
+ }
entries = roundup_pow_of_two(entries + 1);
- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fd17443aeacd..bfc940ff9c8a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -962,14 +962,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
break;
case MLX5_DEV_EVENT_PORT_DOWN:
+ case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
break;
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* not used by ULPs */
- return;
-
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
port = (u8)param;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 307bdbca8938..cfcfbb6b84d7 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -226,6 +226,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -2525,10 +2527,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
return MLX5_FENCE_MODE_SMALL_AND_FENCE;
else
return fence;
-
- } else {
- return 0;
+ } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+ return MLX5_FENCE_MODE_FENCE;
}
+
+ return 0;
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
@@ -3092,17 +3095,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
- /* We don't support inline sends for kernel QPs (yet), and we
- * don't know what userspace's value should be.
- */
- qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14070d1..fa9c42ff1fb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
- skb->truesize = SKB_TRUESIZE(skb->len);
-
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7d3281866ffc..942dffca6a9d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1131,7 +1131,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fd4100d56d8c..aff42d5e2296 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -317,6 +317,19 @@ static struct usb_device_id xpad_table[] = {
MODULE_DEVICE_TABLE(usb, xpad_table);
+struct xpad_output_packet {
+ u8 data[XPAD_PKT_LEN];
+ u8 len;
+ bool pending;
+};
+
+#define XPAD_OUT_CMD_IDX 0
+#define XPAD_OUT_FF_IDX 1
+#define XPAD_OUT_LED_IDX (1 + IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF))
+#define XPAD_NUM_OUT_PACKETS (1 + \
+ IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF) + \
+ IS_ENABLED(CONFIG_JOYSTICK_XPAD_LEDS))
+
struct usb_xpad {
struct input_dev *dev; /* input device interface */
struct usb_device *udev; /* usb device */
@@ -329,9 +342,13 @@ struct usb_xpad {
dma_addr_t idata_dma;
struct urb *irq_out; /* urb for interrupt out report */
+ bool irq_out_active; /* we must not use an active URB */
unsigned char *odata; /* output data */
dma_addr_t odata_dma;
- struct mutex odata_mutex;
+ spinlock_t odata_lock;
+
+ struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS];
+ int last_out_packet;
#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
struct xpad_led *led;
@@ -678,18 +695,71 @@ exit:
__func__, retval);
}
+/* Callers must hold xpad->odata_lock spinlock */
+static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
+{
+ struct xpad_output_packet *pkt, *packet = NULL;
+ int i;
+
+ for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) {
+ if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS)
+ xpad->last_out_packet = 0;
+
+ pkt = &xpad->out_packets[xpad->last_out_packet];
+ if (pkt->pending) {
+ dev_dbg(&xpad->intf->dev,
+ "%s - found pending output packet %d\n",
+ __func__, xpad->last_out_packet);
+ packet = pkt;
+ break;
+ }
+ }
+
+ if (packet) {
+ memcpy(xpad->odata, packet->data, packet->len);
+ xpad->irq_out->transfer_buffer_length = packet->len;
+ packet->pending = false;
+ return true;
+ }
+
+ return false;
+}
+
+/* Callers must hold xpad->odata_lock spinlock */
+static int xpad_try_sending_next_out_packet(struct usb_xpad *xpad)
+{
+ int error;
+
+ if (!xpad->irq_out_active && xpad_prepare_next_out_packet(xpad)) {
+ error = usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ if (error) {
+ dev_err(&xpad->intf->dev,
+ "%s - usb_submit_urb failed with result %d\n",
+ __func__, error);
+ return -EIO;
+ }
+
+ xpad->irq_out_active = true;
+ }
+
+ return 0;
+}
+
static void xpad_irq_out(struct urb *urb)
{
struct usb_xpad *xpad = urb->context;
struct device *dev = &xpad->intf->dev;
- int retval, status;
+ int status = urb->status;
+ int error;
+ unsigned long flags;
- status = urb->status;
+ spin_lock_irqsave(&xpad->odata_lock, flags);
switch (status) {
case 0:
/* success */
- return;
+ xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
+ break;
case -ECONNRESET:
case -ENOENT:
@@ -697,19 +767,26 @@ static void xpad_irq_out(struct urb *urb)
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n",
__func__, status);
- return;
+ xpad->irq_out_active = false;
+ break;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n",
__func__, status);
- goto exit;
+ break;
}
-exit:
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
- __func__, retval);
+ if (xpad->irq_out_active) {
+ error = usb_submit_urb(urb, GFP_ATOMIC);
+ if (error) {
+ dev_err(dev,
+ "%s - usb_submit_urb failed with result %d\n",
+ __func__, error);
+ xpad->irq_out_active = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
}
static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
@@ -728,7 +805,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
goto fail1;
}
- mutex_init(&xpad->odata_mutex);
+ spin_lock_init(&xpad->odata_lock);
xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
if (!xpad->irq_out) {
@@ -770,27 +847,57 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ packet->data[0] = 0x08;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x0F;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
+
+ /* Reset the sequence so we send out presence first */
+ xpad->last_out_packet = -1;
+ retval = xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+
+ return retval;
+}
+
+static int xpad_start_xbox_one(struct usb_xpad *xpad)
+{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+ unsigned long flags;
int retval;
- mutex_lock(&xpad->odata_mutex);
+ spin_lock_irqsave(&xpad->odata_lock, flags);
- xpad->odata[0] = 0x08;
- xpad->odata[1] = 0x00;
- xpad->odata[2] = 0x0F;
- xpad->odata[3] = 0xC0;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ /* Xbox one controller needs to be initialized. */
+ packet->data[0] = 0x05;
+ packet->data[1] = 0x20;
+ packet->len = 2;
+ packet->pending = true;
- retval = usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ /* Reset the sequence so we send out start packet first */
+ xpad->last_out_packet = -1;
+ retval = xpad_try_sending_next_out_packet(xpad);
- mutex_unlock(&xpad->odata_mutex);
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
return retval;
}
@@ -799,8 +906,11 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct usb_xpad *xpad = input_get_drvdata(dev);
+ struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_FF_IDX];
__u16 strong;
__u16 weak;
+ int retval;
+ unsigned long flags;
if (effect->type != FF_RUMBLE)
return 0;
@@ -808,69 +918,80 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
strong = effect->u.rumble.strong_magnitude;
weak = effect->u.rumble.weak_magnitude;
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
switch (xpad->xtype) {
case XTYPE_XBOX:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x06;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = strong / 256; /* left actuator */
- xpad->odata[4] = 0x00;
- xpad->odata[5] = weak / 256; /* right actuator */
- xpad->irq_out->transfer_buffer_length = 6;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x06;
+ packet->data[2] = 0x00;
+ packet->data[3] = strong / 256; /* left actuator */
+ packet->data[4] = 0x00;
+ packet->data[5] = weak / 256; /* right actuator */
+ packet->len = 6;
+ packet->pending = true;
break;
case XTYPE_XBOX360:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x08;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = strong / 256; /* left actuator? */
- xpad->odata[4] = weak / 256; /* right actuator? */
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->irq_out->transfer_buffer_length = 8;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x08;
+ packet->data[2] = 0x00;
+ packet->data[3] = strong / 256; /* left actuator? */
+ packet->data[4] = weak / 256; /* right actuator? */
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->len = 8;
+ packet->pending = true;
break;
case XTYPE_XBOX360W:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x01;
- xpad->odata[2] = 0x0F;
- xpad->odata[3] = 0xC0;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = strong / 256;
- xpad->odata[6] = weak / 256;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x01;
+ packet->data[2] = 0x0F;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = strong / 256;
+ packet->data[6] = weak / 256;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
break;
case XTYPE_XBOXONE:
- xpad->odata[0] = 0x09; /* activate rumble */
- xpad->odata[1] = 0x08;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = 0x08; /* continuous effect */
- xpad->odata[4] = 0x00; /* simple rumble mode */
- xpad->odata[5] = 0x03; /* L and R actuator only */
- xpad->odata[6] = 0x00; /* TODO: LT actuator */
- xpad->odata[7] = 0x00; /* TODO: RT actuator */
- xpad->odata[8] = strong / 256; /* left actuator */
- xpad->odata[9] = weak / 256; /* right actuator */
- xpad->odata[10] = 0x80; /* length of pulse */
- xpad->odata[11] = 0x00; /* stop period of pulse */
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x09; /* activate rumble */
+ packet->data[1] = 0x08;
+ packet->data[2] = 0x00;
+ packet->data[3] = 0x08; /* continuous effect */
+ packet->data[4] = 0x00; /* simple rumble mode */
+ packet->data[5] = 0x03; /* L and R actuator only */
+ packet->data[6] = 0x00; /* TODO: LT actuator */
+ packet->data[7] = 0x00; /* TODO: RT actuator */
+ packet->data[8] = strong / 256; /* left actuator */
+ packet->data[9] = weak / 256; /* right actuator */
+ packet->data[10] = 0x80; /* length of pulse */
+ packet->data[11] = 0x00; /* stop period of pulse */
+ packet->len = 12;
+ packet->pending = true;
break;
default:
dev_dbg(&xpad->dev->dev,
"%s - rumble command sent to unsupported xpad type: %d\n",
__func__, xpad->xtype);
- return -EINVAL;
+ retval = -EINVAL;
+ goto out;
}
- return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ retval = xpad_try_sending_next_out_packet(xpad);
+
+out:
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+ return retval;
}
static int xpad_init_ff(struct usb_xpad *xpad)
@@ -921,36 +1042,44 @@ struct xpad_led {
*/
static void xpad_send_led_command(struct usb_xpad *xpad, int command)
{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_LED_IDX];
+ unsigned long flags;
+
command %= 16;
- mutex_lock(&xpad->odata_mutex);
+ spin_lock_irqsave(&xpad->odata_lock, flags);
switch (xpad->xtype) {
case XTYPE_XBOX360:
- xpad->odata[0] = 0x01;
- xpad->odata[1] = 0x03;
- xpad->odata[2] = command;
- xpad->irq_out->transfer_buffer_length = 3;
+ packet->data[0] = 0x01;
+ packet->data[1] = 0x03;
+ packet->data[2] = command;
+ packet->len = 3;
+ packet->pending = true;
break;
+
case XTYPE_XBOX360W:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x00;
- xpad->odata[2] = 0x08;
- xpad->odata[3] = 0x40 + command;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x08;
+ packet->data[3] = 0x40 + command;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
break;
}
- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- mutex_unlock(&xpad->odata_mutex);
+ xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
}
/*
@@ -1048,13 +1177,8 @@ static int xpad_open(struct input_dev *dev)
if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
return -EIO;
- if (xpad->xtype == XTYPE_XBOXONE) {
- /* Xbox one controller needs to be initialized. */
- xpad->odata[0] = 0x05;
- xpad->odata[1] = 0x20;
- xpad->irq_out->transfer_buffer_length = 2;
- return usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- }
+ if (xpad->xtype == XTYPE_XBOXONE)
+ return xpad_start_xbox_one(xpad);
return 0;
}
@@ -1200,22 +1324,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
int ep_irq_in_idx;
int i, error;
+ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+ return -ENODEV;
+
for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
break;
}
- if (xpad_device[i].xtype == XTYPE_XBOXONE &&
- intf->cur_altsetting->desc.bInterfaceNumber != 0) {
- /*
- * The Xbox One controller lists three interfaces all with the
- * same interface class, subclass and protocol. Differentiate by
- * interface number.
- */
- return -ENODEV;
- }
-
xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
if (!xpad)
return -ENOMEM;
@@ -1246,6 +1363,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
xpad->xtype = XTYPE_XBOX360W;
+ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+ xpad->xtype = XTYPE_XBOXONE;
else
xpad->xtype = XTYPE_XBOX360;
} else {
@@ -1260,6 +1379,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping |= MAP_STICKS_TO_NULL;
}
+ if (xpad->xtype == XTYPE_XBOXONE &&
+ intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+ /*
+ * The Xbox One controller lists three interfaces all with the
+ * same interface class, subclass and protocol. Differentiate by
+ * interface number.
+ */
+ error = -ENODEV;
+ goto err_free_in_urb;
+ }
+
error = xpad_init_output(intf, xpad);
if (error)
goto err_free_in_urb;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index acc5394afb03..29485bc4221c 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
/* Reset the KBC controller to clear all previous status.*/
reset_control_assert(kbc->rst);
udelay(100);
- reset_control_assert(kbc->rst);
+ reset_control_deassert(kbc->rst);
udelay(100);
tegra_kbc_config_pins(kbc);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 2f589857a039..d15b33813021 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,8 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.6.0
+ * Author: KT Liao <kt.liao@emc.com.tw>
+ * Version: 1.6.2
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +41,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.6.1"
+#define ELAN_DRIVER_VERSION "1.6.2"
#define ELAN_VENDOR_ID 0x04f3
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
@@ -199,9 +200,41 @@ static int elan_sleep(struct elan_tp_data *data)
return error;
}
+static int elan_query_product(struct elan_tp_data *data)
+{
+ int error;
+
+ error = data->ops->get_product_id(data->client, &data->product_id);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, &data->ic_type,
+ &data->sm_version);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
+{
+ if (data->ic_type != 0x0E)
+ return false;
+
+ switch (data->product_id) {
+ case 0x05 ... 0x07:
+ case 0x09:
+ case 0x13:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int __elan_initialize(struct elan_tp_data *data)
{
struct i2c_client *client = data->client;
+ bool woken_up = false;
int error;
error = data->ops->initialize(client);
@@ -210,6 +243,27 @@ static int __elan_initialize(struct elan_tp_data *data)
return error;
}
+ error = elan_query_product(data);
+ if (error)
+ return error;
+
+ /*
+ * Some ASUS devices were shipped with firmware that requires
+ * touchpads to be woken up first, before attempting to switch
+ * them into absolute reporting mode.
+ */
+ if (elan_check_ASUS_special_fw(data)) {
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
+
+ msleep(200);
+ woken_up = true;
+ }
+
data->mode |= ETP_ENABLE_ABS;
error = data->ops->set_mode(client, data->mode);
if (error) {
@@ -218,11 +272,13 @@ static int __elan_initialize(struct elan_tp_data *data)
return error;
}
- error = data->ops->sleep_control(client, false);
- if (error) {
- dev_err(&client->dev,
- "failed to wake device up: %d\n", error);
- return error;
+ if (!woken_up) {
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
}
return 0;
@@ -248,10 +304,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
{
int error;
- error = data->ops->get_product_id(data->client, &data->product_id);
- if (error)
- return error;
-
error = data->ops->get_version(data->client, false, &data->fw_version);
if (error)
return error;
@@ -261,11 +313,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_sm_version(data->client, &data->ic_type,
- &data->sm_version);
- if (error)
- return error;
-
error = data->ops->get_version(data->client, true, &data->iap_version);
if (error)
return error;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 78f93cf68840..be5b399da5d3 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 13:
- case 14:
+ case 6 ... 14:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index a3f0f5a47490..0f586780ceb4 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
return -ENXIO;
}
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
/* Check if the device is present */
response = ~VMMOUSE_PROTO_MAGIC;
VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
- release_region(VMMOUSE_PROTO_PORT, 4);
+ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
return -ENXIO;
- }
if (set_properties) {
psmouse->vendor = VMMOUSE_VENDOR;
@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
psmouse->model = version;
}
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return 0;
}
@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
psmouse_reset(psmouse);
input_unregister_device(priv->abs_dev);
kfree(priv);
- release_region(VMMOUSE_PROTO_PORT, 4);
}
/**
@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
struct input_dev *rel_dev = psmouse->dev, *abs_dev;
int error;
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
psmouse_reset(psmouse);
error = vmmouse_enable(psmouse);
if (error)
- goto release_region;
+ return error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
abs_dev = input_allocate_device();
@@ -502,8 +487,5 @@ init_fail:
kfree(priv);
psmouse->private = NULL;
-release_region:
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return error;
}
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 454195709a82..405252a884dd 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1277,6 +1277,7 @@ static int __init i8042_create_kbd_port(void)
serio->start = i8042_start;
serio->stop = i8042_stop;
serio->close = i8042_port_close;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
@@ -1304,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx)
serio->write = i8042_aux_write;
serio->start = i8042_start;
serio->stop = i8042_stop;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
if (idx < 0) {
@@ -1373,21 +1375,6 @@ static void i8042_unregister_ports(void)
}
}
-/*
- * Checks whether port belongs to i8042 controller.
- */
-bool i8042_check_port_owner(const struct serio *port)
-{
- int i;
-
- for (i = 0; i < I8042_NUM_PORTS; i++)
- if (i8042_ports[i].serio == port)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(i8042_check_port_owner);
-
static void i8042_free_irqs(void)
{
if (i8042_aux_irq_registered)
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 316f2c897101..83e9c663aa67 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -56,19 +56,17 @@ EXPORT_SYMBOL(ps2_sendbyte);
void ps2_begin_command(struct ps2dev *ps2dev)
{
- mutex_lock(&ps2dev->cmd_mutex);
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_lock_chip();
+ mutex_lock(m);
}
EXPORT_SYMBOL(ps2_begin_command);
void ps2_end_command(struct ps2dev *ps2dev)
{
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_unlock_chip();
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- mutex_unlock(&ps2dev->cmd_mutex);
+ mutex_unlock(m);
}
EXPORT_SYMBOL(ps2_end_command);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index d214f22ed305..45b466e3bbe8 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -126,7 +126,7 @@ struct sur40_image_header {
#define VIDEO_PACKET_SIZE 16384
/* polling interval (ms) */
-#define POLL_INTERVAL 4
+#define POLL_INTERVAL 1
/* maximum number of contacts FIXME: this is a guess? */
#define MAX_CONTACTS 64
@@ -441,7 +441,7 @@ static void sur40_process_video(struct sur40_state *sur40)
/* return error if streaming was stopped in the meantime */
if (sur40->sequence == -1)
- goto err_poll;
+ return;
/* mark as finished */
v4l2_get_timestamp(&new_buf->vb.timestamp);
@@ -730,6 +730,7 @@ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count)
static void sur40_stop_streaming(struct vb2_queue *vq)
{
struct sur40_state *sur40 = vb2_get_drv_priv(vq);
+ vb2_wait_for_all_buffers(vq);
sur40->sequence = -1;
/* Release all active buffers */
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 7295c198aa08..6fe55d598fac 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -22,6 +22,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2004_input_id = {
+ .bustype = BUS_I2C,
+ .product = 2004,
+};
+
static int tsc2004_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
tsc2004_cmd);
}
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b9f593dfd2ef..f2c5f0e47f77 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -24,6 +24,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2005_input_id = {
+ .bustype = BUS_SPI,
+ .product = 2005,
+};
+
static int tsc2005_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
if (error)
return error;
- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
devm_regmap_init_spi(spi, &tsc200x_regmap_config),
tsc2005_cmd);
}
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 15240c1ee850..dfa7f1c4f545 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd))
{
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(dev));
- input_dev->name = "TSC200X touchscreen";
+ if (tsc_id->product == 2004) {
+ input_dev->name = "TSC200X touchscreen";
+ } else {
+ input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+ "TSC%04d touchscreen",
+ tsc_id->product);
+ if (!input_dev->name)
+ return -ENOMEM;
+ }
+
input_dev->phys = ts->phys;
- input_dev->id.bustype = bustype;
+ input_dev->id = *tsc_id;
input_dev->dev.parent = dev;
input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 7a482d102614..49a63a3c6840 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -70,7 +70,7 @@
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
int tsc200x_remove(struct device *dev);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 2792ca397dd0..3ed0ce1e4dcb 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-#define W8001_MAX_LENGTH 11
+#define W8001_MAX_LENGTH 13
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b9319b76a8a1..0397985a2601 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -352,9 +352,11 @@ static void init_iommu_group(struct device *dev)
if (!domain)
goto out;
- dma_domain = to_pdomain(domain)->priv;
+ if (to_pdomain(domain)->flags == PD_DMA_OPS_MASK) {
+ dma_domain = to_pdomain(domain)->priv;
+ init_unity_mappings_for_device(dev, dma_domain);
+ }
- init_unity_mappings_for_device(dev, dma_domain);
out:
iommu_group_put(group);
}
@@ -2322,8 +2324,15 @@ static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
- list_for_each_entry(dev_data, &domain->dev_list, list)
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
+
+ if (dev_data->devid == dev_data->alias)
+ continue;
+
+ /* There is an alias, update device table entry for it */
+ set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
+ }
}
static void update_domain(struct protection_domain *domain)
@@ -2970,9 +2979,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
-
- if (!dom)
- return;
+ struct dma_ops_domain *dma_dom;
domain = to_pdomain(dom);
@@ -2981,13 +2988,24 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
BUG_ON(domain->dev_cnt != 0);
- if (domain->mode != PAGE_MODE_NONE)
- free_pagetable(domain);
+ if (!dom)
+ return;
+
+ switch (dom->type) {
+ case IOMMU_DOMAIN_DMA:
+ dma_dom = domain->priv;
+ dma_ops_domain_free(dma_dom);
+ break;
+ default:
+ if (domain->mode != PAGE_MODE_NONE)
+ free_pagetable(domain);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
- protection_domain_free(domain);
+ protection_domain_free(domain);
+ break;
+ }
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8487987458a1..00df3832faab 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -870,7 +870,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
* We may have concurrent producers, so we need to be careful
* not to touch any of the shadow cmdq state.
*/
- queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
dev_err(smmu->dev, "skipping command in error state:\n");
for (i = 0; i < ARRAY_SIZE(cmd); ++i)
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -881,7 +881,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
return;
}
- queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1025,6 +1025,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
case STRTAB_STE_0_CFG_S2_TRANS:
ste_live = true;
break;
+ case STRTAB_STE_0_CFG_ABORT:
+ if (disable_bypass)
+ break;
default:
BUG(); /* STE corruption */
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 58f2fe687a24..347a3c17f73a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!iovad)
return;
- put_iova_domain(iovad);
+ if (iovad->granule)
+ put_iova_domain(iovad);
kfree(iovad);
domain->iova_cookie = NULL;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 97c41b8ab5d9..29a31eb9ace3 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -647,6 +647,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
.name = "exynos-sysmmu",
.of_match_table = sysmmu_of_match,
.pm = &sysmmu_pm_ops,
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6763a4dfed94..24d81308a1a6 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2032,7 +2032,7 @@ out_unlock:
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);
- return 0;
+ return ret;
}
struct domain_context_mapping_data {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index a159529f9d53..c5f1757ac61d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
@@ -71,6 +72,7 @@ struct its_node {
struct list_head its_device_list;
u64 flags;
u32 ite_size;
+ int numa_node;
};
#define ITS_ITT_ALIGN SZ_256
@@ -600,11 +602,23 @@ static void its_unmask_irq(struct irq_data *d)
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
+ const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ /* lpi cannot be routed to a redistributor that is on a foreign node */
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ if (its_dev->its->numa_node >= 0) {
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ if (!cpumask_intersects(mask_val, cpu_mask))
+ return -EINVAL;
+ }
+ }
+
+ cpu = cpumask_any_and(mask_val, cpu_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
@@ -1081,6 +1095,16 @@ static void its_cpu_init_collection(void)
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
+ /* avoid cross node collections and its mapping */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ continue;
+ }
+
/*
* We now have to bind each collection to its target
* redistributor.
@@ -1308,9 +1332,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+
+ /* get the cpu_mask of local node */
+ if (its_dev->its->numa_node >= 0)
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
@@ -1400,6 +1429,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -1409,6 +1445,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_cavium_22375,
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+ {
+ .desc = "ITS: Cavium erratum 23144",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_23144,
+ },
+#endif
{
}
};
@@ -1470,6 +1514,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
its->base = its_base;
its->phys_base = res.start;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->numa_node = of_node_to_nid(node);
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
if (!its->cmd_base) {
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index 85b1133df4f5..77014f93927c 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -465,6 +465,24 @@ out:
return rc;
}
+static int qpnp_wled_swire_avdd_config(struct qpnp_wled *wled)
+{
+ int rc;
+ u8 val;
+
+ if (wled->pmic_rev_id->pmic_subtype != PMI8998_SUBTYPE &&
+ wled->pmic_rev_id->pmic_subtype != PM2FALCON_SUBTYPE)
+ return 0;
+
+ if (!wled->disp_type_amoled || wled->avdd_mode_spmi)
+ return 0;
+
+ val = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_SWIRE_AVDD_REG(wled->ctrl_base), val);
+ return rc;
+}
+
static int qpnp_wled_sync_reg_toggle(struct qpnp_wled *wled)
{
int rc;
@@ -884,8 +902,20 @@ static void qpnp_wled_work(struct work_struct *work)
}
if (!!level != wled->prev_state) {
- rc = qpnp_wled_module_en(wled, wled->ctrl_base, !!level);
+ if (!!level) {
+ /*
+ * For AMOLED display in pmi8998, SWIRE_AVDD_DEFAULT has
+ * to be reconfigured every time the module is enabled.
+ */
+ rc = qpnp_wled_swire_avdd_config(wled);
+ if (rc < 0) {
+ pr_err("Write to SWIRE_AVDD_DEFAULT register failed rc:%d\n",
+ rc);
+ goto unlock_mutex;
+ }
+ }
+ rc = qpnp_wled_module_en(wled, wled->ctrl_base, !!level);
if (rc) {
dev_err(&wled->pdev->dev, "wled %sable failed\n",
level ? "en" : "dis");
@@ -1246,22 +1276,22 @@ static int qpnp_wled_avdd_mode_config(struct qpnp_wled *wled)
wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MIN_MV;
}
- reg = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
-
if (wled->avdd_mode_spmi) {
+ reg = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
reg |= QPNP_WLED_AVDD_SEL_SPMI_BIT;
rc = qpnp_wled_write_reg(wled,
QPNP_WLED_AMOLED_VOUT_REG(wled->ctrl_base),
reg);
+ if (rc < 0)
+ pr_err("Write to AMOLED_VOUT register failed, rc=%d\n",
+ rc);
} else {
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SWIRE_AVDD_REG(wled->ctrl_base),
- reg);
+ rc = qpnp_wled_swire_avdd_config(wled);
+ if (rc < 0)
+ pr_err("Write to SWIRE_AVDD_DEFAULT register failed rc:%d\n",
+ rc);
}
- if (rc < 0)
- dev_err(&wled->pdev->dev, "Write to VOUT/AVDD register failed, rc=%d\n",
- rc);
return rc;
}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index a54b339951a3..2a96ff6923f0 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -89,6 +89,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
+ lun->vlun.nr_free_blocks--;
}
return 0;
@@ -345,7 +346,7 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
if (!dev->ops->submit_io)
- return 0;
+ return -ENODEV;
/* Convert address space */
gennvm_generic_to_addr_mode(dev, rqd);
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 134e4faba482..a9859489acf6 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -287,6 +287,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
}
page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
+ if (!page)
+ return -ENOMEM;
while ((slot = find_first_zero_bit(rblk->invalid_pages,
nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -427,7 +429,7 @@ static void rrpc_lun_gc(struct work_struct *work)
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
- spin_lock(&lun->lock);
+ spin_lock(&rlun->lock);
while (nr_blocks_need > lun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -436,16 +438,16 @@ static void rrpc_lun_gc(struct work_struct *work)
if (!rblock->nr_invalid_pages)
break;
+ gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+ if (!gcb)
+ break;
+
list_del_init(&rblock->prio);
BUG_ON(!block_is_full(rrpc, rblock));
pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
gcb->rrpc = rrpc;
gcb->rblk = rblock;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -454,7 +456,7 @@ static void rrpc_lun_gc(struct work_struct *work)
nr_blocks_need--;
}
- spin_unlock(&lun->lock);
+ spin_unlock(&rlun->lock);
/* TODO: Hint that request queue can be started again */
}
@@ -650,11 +652,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
if (bio_data_dir(rqd->bio) == WRITE)
rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+ bio_put(rqd->bio);
+
if (rrqd->flags & NVM_IOTYPE_GC)
return 0;
rrpc_unlock_rq(rrpc, rqd);
- bio_put(rqd->bio);
if (npages > 1)
nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
@@ -841,6 +844,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
err = nvm_submit_io(rrpc->dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
+ bio_put(bio);
+ if (!(flags & NVM_IOTYPE_GC)) {
+ rrpc_unlock_rq(rrpc, rqd);
+ if (rqd->nr_pages > 1)
+ nvm_dev_dma_free(rrpc->dev,
+ rqd->ppa_list, rqd->dma_ppa_list);
+ }
return NVM_IO_ERR;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e562bdedfb07..7d5aa2c5c81d 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -518,7 +518,7 @@ config DM_LOG_WRITES
If unsure, say N.
config DM_ANDROID_VERITY
- bool "Android verity target support"
+ tristate "Android verity target support"
depends on DM_VERITY
depends on X509_CERTIFICATE_PARSER
depends on SYSTEM_TRUSTED_KEYRING
@@ -526,6 +526,7 @@ config DM_ANDROID_VERITY
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ depends on MD_LINEAR
---help---
This device-mapper target is virtually a VERITY target. This
target is setup by reading the metadata contents piggybacked
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index ce7cf06d0e8a..2b2ba36638cd 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o
+obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a296425a7270..3d5c0ba13181 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1818,7 +1818,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index bb6c1285e499..13c60bee8af5 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -48,7 +48,7 @@ static char buildvariant[BUILD_VARIANT];
static bool target_added;
static bool verity_enabled = true;
-struct dentry *debug_dir;
+static struct dentry *debug_dir;
static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
static struct target_type android_verity_target = {
@@ -532,7 +532,7 @@ blkdev_release:
}
/* helper functions to extract properties from dts */
-const char *find_dt_value(const char *name)
+static const char *find_dt_value(const char *name)
{
struct device_node *firmware;
const char *value;
diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h
index 0fcd54aaf5f6..0c7ff6afec69 100644
--- a/drivers/md/dm-android-verity.h
+++ b/drivers/md/dm-android-verity.h
@@ -72,9 +72,6 @@
* if fec is not present
* <data_blocks> <verity_tree> <verity_metdata_32K>
*/
-/* TODO: rearrange structure to reduce memory holes
- * depends on userspace change.
- */
struct fec_header {
__le32 magic;
__le32 version;
@@ -83,7 +80,7 @@ struct fec_header {
__le32 fec_size;
__le64 inp_size;
u8 hash[SHA256_DIGEST_SIZE];
-};
+} __attribute__((packed));
struct android_metadata_header {
__le32 magic_number;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 09e2afcafd2d..cd0a93df4cb7 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -289,10 +289,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Map reads as normal.
+ * Map reads as normal only if corrupt_bio_byte set.
*/
- if (bio_data_dir(bio) == READ)
- goto map_bio;
+ if (bio_data_dir(bio) == READ) {
+ /* If flags were specified, only corrupt those that match. */
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ goto map_bio;
+ else
+ return -EIO;
+ }
/*
* Drop writes?
@@ -330,12 +336,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
/*
* Corrupt successful READs while in down state.
- * If flags were specified, only corrupt those that match.
*/
- if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
- (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc))
- corrupt_bio_data(bio, fc);
+ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+ if (fc->corrupt_bio_byte)
+ corrupt_bio_data(bio, fc);
+ else
+ return -EIO;
+ }
return error;
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 8505a771de42..2ff5f32a4b99 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -66,6 +66,7 @@ int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
kfree(lc);
return ret;
}
+EXPORT_SYMBOL_GPL(dm_linear_ctr);
void dm_linear_dtr(struct dm_target *ti)
{
@@ -74,6 +75,7 @@ void dm_linear_dtr(struct dm_target *ti)
dm_put_device(ti, lc->dev);
kfree(lc);
}
+EXPORT_SYMBOL_GPL(dm_linear_dtr);
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
@@ -98,6 +100,7 @@ int dm_linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+EXPORT_SYMBOL_GPL(dm_linear_map);
void dm_linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -115,6 +118,7 @@ void dm_linear_status(struct dm_target *ti, status_type_t type,
break;
}
}
+EXPORT_SYMBOL_GPL(dm_linear_status);
int dm_linear_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
@@ -132,6 +136,7 @@ int dm_linear_prepare_ioctl(struct dm_target *ti,
return 1;
return 0;
}
+EXPORT_SYMBOL_GPL(dm_linear_prepare_ioctl);
int dm_linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
@@ -140,6 +145,7 @@ int dm_linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
+EXPORT_SYMBOL_GPL(dm_linear_iterate_devices);
static struct target_type linear_target = {
.name = "linear",
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 5214ed2c7507..9d3d4b297201 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -592,6 +592,7 @@ int verity_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+EXPORT_SYMBOL_GPL(verity_map);
/*
* Status: V (valid) or C (corruption found)
@@ -655,6 +656,7 @@ void verity_status(struct dm_target *ti, status_type_t type,
break;
}
}
+EXPORT_SYMBOL_GPL(verity_status);
int verity_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
@@ -668,6 +670,7 @@ int verity_prepare_ioctl(struct dm_target *ti,
return 1;
return 0;
}
+EXPORT_SYMBOL_GPL(verity_prepare_ioctl);
int verity_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
@@ -676,6 +679,7 @@ int verity_iterate_devices(struct dm_target *ti,
return fn(ti, v->data_dev, v->data_start, ti->len, data);
}
+EXPORT_SYMBOL_GPL(verity_iterate_devices);
void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
@@ -689,6 +693,7 @@ void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, limits->logical_block_size);
}
+EXPORT_SYMBOL_GPL(verity_io_hints);
void verity_dtr(struct dm_target *ti)
{
@@ -719,6 +724,7 @@ void verity_dtr(struct dm_target *ti)
kfree(v);
}
+EXPORT_SYMBOL_GPL(verity_dtr);
static int verity_alloc_zero_digest(struct dm_verity *v)
{
@@ -1053,6 +1059,7 @@ bad:
return r;
}
+EXPORT_SYMBOL_GPL(verity_ctr);
static struct target_type verity_target = {
.name = "verity",
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 479fdbb3dcb2..d8615788b17d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -3085,7 +3085,8 @@ static void unlock_fs(struct mapped_device *md)
* Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, int interruptible)
+ unsigned suspend_flags, int interruptible,
+ int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
@@ -3152,6 +3153,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* to finish.
*/
r = dm_wait_for_completion(md, interruptible);
+ if (!r)
+ set_bit(dmf_suspended_flag, &md->flags);
if (noflush)
clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -3213,12 +3216,10 @@ retry:
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
- r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
+ r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
if (r)
goto out_unlock;
- set_bit(DMF_SUSPENDED, &md->flags);
-
dm_table_postsuspend_targets(map);
out_unlock:
@@ -3312,9 +3313,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
* would require changing .presuspend to return an error -- avoid this
* until there is a need for more elaborate variants of internal suspend.
*/
- (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
-
- set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
+ (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
+ DMF_SUSPENDED_INTERNALLY);
dm_table_postsuspend_targets(map);
}
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index d61be58e22f0..0d44deed65bd 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -57,7 +57,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
{
- return (rbuf->pread == rbuf->pwrite);
+ /* smp_load_acquire() to load write pointer on reader side
+ * this pairs with smp_store_release() in dvb_ringbuffer_write(),
+ * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
+ *
+ * for memory barriers also see Documentation/circular-buffers.txt
+ */
+ return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
}
@@ -66,7 +72,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
ssize_t free;
- free = rbuf->pread - rbuf->pwrite;
+ /* ACCESS_ONCE() to load read pointer on writer side
+ * this pairs with smp_store_release() in dvb_ringbuffer_read(),
+ * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
+ * or dvb_ringbuffer_reset()
+ */
+ free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
if (free <= 0)
free += rbuf->size;
return free-1;
@@ -78,7 +89,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
{
ssize_t avail;
- avail = rbuf->pwrite - rbuf->pread;
+ /* smp_load_acquire() to load write pointer on reader side
+ * this pairs with smp_store_release() in dvb_ringbuffer_write(),
+ * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
+ */
+ avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
if (avail < 0)
avail += rbuf->size;
return avail;
@@ -88,14 +103,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
{
- rbuf->pread = rbuf->pwrite;
+ /* dvb_ringbuffer_flush() counts as read operation
+ * smp_load_acquire() to load write pointer
+ * smp_store_release() to update read pointer, this ensures that the
+ * correct pointer is visible for subsequent dvb_ringbuffer_free()
+ * calls on other cpu cores
+ */
+ smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
rbuf->error = 0;
}
EXPORT_SYMBOL(dvb_ringbuffer_flush);
void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
{
- rbuf->pread = rbuf->pwrite = 0;
+ /* dvb_ringbuffer_reset() counts as read and write operation
+ * smp_store_release() to update read pointer
+ */
+ smp_store_release(&rbuf->pread, 0);
+ /* smp_store_release() to update write pointer */
+ smp_store_release(&rbuf->pwrite, 0);
rbuf->error = 0;
}
@@ -121,12 +147,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
return -EFAULT;
buf += split;
todo -= split;
- rbuf->pread = 0;
+ /* smp_store_release() for read pointer update to ensure
+ * that buf is not overwritten until read is complete,
+ * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ */
+ smp_store_release(&rbuf->pread, 0);
}
if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
return -EFAULT;
- rbuf->pread = (rbuf->pread + todo) % rbuf->size;
+ /* smp_store_release() to update read pointer, see above */
+ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
return len;
}
@@ -141,11 +172,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
memcpy(buf, rbuf->data+rbuf->pread, split);
buf += split;
todo -= split;
- rbuf->pread = 0;
+ /* smp_store_release() for read pointer update to ensure
+ * that buf is not overwritten until read is complete,
+ * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ */
+ smp_store_release(&rbuf->pread, 0);
}
memcpy(buf, rbuf->data+rbuf->pread, todo);
- rbuf->pread = (rbuf->pread + todo) % rbuf->size;
+ /* smp_store_release() to update read pointer, see above */
+ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
}
@@ -160,10 +196,16 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t
memcpy(rbuf->data+rbuf->pwrite, buf, split);
buf += split;
todo -= split;
- rbuf->pwrite = 0;
+ /* smp_store_release() for write pointer update to ensure that
+ * written data is visible on other cpu cores before the pointer
+ * update, this pairs with smp_load_acquire() in
+ * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
+ */
+ smp_store_release(&rbuf->pwrite, 0);
}
memcpy(rbuf->data+rbuf->pwrite, buf, todo);
- rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
+ /* smp_store_release() for write pointer update, see above */
+ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
return len;
}
@@ -184,15 +226,18 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
return -EFAULT;
buf += split;
todo -= split;
- rbuf->pwrite = 0;
+ /* smp_store_release() for write pointer update to ensure that
+ * written data is visible on other cpu cores before the pointer
+ * update, this pairs with smp_load_acquire() in
+ * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
+ */
+ smp_store_release(&rbuf->pwrite, 0);
}
-
- if (copy_from_user(rbuf->data + rbuf->pwrite, buf, todo)) {
- rbuf->pwrite = oldpwrite;
- return -EFAULT;
- }
-
- rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
+ status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
+ if (status)
+ return len - todo;
+ /* smp_store_release() for write pointer update, see above */
+ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
return len;
}
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 292c9479bb75..310e4b8beae8 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -264,7 +264,7 @@ config DVB_MB86A16
config DVB_TDA10071
tristate "NXP TDA10071"
depends on DVB_CORE && I2C
- select REGMAP
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 4200215705d0..de29692414d2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -188,11 +188,27 @@ static int msm_isp_prepare_v4l2_buf(struct msm_isp_buf_mgr *buf_mgr,
int ret;
struct msm_isp_buffer_mapped_info *mapped_info;
uint32_t accu_length = 0;
+ struct msm_isp_bufq *bufq = NULL;
+ bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq, stream id %x\n",
+ __func__, stream_id);
+ return -EINVAL;
+ }
for (i = 0; i < qbuf_buf->num_planes; i++) {
mapped_info = &buf_info->mapped_info[i];
mapped_info->buf_fd = qbuf_buf->planes[i].addr;
- ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+
+ if (bufq->security_mode == SECURE_MODE)
+ ret = cam_smmu_get_stage2_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd,
+ CAM_SMMU_MAP_RW,
+ buf_mgr->client,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+ else
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
mapped_info->buf_fd,
CAM_SMMU_MAP_RW,
&(mapped_info->paddr),
@@ -242,8 +258,13 @@ static void msm_isp_unprepare_v4l2_buf(
for (i = 0; i < buf_info->num_planes; i++) {
mapped_info = &buf_info->mapped_info[i];
-
- cam_smmu_put_phy_addr(buf_mgr->iommu_hdl, mapped_info->buf_fd);
+ /* SEC_CAM: check any change is needed for secure_mode */
+ if (bufq->security_mode == SECURE_MODE)
+ cam_smmu_put_stage2_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd);
+ else
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd);
}
return;
}
@@ -259,7 +280,15 @@ static int msm_isp_map_buf(struct msm_isp_buf_mgr *buf_mgr,
__func__, __LINE__, buf_mgr, mapped_info);
return -EINVAL;
}
- ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ ret = cam_smmu_get_stage2_phy_addr(buf_mgr->iommu_hdl,
+ fd,
+ CAM_SMMU_MAP_RW,
+ buf_mgr->client,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+ else
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
fd,
CAM_SMMU_MAP_RW,
&(mapped_info->paddr),
@@ -275,7 +304,11 @@ static int msm_isp_map_buf(struct msm_isp_buf_mgr *buf_mgr,
return rc;
smmu_map_error:
- cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_put_stage2_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+ else
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
fd);
return rc;
}
@@ -289,7 +322,12 @@ static int msm_isp_unmap_buf(struct msm_isp_buf_mgr *buf_mgr,
return -EINVAL;
}
- cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ /* SEC_CAMERA: recheck Put part for stats */
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_put_stage2_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+ else
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
fd);
return 0;
@@ -921,7 +959,7 @@ static int msm_isp_get_buf_src(struct msm_isp_buf_mgr *buf_mgr,
}
static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
- struct msm_isp_buf_request *buf_request)
+ struct msm_isp_buf_request_ver2 *buf_request)
{
int i;
struct msm_isp_bufq *bufq = NULL;
@@ -961,6 +999,7 @@ static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
bufq->num_bufs = buf_request->num_buf;
bufq->buf_type = buf_request->buf_type;
INIT_LIST_HEAD(&bufq->head);
+ bufq->security_mode = buf_request->security_mode;
for (i = 0; i < buf_request->num_buf; i++) {
bufq->bufs[i].state = MSM_ISP_BUFFER_STATE_INITIALIZED;
@@ -1032,15 +1071,25 @@ static int msm_isp_buf_put_scratch(struct msm_isp_buf_mgr *buf_mgr)
if (!buf_mgr->scratch_buf_addr)
return 0;
- rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
+ if (buf_mgr->secure_enable == SECURE_MODE) {
+ rc = cam_smmu_free_stage2_scratch_mem(buf_mgr->iommu_hdl,
+ buf_mgr->client, buf_mgr->sc_handle);
+ if (buf_mgr->scratch_buf_stats_addr)
+ rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
+ buf_mgr->scratch_buf_stats_addr);
+ } else {
+ rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
buf_mgr->scratch_buf_addr);
+ }
if (rc)
pr_err("%s: failed to put scratch buffer to img iommu: %d\n",
__func__, rc);
- if (!rc)
+ if (!rc) {
buf_mgr->scratch_buf_addr = 0;
+ buf_mgr->scratch_buf_stats_addr = 0;
+ }
return rc;
}
@@ -1057,17 +1106,40 @@ static int msm_isp_buf_put_scratch(struct msm_isp_buf_mgr *buf_mgr)
static int msm_isp_buf_get_scratch(struct msm_isp_buf_mgr *buf_mgr)
{
int rc;
+ size_t range = buf_mgr->scratch_buf_range;
if (buf_mgr->scratch_buf_addr || !buf_mgr->scratch_buf_range)
/* already mapped or not supported */
return 0;
- rc = cam_smmu_get_phy_addr_scratch(
+ if (buf_mgr->secure_enable == SECURE_MODE) {
+ rc = cam_smmu_alloc_get_stage2_scratch_mem(buf_mgr->iommu_hdl,
+ CAM_SMMU_MAP_RW,
+ buf_mgr->client,
+ &buf_mgr->sc_handle,
+ &buf_mgr->scratch_buf_addr,
+ &range);
+ if (rc)
+ goto done;
+
+ rc = cam_smmu_get_phy_addr_scratch(
+ buf_mgr->iommu_hdl,
+ CAM_SMMU_MAP_RW,
+ &buf_mgr->scratch_buf_stats_addr,
+ buf_mgr->scratch_buf_range,
+ SZ_4K);
+ if (rc)
+ msm_isp_buf_put_scratch(buf_mgr);
+ } else {
+ rc = cam_smmu_get_phy_addr_scratch(
buf_mgr->iommu_hdl,
CAM_SMMU_MAP_RW,
&buf_mgr->scratch_buf_addr,
buf_mgr->scratch_buf_range,
SZ_4K);
+ buf_mgr->scratch_buf_stats_addr = buf_mgr->scratch_buf_addr;
+ }
+done:
if (rc) {
pr_err("%s: failed to map scratch buffer to img iommu: %d\n",
__func__, rc);
@@ -1085,20 +1157,23 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
pr_debug("%s: cmd->security_mode : %d\n", __func__, cmd->security_mode);
mutex_lock(&buf_mgr->lock);
if (cmd->iommu_attach_mode == IOMMU_ATTACH) {
- buf_mgr->secure_enable = cmd->security_mode;
-
/*
* Call hypervisor thru scm call to notify secure or
* non-secure mode
*/
if (buf_mgr->attach_ref_cnt == 0) {
- rc = cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_ATTACH);
+ if (cmd->security_mode == SECURE_MODE)
+ rc = cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_ATTACH_SEC_VFE_NS_STATS);
+ else
+ rc = cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_ATTACH);
if (rc < 0) {
pr_err("%s: img smmu attach error, rc :%d\n",
__func__, rc);
- goto err1;
+ goto err1;
}
+ buf_mgr->secure_enable = cmd->security_mode;
}
buf_mgr->attach_ref_cnt++;
rc = msm_isp_buf_get_scratch(buf_mgr);
@@ -1113,8 +1188,12 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
if (buf_mgr->attach_ref_cnt == 0) {
rc = msm_isp_buf_put_scratch(buf_mgr);
- rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_DETACH);
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
+ else
+ rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH);
if (rc < 0) {
pr_err("%s: img/stats smmu detach error, rc :%d\n",
__func__, rc);
@@ -1126,8 +1205,11 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
return rc;
err2:
- if (cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH))
- pr_err("%s: img smmu detach error\n", __func__);
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
+ else
+ cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
err1:
mutex_unlock(&buf_mgr->lock);
return rc;
@@ -1162,12 +1244,11 @@ static int msm_isp_init_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
buf_mgr->pagefault_debug_disable = 0;
buf_mgr->frameId_mismatch_recovery = 0;
- mutex_unlock(&buf_mgr->lock);
- return 0;
-
+ /* create ION client */
+ buf_mgr->client = msm_ion_client_create("vfe");
get_handle_error:
mutex_unlock(&buf_mgr->lock);
- return rc;
+ return 0;
}
static int msm_isp_deinit_isp_buf_mgr(
@@ -1186,10 +1267,21 @@ static int msm_isp_deinit_isp_buf_mgr(
buf_mgr->pagefault_debug_disable = 0;
msm_isp_buf_put_scratch(buf_mgr);
- cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
+ if (buf_mgr->attach_ref_cnt != 0) {
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
+ else
+ cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
+ }
cam_smmu_destroy_handle(buf_mgr->iommu_hdl);
-
buf_mgr->attach_ref_cnt = 0;
+ buf_mgr->secure_enable = 0;
+ buf_mgr->attach_ref_cnt = 0;
+ if (buf_mgr->client) {
+ ion_client_destroy(buf_mgr->client);
+ buf_mgr->client = NULL;
+ }
mutex_unlock(&buf_mgr->lock);
return 0;
}
@@ -1200,8 +1292,20 @@ int msm_isp_proc_buf_cmd(struct msm_isp_buf_mgr *buf_mgr,
switch (cmd) {
case VIDIOC_MSM_ISP_REQUEST_BUF: {
struct msm_isp_buf_request *buf_req = arg;
+ struct msm_isp_buf_request_ver2 buf_req_ver2;
+
+ memcpy(&buf_req_ver2, buf_req,
+ sizeof(struct msm_isp_buf_request));
+ buf_req_ver2.security_mode = NON_SECURE_MODE;
+ buf_mgr->ops->request_buf(buf_mgr, &buf_req_ver2);
+ memcpy(buf_req, &buf_req_ver2,
+ sizeof(struct msm_isp_buf_request));
+ break;
+ }
+ case VIDIOC_MSM_ISP_REQUEST_BUF_VER2: {
+ struct msm_isp_buf_request_ver2 *buf_req_ver2 = arg;
- buf_mgr->ops->request_buf(buf_mgr, buf_req);
+ buf_mgr->ops->request_buf(buf_mgr, buf_req_ver2);
break;
}
case VIDIOC_MSM_ISP_ENQUEUE_BUF: {
@@ -1393,7 +1497,6 @@ int msm_isp_create_isp_buf_mgr(
buf_mgr->open_count = 0;
buf_mgr->pagefault_debug_disable = 0;
buf_mgr->secure_enable = NON_SECURE_MODE;
- buf_mgr->attach_state = MSM_ISP_BUF_MGR_DETACH;
buf_mgr->scratch_buf_range = scratch_buf_range;
mutex_init(&buf_mgr->lock);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
index 43519ee74062..21fab0590b55 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -119,11 +119,12 @@ struct msm_isp_bufq {
spinlock_t bufq_lock;
/*Native buffer queue*/
struct list_head head;
+ enum smmu_attach_mode security_mode;
};
struct msm_isp_buf_ops {
int (*request_buf)(struct msm_isp_buf_mgr *buf_mgr,
- struct msm_isp_buf_request *buf_request);
+ struct msm_isp_buf_request_ver2 *buf_request);
int (*enqueue_buf)(struct msm_isp_buf_mgr *buf_mgr,
struct msm_isp_qbuf_info *info);
@@ -191,21 +192,20 @@ struct msm_isp_buf_mgr {
struct msm_sd_req_vb2_q *vb2_ops;
- /*IOMMU driver*/
- int iommu_hdl;
/*Add secure mode*/
int secure_enable;
- int num_iommu_ctx;
- int num_iommu_secure_ctx;
int attach_ref_cnt;
enum msm_isp_buf_mgr_state attach_state;
struct device *isp_dev;
struct mutex lock;
/* Scratch buffer */
dma_addr_t scratch_buf_addr;
+ dma_addr_t scratch_buf_stats_addr;
uint32_t scratch_buf_range;
+ int iommu_hdl;
+ struct ion_handle *sc_handle;
};
int msm_isp_create_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 094996b2d60b..35daf30bac63 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -655,8 +655,6 @@ int vfe_hw_probe(struct platform_device *pdev)
goto probe_fail3;
}
msm_isp_enable_debugfs(vfe_dev, msm_isp_bw_request_history);
- vfe_dev->buf_mgr->num_iommu_secure_ctx =
- vfe_dev->hw_info->num_iommu_secure_ctx;
vfe_dev->buf_mgr->init_done = 1;
vfe_dev->vfe_open_cnt = 0;
return rc;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 1f860f2c5b12..9c7eba21fde1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -243,6 +243,10 @@ struct msm_vfe_core_ops {
int (*start_fetch_eng_multi_pass)(struct vfe_device *vfe_dev,
void *arg);
void (*set_halt_restart_mask)(struct vfe_device *vfe_dev);
+ void (*set_bus_err_ign_mask)(struct vfe_device *vfe_dev,
+ int wm, int enable);
+ void (*get_bus_err_mask)(struct vfe_device *vfe_dev,
+ uint32_t *bus_err, uint32_t *irq_status1);
};
struct msm_vfe_stats_ops {
@@ -786,6 +790,8 @@ struct vfe_device {
/* irq info */
uint32_t irq0_mask;
uint32_t irq1_mask;
+
+ uint32_t bus_err_ign_mask;
};
struct vfe_parent_device {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 8275f8cedf2e..43f562b18209 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -1474,6 +1474,8 @@ struct msm_vfe_hardware_info vfe32_hw_info = {
.is_module_cfg_lock_needed =
msm_vfe32_is_module_cfg_lock_needed,
.ahb_clk_cfg = NULL,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe32_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 2d937fc3ed05..a1fb307b09c1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -2263,6 +2263,8 @@ struct msm_vfe_hardware_info vfe40_hw_info = {
msm_vfe40_start_fetch_engine_multi_pass,
.set_halt_restart_mask =
msm_vfe40_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe40_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 15820b5f398b..0a72a041de28 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -1869,6 +1869,8 @@ struct msm_vfe_hardware_info vfe44_hw_info = {
.ahb_clk_cfg = NULL,
.set_halt_restart_mask =
msm_vfe44_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe44_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 23fbc4f5e33a..f2d53c956fdc 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -1945,6 +1945,8 @@ struct msm_vfe_hardware_info vfe46_hw_info = {
.ahb_clk_cfg = NULL,
.set_halt_restart_mask =
msm_vfe46_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe46_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 8793745aac71..13c6e000fefc 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -2860,6 +2860,8 @@ struct msm_vfe_hardware_info vfe47_hw_info = {
msm_vfe47_start_fetch_engine_multi_pass,
.set_halt_restart_mask =
msm_vfe47_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe47_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
index c533f23c1163..f346ceb6c9e5 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
@@ -241,6 +241,25 @@ static void msm_vfe48_put_regulators(struct vfe_device *vfe_dev)
vfe_dev->vfe_num_regulators = 0;
}
+/*
+ * msm_vfe48_get_bus_err_mask - read and filter the VFE48 bus-error status.
+ * @vfe_dev:     VFE device whose error status is read.
+ * @bus_err:     out: raw bus-error bits with ignored WMs masked off.
+ * @irq_status1: in/out: IRQ status word 1; the bus-error summary bit
+ *               (bit 4) is cleared when all remaining error bits are masked,
+ *               so overflow recovery is not started for ignored write masters.
+ *
+ * bus_err_ign_mask is populated per-WM by msm_vfe48_set_bus_err_ign_mask()
+ * (set for RDI-interface streams in msm_isp_axi_reserve_wm()).
+ * NOTE(review): 0xC94 is assumed to be the VFE48 bus-error/violation status
+ * register offset -- confirm against the hardware register map.
+ */
+static void msm_vfe48_get_bus_err_mask(struct vfe_device *vfe_dev,
+	uint32_t *bus_err, uint32_t *irq_status1)
+{
+	*bus_err = msm_camera_io_r(vfe_dev->vfe_base + 0xC94);
+
+	*bus_err &= ~vfe_dev->bus_err_ign_mask;
+	if (*bus_err == 0)
+		*irq_status1 &= ~(1 << 4);
+}
+
+/*
+ * msm_vfe48_set_bus_err_ign_mask - mark a write master's bus errors as
+ * ignorable (or re-enable them).
+ * @vfe_dev: VFE device owning the mask.
+ * @wm:      write-master index; selects bit (1 << wm) in bus_err_ign_mask.
+ * @enable:  non-zero to ignore this WM's bus errors, zero to honor them.
+ *
+ * The mask is consumed by msm_vfe48_get_bus_err_mask() when filtering the
+ * bus-error status during overflow IRQ handling.
+ * NOTE(review): the read-modify-write of bus_err_ign_mask is not atomic and
+ * no lock is taken here -- confirm that callers (WM reserve/free paths)
+ * serialize access.
+ */
+static void msm_vfe48_set_bus_err_ign_mask(struct vfe_device *vfe_dev,
+	int wm, int enable)
+{
+	if (enable)
+		vfe_dev->bus_err_ign_mask |= (1 << wm);
+	else
+		vfe_dev->bus_err_ign_mask &= ~(1 << wm);
+}
+
struct msm_vfe_hardware_info vfe48_hw_info = {
.num_iommu_ctx = 1,
.num_iommu_secure_ctx = 0,
@@ -315,6 +334,8 @@ struct msm_vfe_hardware_info vfe48_hw_info = {
msm_vfe47_start_fetch_engine_multi_pass,
.set_halt_restart_mask =
msm_vfe47_set_halt_restart_mask,
+ .set_bus_err_ign_mask = msm_vfe48_set_bus_err_ign_mask,
+ .get_bus_err_mask = msm_vfe48_get_bus_err_mask,
},
.stats_ops = {
.get_stats_idx = msm_vfe47_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 1bf628de4df0..f6e0d9083b22 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -13,6 +13,7 @@
#include <media/v4l2-subdev.h>
#include <asm/div64.h>
#include "msm_isp_util.h"
+#include "msm_isp_stats_util.h"
#include "msm_isp_axi_util.h"
#include "msm_isp48.h"
@@ -429,6 +430,13 @@ static void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
vfe_dev->pdev->id,
stream_info->stream_handle[vfe_idx], j);
stream_info->wm[vfe_idx][i] = j;
+ /* setup var to ignore bus error from RDI wm */
+ if (stream_info->stream_src >= RDI_INTF_0) {
+ if (vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask(vfe_dev, j, 1);
+ }
}
}
@@ -442,6 +450,13 @@ void msm_isp_axi_free_wm(struct vfe_device *vfe_dev,
for (i = 0; i < stream_info->num_planes; i++) {
axi_data->free_wm[stream_info->wm[vfe_idx][i]] = 0;
axi_data->num_used_wm--;
+ if (stream_info->stream_src >= RDI_INTF_0) {
+ if (vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask(vfe_dev,
+ stream_info->wm[vfe_idx][i], 0);
+ }
}
if (stream_info->stream_src <= IDEAL_RAW)
axi_data->num_pix_stream++;
@@ -2186,6 +2201,7 @@ static void msm_isp_input_disable(struct vfe_device *vfe_dev, int cmd_type)
if (msm_vfe_is_vfe48(vfe_dev))
vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
0, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
}
}
@@ -2758,12 +2774,11 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
if (!update_vfes[k])
continue;
vfe_dev = update_vfes[k];
- axi_data = &vfe_dev->axi_data;
- if (axi_data->src_info[VFE_PIX_0].active == 0) {
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, 0xFF, 0);
- }
+ /* make sure all stats are stopped if camif is stopped */
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active == 0)
+ msm_isp_stop_all_stats_stream(vfe_dev);
}
+
for (i = 0; i < num_streams; i++) {
stream_info = streams[i];
spin_lock_irqsave(&stream_info->lock, flags);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 22a7f6886964..38ce78d941c9 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -23,7 +23,7 @@ static inline void msm_isp_stats_cfg_wm_scratch(struct vfe_device *vfe_dev,
{
vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
vfe_dev, stream_info,
- pingpong_status, vfe_dev->buf_mgr->scratch_buf_addr);
+ pingpong_status, vfe_dev->buf_mgr->scratch_buf_stats_addr);
}
static inline void msm_isp_stats_cfg_stream_scratch(
@@ -548,15 +548,12 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
return 0;
}
-void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
+void msm_isp_stop_all_stats_stream(struct vfe_device *vfe_dev)
{
- struct msm_vfe_stats_stream_release_cmd
- stream_release_cmd[MSM_ISP_STATS_MAX];
struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
struct msm_vfe_stats_stream *stream_info;
int i;
int vfe_idx;
- int num_stream = 0;
unsigned long flags;
stream_cfg_cmd.enable = 0;
@@ -565,7 +562,8 @@ void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state == STATS_AVAILABLE) {
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE) {
spin_unlock_irqrestore(&stream_info->lock, flags);
continue;
}
@@ -575,12 +573,6 @@ void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
spin_unlock_irqrestore(&stream_info->lock, flags);
continue;
}
- stream_release_cmd[num_stream++].stream_handle =
- stream_info->stream_handle[vfe_idx];
- if (stream_info->state == STATS_INACTIVE) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
- continue;
- }
stream_cfg_cmd.stream_handle[
stream_cfg_cmd.num_streams] =
stream_info->stream_handle[vfe_idx];
@@ -589,6 +581,37 @@ void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
}
if (stream_cfg_cmd.num_streams)
msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+}
+
+void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_stats_stream_release_cmd
+ stream_release_cmd[MSM_ISP_STATS_MAX];
+ struct msm_vfe_stats_stream *stream_info;
+ int i;
+ int vfe_idx;
+ int num_stream = 0;
+ unsigned long flags;
+
+ msm_isp_stop_all_stats_stream(vfe_dev);
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state == STATS_AVAILABLE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_release_cmd[num_stream++].stream_handle =
+ stream_info->stream_handle[vfe_idx];
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
for (i = 0; i < num_stream; i++)
msm_isp_release_stats_stream(vfe_dev, &stream_release_cmd[i]);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index e9728f33fae1..2e3a24dd1f0d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -29,6 +29,7 @@ int msm_isp_stats_restart(struct vfe_device *vfe_dev);
void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev);
void msm_isp_process_stats_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
enum msm_isp_comp_irq_types irq);
+void msm_isp_stop_all_stats_stream(struct vfe_device *vfe_dev);
static inline int msm_isp_get_vfe_idx_for_stats_stream_user(
struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 778df297f93c..4abb6d1d91a8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -819,6 +819,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
}
case VIDIOC_MSM_ISP_REQUEST_BUF:
+ case VIDIOC_MSM_ISP_REQUEST_BUF_VER2:
/* fallthrough */
case VIDIOC_MSM_ISP_ENQUEUE_BUF:
/* fallthrough */
@@ -1788,12 +1789,17 @@ static int msm_isp_process_overflow_irq(
uint32_t *irq_status0, uint32_t *irq_status1)
{
uint32_t overflow_mask;
+ uint32_t bus_err = 0;
/* if there are no active streams - do not start recovery */
if (!vfe_dev->axi_data.num_active_stream)
return 0;
- /*Mask out all other irqs if recovery is started*/
+ if (vfe_dev->hw_info->vfe_ops.core_ops.
+ get_bus_err_mask)
+ vfe_dev->hw_info->vfe_ops.core_ops.get_bus_err_mask(
+ vfe_dev, &bus_err, irq_status1);
+ /* Mask out all other irqs if recovery is started */
if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
uint32_t halt_restart_mask0, halt_restart_mask1;
vfe_dev->hw_info->vfe_ops.core_ops.
@@ -1805,14 +1811,13 @@ static int msm_isp_process_overflow_irq(
return 0;
}
- /*Check if any overflow bit is set*/
+ /* Check if any overflow bit is set */
vfe_dev->hw_info->vfe_ops.core_ops.
get_overflow_mask(&overflow_mask);
overflow_mask &= *irq_status1;
if (overflow_mask) {
struct msm_isp_event_data error_event;
- uint32_t val = 0;
int i;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
@@ -1827,10 +1832,8 @@ static int msm_isp_process_overflow_irq(
*irq_status1 &= ~overflow_mask;
return 0;
}
- if (msm_vfe_is_vfe48(vfe_dev))
- val = msm_camera_io_r(vfe_dev->vfe_base + 0xC94);
pr_err("%s: vfe %d overflow mask %x, bus_error %x\n",
- __func__, vfe_dev->pdev->id, overflow_mask, val);
+ __func__, vfe_dev->pdev->id, overflow_mask, bus_err);
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (!axi_data->free_wm[i])
continue;
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
index 3301fc446193..4b4846907d0f 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
@@ -537,6 +537,7 @@ static int msm_jpegdma_open(struct file *file)
if (!ctx)
return -ENOMEM;
+ mutex_init(&ctx->lock);
ctx->jdma_device = device;
dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma open\n");
/* Set ctx defaults */
@@ -835,12 +836,13 @@ static int msm_jpegdma_qbuf(struct file *file, void *fh,
int ret;
msm_jpegdma_cast_long_to_buff_ptr(buf->m.userptr, &up_buff);
-
+ mutex_lock(&ctx->lock);
if (!access_ok(VERIFY_READ, up_buff,
sizeof(struct msm_jpeg_dma_buff)) ||
get_user(kp_buff.fd, &up_buff->fd) ||
get_user(kp_buff.offset, &up_buff->offset)) {
dev_err(ctx->jdma_device->dev, "Error getting user data\n");
+ mutex_unlock(&ctx->lock);
return -EFAULT;
}
@@ -849,6 +851,7 @@ static int msm_jpegdma_qbuf(struct file *file, void *fh,
put_user(kp_buff.fd, &up_buff->fd) ||
put_user(kp_buff.offset, &up_buff->offset)) {
dev_err(ctx->jdma_device->dev, "Error putting user data\n");
+ mutex_unlock(&ctx->lock);
return -EFAULT;
}
@@ -871,7 +874,7 @@ static int msm_jpegdma_qbuf(struct file *file, void *fh,
ret = v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
if (ret < 0)
dev_err(ctx->jdma_device->dev, "QBuf fail\n");
-
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -1032,10 +1035,11 @@ static int msm_jpegdma_s_crop(struct file *file, void *fh,
if (crop->c.height % formats[ctx->format_idx].v_align)
return -EINVAL;
+ mutex_lock(&ctx->lock);
ctx->crop = crop->c;
if (atomic_read(&ctx->active))
ret = msm_jpegdma_update_hw_config(ctx);
-
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -1240,12 +1244,14 @@ void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
if (ctx) {
+ mutex_lock(&ctx->lock);
ctx->plane_idx++;
if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_buf == NULL || dst_buf == NULL) {
dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
mutex_unlock(&dma->lock);
return;
}
@@ -1261,11 +1267,13 @@ void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (src_buf == NULL || dst_buf == NULL) {
dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
mutex_unlock(&dma->lock);
return;
}
msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
}
+ mutex_unlock(&ctx->lock);
}
mutex_unlock(&dma->lock);
}
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
index 6a1205daf1d2..4911ce3aa5bd 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
@@ -254,6 +254,7 @@ struct msm_jpegdma_buf_handle {
* @format_idx: Current format index.
*/
struct jpegdma_ctx {
+ struct mutex lock;
struct msm_jpegdma_device *jdma_device;
atomic_t active;
struct completion completion;
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp_soc.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp_soc.c
index 8213f736205a..6911e8fc5f2f 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp_soc.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp_soc.c
@@ -53,6 +53,11 @@ void msm_cpp_fetch_dt_params(struct cpp_device *cpp_dev)
&cpp_dev->bus_master_flag);
if (rc)
cpp_dev->bus_master_flag = 0;
+
+ if (of_property_read_bool(of_node, "qcom,micro-reset"))
+ cpp_dev->micro_reset = 1;
+ else
+ cpp_dev->micro_reset = 0;
}
int msm_cpp_get_clock_index(struct cpp_device *cpp_dev, const char *clk_name)
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index c1c25b655d1f..1dd2b0d26007 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -677,6 +677,24 @@ static void msm_sensor_fill_sensor_info(struct msm_sensor_ctrl_t *s_ctrl,
}
/* static function definition */
+static int32_t msm_sensor_driver_is_special_support(
+ struct msm_sensor_ctrl_t *s_ctrl,
+ char *sensor_name)
+{
+ int32_t rc = 0, i = 0;
+ struct msm_camera_sensor_board_info *sensordata = s_ctrl->sensordata;
+
+ for (i = 0; i < sensordata->special_support_size; i++) {
+ if (!strcmp(sensordata->special_support_sensors[i],
+ sensor_name)) {
+ rc = TRUE;
+ break;
+ }
+ }
+ return rc;
+}
+
+/* static function definition */
int32_t msm_sensor_driver_probe(void *setting,
struct msm_sensor_info_t *probed_info, char *entity_name)
{
@@ -802,6 +820,16 @@ int32_t msm_sensor_driver_probe(void *setting,
CDBG("s_ctrl[%d] %pK", slave_info->camera_id, s_ctrl);
+ if (s_ctrl->sensordata->special_support_size > 0) {
+ if (!msm_sensor_driver_is_special_support(s_ctrl,
+ slave_info->sensor_name)) {
+ pr_err("%s:%s is not support on this board\n",
+ __func__, slave_info->sensor_name);
+ rc = 0;
+ goto free_slave_info;
+ }
+ }
+
if (s_ctrl->is_probe_succeed == 1) {
/*
* Different sensor on this camera slot has been connected
@@ -1009,10 +1037,10 @@ free_slave_info:
static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl)
{
- int32_t rc = 0;
+ int32_t rc = 0, i = 0;
struct msm_camera_sensor_board_info *sensordata = NULL;
struct device_node *of_node = s_ctrl->of_node;
- uint32_t cell_id;
+ uint32_t cell_id;
s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
if (!s_ctrl->sensordata) {
@@ -1047,6 +1075,35 @@ static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl)
goto FREE_SENSOR_DATA;
}
+ sensordata->special_support_size =
+ of_property_count_strings(of_node,
+ "qcom,special-support-sensors");
+
+ if (sensordata->special_support_size < 0)
+ sensordata->special_support_size = 0;
+
+ if (sensordata->special_support_size > MAX_SPECIAL_SUPPORT_SIZE) {
+ pr_debug("%s:support_size exceed max support size\n", __func__);
+ sensordata->special_support_size = MAX_SPECIAL_SUPPORT_SIZE;
+ }
+
+ if (sensordata->special_support_size) {
+ for (i = 0; i < sensordata->special_support_size; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,special-support-sensors", i,
+ &(sensordata->special_support_sensors[i]));
+ if (rc < 0) {
+ /* if read sensor support names failed,
+ * set support all sensors, break;
+ */
+ sensordata->special_support_size = 0;
+ break;
+ }
+ CDBG("%s special_support_sensors[%d] = %s\n", __func__,
+ i, sensordata->special_support_sensors[i]);
+ }
+ }
+
/* Read subdev info */
rc = msm_sensor_get_sub_module_index(of_node, &sensordata->sensor_info);
if (rc < 0) {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index dc5a5a0dc851..4e695fc774f6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2108,7 +2108,7 @@ static ssize_t sde_rotator_show_caps(struct device *dev,
#define SPRINT(fmt, ...) \
(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
- SPRINT("wb_count=%d\n", mgr->queue_count);
+ SPRINT("queue_count=%d\n", mgr->queue_count);
SPRINT("downscale=1\n");
SPRINT("ubwc=1\n");
@@ -2415,7 +2415,7 @@ static int sde_rotator_register_clk(struct platform_device *pdev,
static void sde_rotator_unregister_clk(struct sde_rot_mgr *mgr)
{
- kfree(mgr->rot_clk);
+ devm_kfree(mgr->device, mgr->rot_clk);
mgr->rot_clk = NULL;
mgr->num_rot_clk = 0;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
index b5c0790ef063..fef4a8585eaa 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
@@ -167,7 +167,7 @@ int sde_mdp_mixer_pipe_update(struct sde_mdp_pipe *pipe,
int sde_mdp_display_wait4comp(struct sde_mdp_ctl *ctl)
{
- int ret;
+ int ret = 0;
if (!ctl) {
SDEROT_ERR("invalid ctl\n");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index eaf35733b38a..ff46a607db0c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -2096,6 +2096,8 @@ static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
else
SPRINT("min_downscale=2.0\n");
+ SPRINT("downscale_compression=1\n");
+
#undef SPRINT
return cnt;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index c11c4b61d832..da9845e35fde 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -214,10 +214,9 @@ static int sde_smmu_attach(struct sde_rot_data_type *mdata)
SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
}
} else {
- SDEROT_ERR(
+ SDEROT_DBG(
"iommu device not attached for domain[%d]\n",
i);
- return -ENODEV;
}
}
return 0;
@@ -492,11 +491,13 @@ int sde_smmu_probe(struct platform_device *pdev)
mp->num_vreg = 1;
}
- rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
- mp->num_vreg, true);
- if (rc) {
- SDEROT_ERR("vreg config failed rc=%d\n", rc);
- return rc;
+ if (mp->vreg_config) {
+ rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
+ mp->num_vreg, true);
+ if (rc) {
+ SDEROT_ERR("vreg config failed rc=%d\n", rc);
+ goto release_vreg;
+ }
}
rc = sde_smmu_clk_register(pdev, mp);
@@ -504,18 +505,16 @@ int sde_smmu_probe(struct platform_device *pdev)
SDEROT_ERR(
"smmu clk register failed for domain[%d] with err:%d\n",
smmu_domain.domain, rc);
- sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
- false);
- return rc;
+ goto disable_vreg;
}
snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
SDEROT_ERR("mdss bus client register failed\n");
- sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
- false);
- return PTR_ERR(sde_smmu->reg_bus_clt);
+ rc = PTR_ERR(sde_smmu->reg_bus_clt);
+ sde_smmu->reg_bus_clt = NULL;
+ goto unregister_clk;
}
rc = sde_smmu_enable_power(sde_smmu, true);
@@ -531,6 +530,7 @@ int sde_smmu_probe(struct platform_device *pdev)
SDEROT_ERR("iommu create mapping failed for domain[%d]\n",
smmu_domain.domain);
rc = PTR_ERR(sde_smmu->mmu_mapping);
+ sde_smmu->mmu_mapping = NULL;
goto disable_power;
}
@@ -558,13 +558,20 @@ int sde_smmu_probe(struct platform_device *pdev)
release_mapping:
arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+ sde_smmu->mmu_mapping = NULL;
disable_power:
sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
sde_smmu->reg_bus_clt = NULL;
- sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
- false);
+unregister_clk:
+disable_vreg:
+ sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
+ sde_smmu->mp.num_vreg, false);
+release_vreg:
+ devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
+ sde_smmu->mp.vreg_config = NULL;
+ sde_smmu->mp.num_vreg = 0;
return rc;
}
@@ -575,9 +582,21 @@ int sde_smmu_remove(struct platform_device *pdev)
for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
sde_smmu = sde_smmu_get_cb(i);
- if (sde_smmu && sde_smmu->dev &&
- (sde_smmu->dev == &pdev->dev))
- arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+ if (!sde_smmu || !sde_smmu->dev ||
+ (sde_smmu->dev != &pdev->dev))
+ continue;
+
+ sde_smmu->dev = NULL;
+ arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+ sde_smmu->mmu_mapping = NULL;
+ sde_smmu_enable_power(sde_smmu, false);
+ sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
+ sde_smmu->reg_bus_clt = NULL;
+ sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
+ sde_smmu->mp.num_vreg, false);
+ devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
+ sde_smmu->mp.vreg_config = NULL;
+ sde_smmu->mp.num_vreg = 0;
}
return 0;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 28faa1436ef5..8b1329db1742 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1243,14 +1243,13 @@ void *msm_vidc_open(int core_id, int session_type)
return inst;
fail_init:
- v4l2_fh_del(&inst->event_handler);
- v4l2_fh_exit(&inst->event_handler);
- vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
-
mutex_lock(&core->lock);
list_del(&inst->list);
mutex_unlock(&core->lock);
+ v4l2_fh_del(&inst->event_handler);
+ v4l2_fh_exit(&inst->event_handler);
+ vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
fail_bufq_output:
vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
fail_bufq_capture:
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 3ffe2ecfd5ef..c8946f98ced4 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1029,6 +1029,11 @@ static int match_child(struct device *dev, void *data)
return !strcmp(dev_name(dev), (char *)data);
}
+static void s5p_mfc_memdev_release(struct device *dev)
+{
+ dma_release_declared_memory(dev);
+}
+
static void *mfc_get_drv_data(struct platform_device *pdev);
static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
@@ -1041,6 +1046,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}
+
+ dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
+ dev->mem_dev_l->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_l);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-l", mem_info, 2);
@@ -1058,6 +1066,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}
+
+ dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
+ dev->mem_dev_r->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_r);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-r", mem_info, 2);
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 84fa6e9b59a1..67314c034cdb 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -29,7 +29,7 @@
#define RC5_BIT_START (1 * RC5_UNIT)
#define RC5_BIT_END (1 * RC5_UNIT)
#define RC5X_SPACE (4 * RC5_UNIT)
-#define RC5_TRAILER (10 * RC5_UNIT) /* In reality, approx 100 */
+#define RC5_TRAILER (6 * RC5_UNIT) /* In reality, approx 100 */
enum rc5_state {
STATE_INACTIVE,
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 082ff5608455..317ef63ee789 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1407,8 +1407,10 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
if (p->fname) {
priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL)
- return -ENOMEM;
+ if (priv->ctrl.fname == NULL) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
}
/*
@@ -1440,6 +1442,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
} else
priv->state = XC2028_WAITING_FIRMWARE;
}
+unlock:
mutex_unlock(&priv->lock);
return rc;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 565a59310747..34b35ebd60ac 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -1073,7 +1073,7 @@ static int airspy_probe(struct usb_interface *intf,
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
- goto err_unregister_v4l2_dev;
+ goto err_free_controls;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
@@ -1082,7 +1082,6 @@ static int airspy_probe(struct usb_interface *intf,
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 78c12d22dfbb..5dab02432e82 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -278,6 +278,9 @@ static void snd_usbtv_trigger(struct work_struct *work)
{
struct usbtv *chip = container_of(work, struct usbtv, snd_trigger);
+ if (!chip->snd)
+ return;
+
if (atomic_read(&chip->snd_stream))
usbtv_audio_start(chip);
else
@@ -378,6 +381,8 @@ err:
void usbtv_audio_free(struct usbtv *usbtv)
{
+ cancel_work_sync(&usbtv->snd_trigger);
+
if (usbtv->snd && usbtv->udev) {
snd_card_free(usbtv->snd);
usbtv->snd = NULL;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index d11fd6ac2df0..5cefca95734e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -148,6 +148,26 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_H264,
.fcc = V4L2_PIX_FMT_H264,
},
+ {
+ .name = "Greyscale 8 L/R (Y8I)",
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .name = "Greyscale 12 L/R (Y12I)",
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Bayer 10-bit (SRGGB10P)",
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
};
/* ------------------------------------------------------------------------
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f0f2391e1b43..7e4d3eea371b 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -119,6 +119,18 @@
#define UVC_GUID_FORMAT_H264 \
{ 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
/* ------------------------------------------------------------------------
* Driver specific constants.
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 11f39791ec33..47f37683893a 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1505,7 +1505,7 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
void *pb, int nonblocking)
{
unsigned long flags;
- int ret;
+ int ret = 0;
/*
* Wait for at least one buffer to become available on the done_list.
@@ -1521,10 +1521,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
spin_lock_irqsave(&q->done_lock, flags);
*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
/*
- * Only remove the buffer from done_list if v4l2_buffer can handle all
- * the planes.
+ * Only remove the buffer from done_list if all planes can be
+ * handled. Some cases such as V4L2 file I/O and DVB have pb
+ * == NULL; skip the check then as there's nothing to verify.
*/
- ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (pb)
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
if (!ret)
list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 502984c724ff..6c441be8f893 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
return 0;
}
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+ return __verify_planes_array(vb, pb);
+}
+
/**
* __verify_length() - Verify that the bytesused value for each plane fits in
* the plane length and that the data offset doesn't exceed the bytesused value.
@@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
}
static const struct vb2_buf_ops v4l2_buf_ops = {
+ .verify_planes_array = __verify_planes_array_core,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.set_timestamp = __set_timestamp,
diff --git a/drivers/mfd/msm-cdc-pinctrl.c b/drivers/mfd/msm-cdc-pinctrl.c
index 3ffd20245877..9622256a280d 100644
--- a/drivers/mfd/msm-cdc-pinctrl.c
+++ b/drivers/mfd/msm-cdc-pinctrl.c
@@ -180,13 +180,15 @@ static int msm_cdc_pinctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(gpio_data->pinctrl_sleep);
goto err_lookup_state;
}
-
- /* Set pinctrl state to aud_sleep by default */
- ret = pinctrl_select_state(gpio_data->pinctrl,
- gpio_data->pinctrl_sleep);
- if (ret)
- dev_err(&pdev->dev, "%s: set cdc gpio sleep state fail: %d\n",
- __func__, ret);
+ /* skip setting to sleep state for LPI_TLMM GPIOs */
+ if (!of_property_read_bool(pdev->dev.of_node, "qcom,lpi-gpios")) {
+ /* Set pinctrl state to aud_sleep by default */
+ ret = pinctrl_select_state(gpio_data->pinctrl,
+ gpio_data->pinctrl_sleep);
+ if (ret)
+ dev_err(&pdev->dev, "%s: set cdc gpio sleep state fail: %d\n",
+ __func__, ret);
+ }
gpio_data->gpio = of_get_named_gpio(pdev->dev.of_node,
"qcom,cdc-rst-n-gpio", 0);
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 207a3bd68559..a867cc91657e 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -34,7 +34,13 @@ struct qcom_rpm_resource {
struct qcom_rpm_data {
u32 version;
const struct qcom_rpm_resource *resource_table;
- unsigned n_resources;
+ unsigned int n_resources;
+ unsigned int req_ctx_off;
+ unsigned int req_sel_off;
+ unsigned int ack_ctx_off;
+ unsigned int ack_sel_off;
+ unsigned int req_sel_size;
+ unsigned int ack_sel_size;
};
struct qcom_rpm {
@@ -61,11 +67,7 @@ struct qcom_rpm {
#define RPM_REQUEST_TIMEOUT (5 * HZ)
-#define RPM_REQUEST_CONTEXT 3
-#define RPM_REQ_SELECT 11
-#define RPM_ACK_CONTEXT 15
-#define RPM_ACK_SELECTOR 23
-#define RPM_SELECT_SIZE 7
+#define RPM_MAX_SEL_SIZE 7
#define RPM_NOTIFICATION BIT(30)
#define RPM_REJECTED BIT(31)
@@ -157,6 +159,12 @@ static const struct qcom_rpm_data apq8064_template = {
.version = 3,
.resource_table = apq8064_rpm_resource_table,
.n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
@@ -240,6 +248,12 @@ static const struct qcom_rpm_data msm8660_template = {
.version = 2,
.resource_table = msm8660_rpm_resource_table,
.n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 19,
+ .ack_sel_off = 27,
+ .req_sel_size = 7,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
@@ -322,6 +336,12 @@ static const struct qcom_rpm_data msm8960_template = {
.version = 3,
.resource_table = msm8960_rpm_resource_table,
.n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
@@ -362,6 +382,12 @@ static const struct qcom_rpm_data ipq806x_template = {
.version = 3,
.resource_table = ipq806x_rpm_resource_table,
.n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct of_device_id qcom_rpm_of_match[] = {
@@ -380,7 +406,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
{
const struct qcom_rpm_resource *res;
const struct qcom_rpm_data *data = rpm->data;
- u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
+ u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
int left;
int ret = 0;
int i;
@@ -398,12 +424,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
- for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
+ for (i = 0; i < rpm->data->req_sel_size; i++) {
writel_relaxed(sel_mask[i],
- RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
+ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
}
- writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
+ writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
reinit_completion(&rpm->ack);
regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
@@ -426,10 +452,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
u32 ack;
int i;
- ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
- for (i = 0; i < RPM_SELECT_SIZE; i++)
- writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
- writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+ for (i = 0; i < rpm->data->ack_sel_size; i++)
+ writel_relaxed(0,
+ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
+ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
if (ack & RPM_NOTIFICATION) {
dev_warn(rpm->dev, "ignoring notification!\n");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9f0d9b7b7e17..ca092ef75cfe 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -555,6 +555,11 @@ config QPNP_MISC
peripheral. The MISC peripheral holds the USB ID interrupt
and the driver provides an API to check if this interrupt
is available on the current PMIC chip.
+config MEMORY_STATE_TIME
+ tristate "Memory freq/bandwidth time statistics"
+ depends on PROFILING
+ help
+ Memory time statistics exported to /sys/kernel/memory_state_time
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 6acb70964fb8..40b7e3b603e2 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -64,3 +64,4 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_UID_CPUTIME) += uid_cputime.o
obj-y += qcom/
obj-$(CONFIG_QPNP_MISC) += qpnp-misc.o
+obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 6982f603fadc..ab6f392d3504 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Werror -Wno-unused-const-variable
+ccflags-y := -Werror $(call cc-disable-warning, unused-const-variable)
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 103baf0e0c5b..ea3eeb7011e1 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
afu = cxl_pci_to_afu(dev);
- get_device(&afu->dev);
ctx = cxl_context_alloc();
if (IS_ERR(ctx)) {
rc = PTR_ERR(ctx);
@@ -61,7 +60,6 @@ err_mapping:
err_ctx:
kfree(ctx);
err_dev:
- put_device(&afu->dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_context *ctx)
if (ctx->status >= STARTED)
return -EBUSY;
- put_device(&ctx->afu->dev);
-
cxl_context_free(ctx);
return 0;
@@ -176,7 +172,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
- get_pid(ctx->pid);
+ ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
kernel = false;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2faa1270d085..262b88eac414 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -42,7 +42,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
spin_lock_init(&ctx->sste_lock);
ctx->afu = afu;
ctx->master = master;
- ctx->pid = NULL; /* Set in start work ioctl */
+ ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
ctx->mapping = mapping;
@@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->pe = i;
ctx->elem = &ctx->afu->spa[i];
ctx->pe_inserted = false;
+
+ /*
+ * take a ref on the afu so that it stays alive at-least till
+ * this context is reclaimed inside reclaim_ctx.
+ */
+ cxl_afu_get(afu);
return 0;
}
@@ -211,7 +217,11 @@ int __detach_context(struct cxl_context *ctx)
WARN_ON(cxl_detach_process(ctx) &&
cxl_adapter_link_ok(ctx->afu->adapter));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+
+ /* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
+ put_pid(ctx->glpid);
+
cxl_ctx_put();
return 0;
}
@@ -278,6 +288,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->irq_bitmap)
kfree(ctx->irq_bitmap);
+ /* Drop ref to the afu device taken during cxl_context_init */
+ cxl_afu_put(ctx->afu);
+
kfree(ctx);
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0cfb9c129f27..a521bc72cec2 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -403,6 +403,18 @@ struct cxl_afu {
bool enabled;
};
+/* AFU refcount management */
+static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+
+ return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+
+static inline void cxl_afu_put(struct cxl_afu *afu)
+{
+ put_device(&afu->dev);
+}
+
struct cxl_irq_name {
struct list_head list;
@@ -433,6 +445,9 @@ struct cxl_context {
unsigned int sst_size, sst_lru;
wait_queue_head_t wq;
+ /* pid of the group leader associated with the pid */
+ struct pid *glpid;
+ /* use mm context associated with this pid for ds faults */
struct pid *pid;
spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
/* Only used in PR mode */
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 25a5418c55cb..81c3f75b7330 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
+/*
+ * Returns the mm_struct corresponding to the context ctx via ctx->pid
+ * In case the task has exited we use the task group leader accessible
+ * via ctx->glpid to find the next task in the thread group that has a
+ * valid mm_struct associated with it. If a task with valid mm_struct
+ * is found the ctx->pid is updated to use the task struct for subsequent
+ * translations. In case no valid mm_struct is found in the task group to
+ * service the fault a NULL is returned.
+ */
+static struct mm_struct *get_mem_context(struct cxl_context *ctx)
+{
+ struct task_struct *task = NULL;
+ struct mm_struct *mm = NULL;
+ struct pid *old_pid = ctx->pid;
+
+ if (old_pid == NULL) {
+ pr_warn("%s: Invalid context for pe=%d\n",
+ __func__, ctx->pe);
+ return NULL;
+ }
+
+ task = get_pid_task(old_pid, PIDTYPE_PID);
+
+ /*
+ * pid_alive may look racy but this saves us from costly
+ * get_task_mm when the task is a zombie. In worst case
+ * we may think a task is alive, which is about to die
+ * but get_task_mm will return NULL.
+ */
+ if (task != NULL && pid_alive(task))
+ mm = get_task_mm(task);
+
+ /* release the task struct that was taken earlier */
+ if (task)
+ put_task_struct(task);
+ else
+ pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
+ __func__, pid_nr(old_pid), ctx->pe);
+
+ /*
+ * If we couldn't find the mm context then use the group
+ * leader to iterate over the task group and find a task
+ * that gives us mm_struct.
+ */
+ if (unlikely(mm == NULL && ctx->glpid != NULL)) {
+
+ rcu_read_lock();
+ task = pid_task(ctx->glpid, PIDTYPE_PID);
+ if (task)
+ do {
+ mm = get_task_mm(task);
+ if (mm) {
+ ctx->pid = get_task_pid(task,
+ PIDTYPE_PID);
+ break;
+ }
+ task = next_thread(task);
+ } while (task && !thread_group_leader(task));
+ rcu_read_unlock();
+
+ /* check if we switched pid */
+ if (ctx->pid != old_pid) {
+ if (mm)
+ pr_devel("%s:pe=%i switch pid %i->%i\n",
+ __func__, ctx->pe, pid_nr(old_pid),
+ pid_nr(ctx->pid));
+ else
+ pr_devel("%s:Cannot find mm for pid=%i\n",
+ __func__, pid_nr(old_pid));
+
+ /* drop the reference to older pid */
+ put_pid(old_pid);
+ }
+ }
+
+ return mm;
+}
+
+
+
void cxl_handle_fault(struct work_struct *fault_work)
{
struct cxl_context *ctx =
container_of(fault_work, struct cxl_context, fault_work);
u64 dsisr = ctx->dsisr;
u64 dar = ctx->dar;
- struct task_struct *task = NULL;
struct mm_struct *mm = NULL;
if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
@@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
if (!ctx->kernel) {
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_handle_fault unable to get task %i\n",
- pid_nr(ctx->pid));
+
+ mm = get_mem_context(ctx);
+ /* indicates all the thread in task group have exited */
+ if (mm == NULL) {
+ pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
+ __func__, ctx->pe, pid_nr(ctx->pid));
cxl_ack_ae(ctx);
return;
- }
- if (!(mm = get_task_mm(task))) {
- pr_devel("cxl_handle_fault unable to get mm %i\n",
- pid_nr(ctx->pid));
- cxl_ack_ae(ctx);
- goto out;
+ } else {
+ pr_devel("Handling page fault for pe=%d pid=%i\n",
+ ctx->pe, pid_nr(ctx->pid));
}
}
@@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct *fault_work)
if (mm)
mmput(mm);
-out:
- if (task)
- put_task_struct(task);
}
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
- int rc;
- struct task_struct *task;
struct mm_struct *mm;
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_prefault_one unable to get task %i\n",
- pid_nr(ctx->pid));
- return;
- }
- if (!(mm = get_task_mm(task))) {
+ mm = get_mem_context(ctx);
+ if (mm == NULL) {
pr_devel("cxl_prefault_one unable to get mm %i\n",
pid_nr(ctx->pid));
- put_task_struct(task);
return;
}
- rc = cxl_fault_segment(ctx, mm, ea);
+ cxl_fault_segment(ctx, mm, ea);
mmput(mm);
- put_task_struct(task);
}
static u64 next_segment(u64 ea, u64 vsid)
@@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
struct copro_slb slb;
struct vm_area_struct *vma;
int rc;
- struct task_struct *task;
struct mm_struct *mm;
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_prefault_vma unable to get task %i\n",
- pid_nr(ctx->pid));
- return;
- }
- if (!(mm = get_task_mm(task))) {
+ mm = get_mem_context(ctx);
+ if (mm == NULL) {
pr_devel("cxl_prefault_vm unable to get mm %i\n",
pid_nr(ctx->pid));
- goto out1;
+ return;
}
down_read(&mm->mmap_sem);
@@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
up_read(&mm->mmap_sem);
mmput(mm);
-out1:
- put_task_struct(task);
}
void cxl_prefault(struct cxl_context *ctx, u64 wed)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 7ccd2998be92..783337d22f36 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -67,7 +67,13 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
spin_unlock(&adapter->afu_list_lock);
goto err_put_adapter;
}
- get_device(&afu->dev);
+
+ /*
+ * taking a ref to the afu so that it doesn't go away
+ * for rest of the function. This ref is released before
+ * we return.
+ */
+ cxl_afu_get(afu);
spin_unlock(&adapter->afu_list_lock);
if (!afu->current_mode)
@@ -90,13 +96,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
file->private_data = ctx;
cxl_ctx_get();
- /* Our ref on the AFU will now hold the adapter */
- put_device(&adapter->dev);
-
- return 0;
+ /* indicate success */
+ rc = 0;
err_put_afu:
- put_device(&afu->dev);
+ /* release the ref taken earlier */
+ cxl_afu_put(afu);
err_put_adapter:
put_device(&adapter->dev);
return rc;
@@ -131,8 +136,6 @@ int afu_release(struct inode *inode, struct file *file)
mutex_unlock(&ctx->mapping_lock);
}
- put_device(&ctx->afu->dev);
-
/*
* At this this point all bottom halfs have finished and we should be
* getting no more IRQs from the hardware for this context. Once it's
@@ -198,8 +201,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
* that performs this ioctl and not the process that opened the file.
+ * Also we grab the PID of the group leader so that if the task that
+ * has performed the attach operation exits the mm context of the
+ * process is still accessible.
*/
- ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index be2c8e248e2e..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -138,6 +138,7 @@ static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
{ PCI_DEVICE_CLASS(0x120000, ~0), },
{ }
diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c
new file mode 100644
index 000000000000..34c797a06a31
--- /dev/null
+++ b/drivers/misc/memory_state_time.c
@@ -0,0 +1,454 @@
+/* drivers/misc/memory_state_time.c
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/hashtable.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/memory-state-time.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/time.h>
+#include <linux/timekeeping.h>
+#include <linux/workqueue.h>
+
+#define KERNEL_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+#define KERNEL_ATTR_RW(_name) \
+static struct kobj_attribute _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+#define FREQ_HASH_BITS 4
+DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);
+
+static DEFINE_MUTEX(mem_lock);
+
+#define TAG "memory_state_time"
+#define BW_NODE "/soc/memory-state-time"
+#define FREQ_TBL "freq-tbl"
+#define BW_TBL "bw-buckets"
+#define NUM_SOURCES "num-sources"
+
+#define LOWEST_FREQ 2
+
+static int curr_bw;
+static int curr_freq;
+static u32 *bw_buckets;
+static u32 *freq_buckets;
+static int num_freqs;
+static int num_buckets;
+static int registered_bw_sources;
+static u64 last_update;
+static bool init_success;
+static struct workqueue_struct *memory_wq;
+static u32 num_sources = 10;
+static int *bandwidths;
+
+struct freq_entry {
+ int freq;
+ u64 *buckets; /* Bandwidth buckets. */
+ struct hlist_node hash;
+};
+
+struct queue_container {
+ struct work_struct update_state;
+ int value;
+ u64 time_now;
+ int id;
+ struct mutex *lock;
+};
+
+static int find_bucket(int bw)
+{
+ int i;
+
+ if (bw_buckets != NULL) {
+ for (i = 0; i < num_buckets; i++) {
+ if (bw_buckets[i] > bw) {
+ pr_debug("Found bucket %d for bandwidth %d\n",
+ i, bw);
+ return i;
+ }
+ }
+ return num_buckets - 1;
+ }
+ return 0;
+}
+
+static u64 get_time_diff(u64 time_now)
+{
+ u64 ms;
+
+ ms = time_now - last_update;
+ last_update = time_now;
+ return ms;
+}
+
+static ssize_t show_stat_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int i, j;
+ int len = 0;
+ struct freq_entry *freq_entry;
+
+ for (i = 0; i < num_freqs; i++) {
+ hash_for_each_possible(freq_hash_table, freq_entry, hash,
+ freq_buckets[i]) {
+ if (freq_entry->freq == freq_buckets[i]) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d ", freq_buckets[i]);
+ if (len >= PAGE_SIZE)
+ break;
+ for (j = 0; j < num_buckets; j++) {
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ "%llu ",
+ freq_entry->buckets[j]);
+ }
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ }
+ }
+ }
+ pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
+ return len;
+}
+KERNEL_ATTR_RO(show_stat);
+
+static void update_table(u64 time_now)
+{
+ struct freq_entry *freq_entry;
+
+ pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
+ hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
+ if (curr_freq == freq_entry->freq) {
+ freq_entry->buckets[find_bucket(curr_bw)]
+ += get_time_diff(time_now);
+ break;
+ }
+ }
+}
+
+static bool freq_exists(int freq)
+{
+ int i;
+
+ for (i = 0; i < num_freqs; i++) {
+ if (freq == freq_buckets[i])
+ return true;
+ }
+ return false;
+}
+
+static int calculate_total_bw(int bw, int index)
+{
+ int i;
+ int total_bw = 0;
+
+ pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
+ bandwidths[index] = bw;
+ for (i = 0; i < registered_bw_sources; i++)
+ total_bw += bandwidths[i];
+ return total_bw;
+}
+
+static void freq_update_do_work(struct work_struct *work)
+{
+ struct queue_container *freq_state_update
+ = container_of(work, struct queue_container,
+ update_state);
+ if (freq_state_update) {
+ mutex_lock(&mem_lock);
+ update_table(freq_state_update->time_now);
+ curr_freq = freq_state_update->value;
+ mutex_unlock(&mem_lock);
+ kfree(freq_state_update);
+ }
+}
+
+static void bw_update_do_work(struct work_struct *work)
+{
+ struct queue_container *bw_state_update
+ = container_of(work, struct queue_container,
+ update_state);
+ if (bw_state_update) {
+ mutex_lock(&mem_lock);
+ update_table(bw_state_update->time_now);
+ curr_bw = calculate_total_bw(bw_state_update->value,
+ bw_state_update->id);
+ mutex_unlock(&mem_lock);
+ kfree(bw_state_update);
+ }
+}
+
+static void memory_state_freq_update(struct memory_state_update_block *ub,
+ int value)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ if (freq_exists(value) && init_success) {
+ struct queue_container *freq_container
+ = kmalloc(sizeof(struct queue_container),
+ GFP_KERNEL);
+ if (!freq_container)
+ return;
+ INIT_WORK(&freq_container->update_state,
+ freq_update_do_work);
+ freq_container->time_now = ktime_get_boot_ns();
+ freq_container->value = value;
+ pr_debug("Scheduling freq update in work queue\n");
+ queue_work(memory_wq, &freq_container->update_state);
+ } else {
+ pr_debug("Freq does not exist.\n");
+ }
+ }
+}
+
+static void memory_state_bw_update(struct memory_state_update_block *ub,
+ int value)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ if (init_success) {
+ struct queue_container *bw_container
+ = kmalloc(sizeof(struct queue_container),
+ GFP_KERNEL);
+ if (!bw_container)
+ return;
+ INIT_WORK(&bw_container->update_state,
+ bw_update_do_work);
+ bw_container->time_now = ktime_get_boot_ns();
+ bw_container->value = value;
+ bw_container->id = ub->id;
+ pr_debug("Scheduling bandwidth update in work queue\n");
+ queue_work(memory_wq, &bw_container->update_state);
+ }
+ }
+}
+
+struct memory_state_update_block *memory_state_register_frequency_source(void)
+{
+ struct memory_state_update_block *block;
+
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ pr_debug("Allocating frequency source\n");
+ block = kmalloc(sizeof(struct memory_state_update_block),
+ GFP_KERNEL);
+ if (!block)
+ return NULL;
+ block->update_call = memory_state_freq_update;
+ return block;
+ }
+ pr_err("Config option disabled.\n");
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);
+
+struct memory_state_update_block *memory_state_register_bandwidth_source(void)
+{
+ struct memory_state_update_block *block;
+
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ pr_debug("Allocating bandwidth source %d\n",
+ registered_bw_sources);
+ block = kmalloc(sizeof(struct memory_state_update_block),
+ GFP_KERNEL);
+ if (!block)
+ return NULL;
+ block->update_call = memory_state_bw_update;
+ if (registered_bw_sources < num_sources) {
+ block->id = registered_bw_sources++;
+ } else {
+ pr_err("Unable to allocate source; max number reached\n");
+ kfree(block);
+ return NULL;
+ }
+ return block;
+ }
+ pr_err("Config option disabled.\n");
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);
+
+/* Buckets are designated by their maximum.
+ * Returns the buckets decided by the capability of the device.
+ */
+static int get_bw_buckets(struct device *dev)
+{
+ int ret, lenb;
+ struct device_node *node = dev->of_node;
+
+ of_property_read_u32(node, NUM_SOURCES, &num_sources);
+ if (of_find_property(node, BW_TBL, &lenb)) {
+ bandwidths = devm_kzalloc(dev,
+ sizeof(*bandwidths) * num_sources, GFP_KERNEL);
+ if (!bandwidths)
+ return -ENOMEM;
+ lenb /= sizeof(*bw_buckets);
+ bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
+ GFP_KERNEL);
+ if (!bw_buckets) {
+ devm_kfree(dev, bandwidths);
+ return -ENOMEM;
+ }
+ ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
+ lenb);
+ if (ret < 0) {
+ devm_kfree(dev, bandwidths);
+ devm_kfree(dev, bw_buckets);
+ pr_err("Unable to read bandwidth table from device tree.\n");
+ return ret;
+ }
+ }
+ curr_bw = 0;
+ num_buckets = lenb;
+ return 0;
+}
+
+/* Adds struct freq_entry nodes to the hashtable for each compatible frequency.
+ * Returns the supported number of frequencies.
+ */
+static int freq_buckets_init(struct device *dev)
+{
+ struct freq_entry *freq_entry;
+ int i;
+ int ret, lenf;
+ struct device_node *node = dev->of_node;
+
+ if (of_find_property(node, FREQ_TBL, &lenf)) {
+ lenf /= sizeof(*freq_buckets);
+ freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
+ GFP_KERNEL);
+ if (!freq_buckets)
+ return -ENOMEM;
+ pr_debug("freqs found len %d\n", lenf);
+ ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
+ lenf);
+ if (ret < 0) {
+ devm_kfree(dev, freq_buckets);
+ pr_err("Unable to read frequency table from device tree.\n");
+ return ret;
+ }
+ pr_debug("ret freq %d\n", ret);
+ }
+ num_freqs = lenf;
+ curr_freq = freq_buckets[LOWEST_FREQ];
+
+ for (i = 0; i < num_freqs; i++) {
+ freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
+ GFP_KERNEL);
+ if (!freq_entry)
+ return -ENOMEM;
+ freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
+ GFP_KERNEL);
+ if (!freq_entry->buckets) {
+ devm_kfree(dev, freq_entry);
+ return -ENOMEM;
+ }
+ pr_debug("memory_state_time Adding freq to ht %d\n",
+ freq_buckets[i]);
+ freq_entry->freq = freq_buckets[i];
+ hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
+ }
+ return 0;
+}
+
+struct kobject *memory_kobj;
+EXPORT_SYMBOL_GPL(memory_kobj);
+
+static struct attribute *memory_attrs[] = {
+ &show_stat_attr.attr,
+ NULL
+};
+
+static struct attribute_group memory_attr_group = {
+ .attrs = memory_attrs,
+};
+
+static int memory_state_time_probe(struct platform_device *pdev)
+{
+ int error;
+
+ error = get_bw_buckets(&pdev->dev);
+ if (error)
+ return error;
+ error = freq_buckets_init(&pdev->dev);
+ if (error)
+ return error;
+ last_update = ktime_get_boot_ns();
+ init_success = true;
+
+ pr_debug("memory_state_time initialized with num_freqs %d\n",
+ num_freqs);
+ return 0;
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "memory-state-time" },
+ {}
+};
+
+static struct platform_driver memory_state_time_driver = {
+ .probe = memory_state_time_probe,
+ .driver = {
+ .name = "memory-state-time",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init memory_state_time_init(void)
+{
+ int error;
+
+ hash_init(freq_hash_table);
+ memory_wq = create_singlethread_workqueue("memory_wq");
+ if (!memory_wq) {
+ pr_err("Unable to create workqueue.\n");
+ return -EINVAL;
+ }
+ /*
+ * Create sys/kernel directory for memory_state_time.
+ */
+ memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
+ if (!memory_kobj) {
+ pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
+ error = -ENOMEM;
+ goto wq;
+ }
+ error = sysfs_create_group(memory_kobj, &memory_attr_group);
+ if (error) {
+ pr_err("Unable to create sysfs folder.\n");
+ goto kobj;
+ }
+
+ error = platform_driver_register(&memory_state_time_driver);
+ if (error) {
+ pr_err("Unable to register memory_state_time platform driver.\n");
+ goto group;
+ }
+ return 0;
+
+group: sysfs_remove_group(memory_kobj, &memory_attr_group);
+kobj: kobject_put(memory_kobj);
+wq: destroy_workqueue(memory_wq);
+ return error;
+}
+module_init(memory_state_time_init);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 26676a51d83c..a7b3663ad7fe 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -80,6 +80,9 @@
/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP 0x01
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP 0x20000
+
#define RPMB_SERVICE 0x2000
#define SSD_SERVICE 0x3000
@@ -6056,7 +6059,8 @@ static int qseecom_mdtp_cipher_dip(void __user *argp)
}
if (req.in_buf == NULL || req.out_buf == NULL ||
- req.in_buf_size == 0 || req.out_buf_size == 0 ||
+ req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+ req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
req.direction > 1) {
pr_err("invalid parameters\n");
ret = -EINVAL;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 39cb46a5ce11..9ec0928658cd 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2854,8 +2854,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
@@ -2869,14 +2869,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 6ad91042409e..a444a3a80f52 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -948,6 +948,17 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
+ if (mrq->lat_hist_enabled) {
+ ktime_t completion;
+ u_int64_t delta_us;
+
+ completion = ktime_get();
+ delta_us = ktime_us_delta(completion,
+ mrq->io_start);
+ blk_update_latency_hist(&host->io_lat_s,
+ (mrq->data->flags & MMC_DATA_READ),
+ delta_us);
+ }
trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
}
@@ -1698,6 +1709,11 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
}
if (!err && areq) {
+ if (host->latency_hist_enabled) {
+ areq->mrq->io_start = ktime_get();
+ areq->mrq->lat_hist_enabled = 1;
+ } else
+ areq->mrq->lat_hist_enabled = 0;
trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
areq->mrq->cmd->arg,
areq->mrq->data);
@@ -3233,7 +3249,7 @@ void mmc_init_erase(struct mmc_card *card)
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
- unsigned int arg, unsigned int qty)
+ unsigned int arg, unsigned int qty)
{
unsigned int erase_timeout;
@@ -4394,6 +4410,54 @@ static void __exit mmc_exit(void)
destroy_workqueue(workqueue);
}
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ return blk_latency_hist_show(&host->io_lat_s, buf);
+}
+
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ long value;
+
+ if (kstrtol(buf, 0, &value))
+ return -EINVAL;
+ if (value == BLK_IO_LAT_HIST_ZERO)
+ blk_zero_latency_hist(&host->io_lat_s);
+ else if (value == BLK_IO_LAT_HIST_ENABLE ||
+ value == BLK_IO_LAT_HIST_DISABLE)
+ host->latency_hist_enabled = value;
+ return count;
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+ latency_hist_show, latency_hist_store);
+
+void
+mmc_latency_hist_sysfs_init(struct mmc_host *host)
+{
+ if (device_create_file(&host->class_dev, &dev_attr_latency_hist))
+ dev_err(&host->class_dev,
+ "Failed to create latency_hist sysfs entry\n");
+}
+
+void
+mmc_latency_hist_sysfs_exit(struct mmc_host *host)
+{
+ device_remove_file(&host->class_dev, &dev_attr_latency_hist);
+}
+
subsys_initcall(mmc_init);
module_exit(mmc_exit);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1c4a15dc5fbb..f6a54a8e1076 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -880,6 +880,8 @@ int mmc_add_host(struct mmc_host *host)
pr_err("%s: failed to create sysfs group with err %d\n",
__func__, err);
+ mmc_latency_hist_sysfs_init(host);
+
mmc_start_host(host);
if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
register_pm_notifier(&host->pm_notify);
@@ -910,6 +912,8 @@ void mmc_remove_host(struct mmc_host *host)
sysfs_remove_group(&host->parent->kobj, &dev_attr_grp);
sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+ mmc_latency_hist_sysfs_exit(host);
+
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 992bf5397633..bf38533406fd 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -12,6 +12,8 @@
#define _MMC_CORE_HOST_H
#include <linux/mmc/host.h>
+#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
+
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
@@ -21,5 +23,8 @@ void mmc_retune_hold(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
+void mmc_latency_hist_sysfs_init(struct mmc_host *host);
+void mmc_latency_hist_sysfs_exit(struct mmc_host *host);
+
#endif
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index f41832252761..01959bd2d523 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
depends on MMC_SDHCI && ACPI
+ select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8aea3fa6938b..5a05bf400ca8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
#include "sdhci.h"
enum {
@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
+ static const struct x86_cpu_id byt[] = {
+ { X86_VENDOR_INTEL, 6, 0x37 },
+ {}
+ };
+
+ return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+ u32 val = 0;
+
+ if (!sdhci_acpi_byt())
+ return;
+
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, 0x06, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, 0x07, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ if (!sdhci_acpi_byt())
+ return false;
+
+ if (!iosf_mbi_available())
+ return true;
+
+ sdhci_acpi_byt_setting(dev);
+
+ return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -337,6 +411,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
+ if (sdhci_acpi_byt_defer(dev))
+ return -EPROBE_DEFER;
+
hid = acpi_device_hid(device);
uid = device->pnp.unique_id;
@@ -460,6 +537,8 @@ static int sdhci_acpi_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_resume_host(c->host);
}
@@ -483,6 +562,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_runtime_resume_host(c->host);
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 886229317fea..b5b2c8612663 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -576,7 +576,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->align_buffer, host->align_buffer_sz, direction);
if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
goto fail;
- BUG_ON(host->align_addr & host->align_mask);
+ BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
host->sg_count = sdhci_pre_dma_transfer(host, data);
if (host->sg_count < 0)
@@ -598,8 +598,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
* the (up to three) bytes that screw up the
* alignment.
*/
- offset = (host->align_sz - (addr & host->align_mask)) &
- host->align_mask;
+ offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
+ SDHCI_ADMA2_MASK;
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg, &flags);
@@ -613,8 +613,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
BUG_ON(offset > 65536);
- align += host->align_sz;
- align_addr += host->align_sz;
+ align += SDHCI_ADMA2_ALIGN;
+ align_addr += SDHCI_ADMA2_ALIGN;
desc += host->desc_sz;
@@ -698,7 +698,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
/* Do a quick scan of the SG list for any unaligned mappings */
has_unaligned = false;
for_each_sg(data->sg, sg, host->sg_count, i)
- if (sg_dma_address(sg) & host->align_mask) {
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
has_unaligned = true;
break;
}
@@ -710,15 +710,15 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
align = host->align_buffer;
for_each_sg(data->sg, sg, host->sg_count, i) {
- if (sg_dma_address(sg) & host->align_mask) {
- size = host->align_sz -
- (sg_dma_address(sg) & host->align_mask);
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ size = SDHCI_ADMA2_ALIGN -
+ (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
buffer = sdhci_kmap_atomic(sg, &flags);
memcpy(buffer, align, size);
sdhci_kunmap_atomic(buffer, &flags);
- align += host->align_sz;
+ align += SDHCI_ADMA2_ALIGN;
}
}
}
@@ -1445,7 +1445,9 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
pwr = SDHCI_POWER_330;
break;
default:
- BUG();
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ break;
}
}
@@ -3810,24 +3812,17 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->flags & SDHCI_USE_64_BIT_DMA) {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_64_DESC_SZ;
- host->align_buffer_sz = SDHCI_MAX_SEGS *
- SDHCI_ADMA2_64_ALIGN;
host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
- host->align_sz = SDHCI_ADMA2_64_ALIGN;
- host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
} else {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_32_DESC_SZ;
- host->align_buffer_sz = SDHCI_MAX_SEGS *
- SDHCI_ADMA2_32_ALIGN;
host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
- host->align_sz = SDHCI_ADMA2_32_ALIGN;
- host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
}
host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
host->adma_table_sz,
&host->adma_addr,
GFP_KERNEL);
+ host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
if (host->adma_table)
@@ -3841,7 +3836,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_ADMA;
host->adma_table = NULL;
host->align_buffer = NULL;
- } else if (host->adma_addr & host->align_mask) {
+ } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 6ef2005c8d4c..e5419b42a444 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -284,22 +284,27 @@
/* ADMA2 32-bit DMA descriptor size */
#define SDHCI_ADMA2_32_DESC_SZ 8
-/* ADMA2 32-bit DMA alignment */
-#define SDHCI_ADMA2_32_ALIGN 4
-
/* ADMA2 32-bit descriptor */
struct sdhci_adma2_32_desc {
__le16 cmd;
__le16 len;
__le32 addr;
-} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
+} __packed __aligned(4);
+
+/* ADMA2 data alignment */
+#define SDHCI_ADMA2_ALIGN 4
+#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1)
+
+/*
+ * ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8 byte
+ * alignment for the descriptor table even in 32-bit DMA mode. Memory
+ * allocation is at least 8 byte aligned anyway, so just stipulate 8 always.
+ */
+#define SDHCI_ADMA2_DESC_ALIGN 8
/* ADMA2 64-bit DMA descriptor size */
#define SDHCI_ADMA2_64_DESC_SZ 12
-/* ADMA2 64-bit DMA alignment */
-#define SDHCI_ADMA2_64_ALIGN 8
-
/*
* ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
* aligned.
@@ -581,8 +586,6 @@ struct sdhci_host {
dma_addr_t align_addr; /* Mapped bounce buffer */
unsigned int desc_sz; /* ADMA descriptor size */
- unsigned int align_sz; /* ADMA alignment */
- unsigned int align_mask; /* ADMA alignment mask */
struct tasklet_struct finish_tasklet; /* Tasklet structures */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ce7b2cab5762..54ab48827258 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2586,7 +2586,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
int use_bufpoi;
- int part_pagewr = (column || writelen < (mtd->writesize - 1));
+ int part_pagewr = (column || writelen < mtd->writesize);
if (part_pagewr)
use_bufpoi = 1;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 19c54afdbc49..9b7bc6326fa2 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -931,7 +931,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- ubi_err(ubi, "mtd%d is already attached to ubi%d",
+ pr_err("ubi: mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -946,7 +946,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
+ pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI",
mtd->index);
return -EINVAL;
}
@@ -957,7 +957,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- ubi_err(ubi, "only %d UBI devices may be created",
+ pr_err("ubi: only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -967,7 +967,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- ubi_err(ubi, "already exists");
+ pr_err("ubi: ubi%i already exists", ubi_num);
return -EEXIST;
}
}
@@ -1049,6 +1049,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
goto out_detach;
}
+ /* Make device "available" before it becomes accessible via sysfs */
+ ubi_devices[ubi_num] = ubi;
+
err = uif_init(ubi, &ref);
if (err)
goto out_detach;
@@ -1093,7 +1096,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1104,6 +1106,7 @@ out_uif:
ubi_assert(ref);
uif_close(ubi);
out_detach:
+ ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1ae17bb9b889..3ea4c022cbb9 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}
- /* Change volume table record */
- vtbl_rec = ubi->vtbl[vol_id];
- vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
- if (err)
- goto out_acc;
-
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
@@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}
+ /*
+ * When we shrink a volume we have to flush all pending (erase) work.
+ * Otherwise it can happen that upon next attach UBI finds a LEB with
+ * lnum > highest_lnum and refuses to attach.
+ */
+ if (pebs < 0) {
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+ }
+
+ /* Change volume table record */
+ vtbl_rec = ubi->vtbl[vol_id];
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index db760e84119f..b8df0f5e8c25 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
if (err < 0)
return err;
- return register_netdevice(bond_dev);
+ err = register_netdevice(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8b3275d7792a..8f5e93cb7975 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}
return received;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f91b094288da..e3dccd3200d5 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
- for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
- frame->data[i] | (frame->data[i + 1] << 8));
+ if (priv->type == BOSCH_D_CAN) {
+ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = (u32)frame->data[i];
+ data |= (u32)frame->data[i + 1] << 8;
+ data |= (u32)frame->data[i + 2] << 16;
+ data |= (u32)frame->data[i + 3] << 24;
+ priv->write_reg32(priv, dreg, data);
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv,
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+ frame->data[i] |
+ (frame->data[i + 1] << 8));
+ }
}
}
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
- data = priv->read_reg(priv, dreg);
- frame->data[i] = data;
- frame->data[i + 1] = data >> 8;
+ if (priv->type == BOSCH_D_CAN) {
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = priv->read_reg32(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ frame->data[i + 2] = data >> 16;
+ frame->data[i + 3] = data >> 24;
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ data = priv->read_reg(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
}
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 910c12e2638e..ad535a854e5c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
* - control mode with CAN_CTRLMODE_FD set
*/
+ if (!data)
+ return 0;
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 28f7610b03fe..c32f5d32f811 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -219,7 +219,7 @@ err_dma:
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b89504405b72..7445da218bd9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2526,7 +2526,7 @@ static void handle_timestamp(struct octeon_device *oct,
octeon_swap_8B_data(&resp->timestamp, 1);
- if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) {
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
struct skb_shared_hwtstamps ts;
u64 ns = resp->timestamp;
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 39ca6744a4e6..22471d283a95 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to non-availability
+ * of CQ descriptor. An erratum in HW mandates this value to be
+ * at least 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL 0x100
+
/* Global timer for CQ timer thresh interrupts
* Calculated for SCLK of 700Mhz
* value written should be a 1/16th of what is expected
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 5f24d11cb16a..16baaafed26c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -309,6 +309,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
static void nic_init_hw(struct nicpf *nic)
{
int i;
+ u64 cqm_cfg;
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -345,6 +346,11 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable VLAN ethertype matching and stripping */
nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
(2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+ /* Check if HW expected value is higher (could be in future chips) */
+ cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+ if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+ nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}
/* Channel parse index configuration */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20193..afb10e326b4f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
-#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CQM_CFG (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index dde8dc720cd3..b7093b9cd1e8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -566,8 +566,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cmp_queue *cq,
- struct cqe_rx_t *cqe_rx, int cqe_type)
+ struct cqe_rx_t *cqe_rx)
{
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
@@ -583,7 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
}
/* Check for errors */
- err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
if (err && !cqe_rx->rb_cnt)
return;
@@ -674,8 +673,7 @@ loop:
cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq,
- cq_desc, CQE_TYPE_RX);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -1117,7 +1115,6 @@ int nicvf_stop(struct net_device *netdev)
/* Clear multiqset info */
nic->pnicvf = nic;
- nic->sqs_count = 0;
return 0;
}
@@ -1346,6 +1343,9 @@ void nicvf_update_stats(struct nicvf *nic)
drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
stats->tx_bcast_frames_ok +
stats->tx_mcast_frames_ok;
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
drv_stats->rx_drops = stats->rx_drop_red +
stats->rx_drop_overrun;
drv_stats->tx_drops = stats->tx_drops;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d1c217eaf417..912ee28ab58b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1414,16 +1414,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
}
/* Check for errors in the receive cmp.queue entry */
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
struct nicvf_hw_stats *stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
- if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
- drv_stats->rx_frames_ok++;
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
- }
if (netif_msg_rx_err(nic))
netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 033e8306e91c..5652c612e20b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -344,8 +344,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9df26c2263bc..42718cc7d4e8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -549,7 +549,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@@ -568,13 +570,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -587,29 +582,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Do lmac reinit to see if condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
@@ -619,8 +615,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@@ -634,9 +633,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -708,7 +713,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@@ -717,9 +722,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e179363a1..42010d2e5ddf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 69707108d23c..98fe5a2cd6e3 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -213,8 +213,11 @@ struct e1000_rx_ring {
};
#define E1000_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) \
- ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+({ \
+ unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \
+ unsigned int use = READ_ONCE((R)->next_to_use); \
+ (clean > use ? 0 : (R)->count) + clean - use - 1; \
+})
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index fd7be860c201..068023595d84 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3876,7 +3876,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
- tx_ring->next_to_clean = i;
+ /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
+ * which will reuse the cleaned buffers.
+ */
+ smp_store_release(&tx_ring->next_to_clean, i);
netdev_completed_queue(netdev, pkts_compl, bytes_compl);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 0a854a47d31a..80ec587d510e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1959,8 +1959,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
* previous interrupt.
*/
if (rx_ring->set_itr) {
- writel(1000000000 / (rx_ring->itr_val * 256),
- rx_ring->itr_register);
+ u32 itr = rx_ring->itr_val ?
+ 1000000000 / (rx_ring->itr_val * 256) : 0;
+
+ writel(itr, rx_ring->itr_register);
rx_ring->set_itr = 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 14440200499b..48809e5d3f79 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -33,7 +33,7 @@
#include "fm10k_pf.h"
#include "fm10k_vf.h"
-#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */
+#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */
#define MAX_QUEUES FM10K_MAX_QUEUES_PF
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e76a44cf330c..09281558bfbc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1428,6 +1428,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
fm10k_for_each_ring(ring, q_vector->tx)
clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ return budget;
+
/* attempt to distribute budget to each queue fairly, but don't
* allow the budget to go below 1 because we'll exit polling
*/
@@ -1966,8 +1970,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
/* Allocate memory for queues */
err = fm10k_alloc_q_vectors(interface);
- if (err)
+ if (err) {
+ fm10k_reset_msix_capability(interface);
return err;
+ }
/* Map rings to devices, and map devices to physical queues */
fm10k_assign_rings(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 74be792f3f1b..7f3fb51bc37b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -159,13 +159,30 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
fm10k_mbx_free_irq(interface);
+ /* free interrupts */
+ fm10k_clear_queueing_scheme(interface);
+
/* delay any future reset requests */
interface->last_reset = jiffies + (10 * HZ);
/* reset and initialize the hardware so it is in a known state */
- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
- if (err)
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
+ goto reinit_err;
+ }
+
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
+ goto reinit_err;
+ }
+
+ err = fm10k_init_queueing_scheme(interface);
+ if (err) {
+ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+ goto reinit_err;
+ }
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
@@ -193,6 +210,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
fm10k_iov_resume(interface->pdev);
+reinit_err:
+ if (err)
+ netif_device_detach(netdev);
+
rtnl_unlock();
clear_bit(__FM10K_RESETTING, &interface->state);
@@ -1101,6 +1122,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
struct fm10k_hw *hw = &interface->hw;
int itr_reg;
+ /* no mailbox IRQ to free if MSI-X is not enabled */
+ if (!interface->msix_entries)
+ return;
+
/* disconnect the mailbox */
hw->mbx.ops.disconnect(hw, &hw->mbx);
@@ -1423,10 +1448,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
err = fm10k_mbx_request_irq_pf(interface);
else
err = fm10k_mbx_request_irq_vf(interface);
+ if (err)
+ return err;
/* connect mailbox */
- if (!err)
- err = hw->mbx.ops.connect(hw, &hw->mbx);
+ err = hw->mbx.ops.connect(hw, &hw->mbx);
+
+ /* if the mailbox failed to connect, then free IRQ */
+ if (err)
+ fm10k_mbx_free_irq(interface);
return err;
}
@@ -1684,7 +1714,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
interface->last_reset = jiffies + (10 * HZ);
/* reset and initialize the hardware so it is in a known state */
- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
+ return err;
+ }
+
+ err = hw->mac.ops.init_hw(hw);
if (err) {
dev_err(&pdev->dev, "init_hw failed: %d\n", err);
return err;
@@ -2071,8 +2107,10 @@ static int fm10k_resume(struct pci_dev *pdev)
/* reset hardware to known state */
err = hw->mac.ops.init_hw(&interface->hw);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
return err;
+ }
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
@@ -2185,6 +2223,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
fm10k_close(netdev);
+ /* free interrupts */
+ fm10k_clear_queueing_scheme(interface);
+
fm10k_mbx_free_irq(interface);
pci_disable_device(pdev);
@@ -2248,11 +2289,21 @@ static void fm10k_io_resume(struct pci_dev *pdev)
int err = 0;
/* reset hardware to known state */
- hw->mac.ops.init_hw(&interface->hw);
+ err = hw->mac.ops.init_hw(&interface->hw);
+ if (err) {
+ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
+ return;
+ }
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+ err = fm10k_init_queueing_scheme(interface);
+ if (err) {
+ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+ return;
+ }
+
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 318a212f0a78..35afd711d144 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -77,6 +77,7 @@ struct fm10k_hw;
#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
#define FM10K_ERR_PARAM -2
+#define FM10K_ERR_NO_RESOURCES -3
#define FM10K_ERR_REQUESTS_PENDING -4
#define FM10K_ERR_RESET_REQUESTED -5
#define FM10K_ERR_DMA_PENDING -6
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 36c8b0aa08fd..d512575c33f3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -103,7 +103,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
s32 err;
u16 i;
- /* assume we always have at least 1 queue */
+ /* verify we have at least 1 queue */
+ if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
+ !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) {
+ err = FM10K_ERR_NO_RESOURCES;
+ goto reset_max_queues;
+ }
+
+ /* determine how many queues we have */
for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
/* verify the Descriptor cache offsets are increasing */
tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
@@ -119,7 +126,7 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_disable_queues_generic(hw, i);
if (err)
- return err;
+ goto reset_max_queues;
/* record maximum queue count */
hw->mac.max_queues = i;
@@ -129,6 +136,11 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
return 0;
+
+reset_max_queues:
+ hw->mac.max_queues = 0;
+
+ return err;
}
/* This structure defines the attibutes to be parsed below */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4dd3e26129b4..7e258a83ccab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -767,6 +767,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3f385ffe420f..488a50d59dca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2164,8 +2164,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
@@ -2176,8 +2175,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
@@ -2188,9 +2186,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
@@ -2202,9 +2198,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a9873ec28c7..2215bebe208e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1317,6 +1317,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}
/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f = NULL;
+ int changed = 0;
+
+ WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+ "Missing mac_filter_list_lock\n");
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (is_vf == f->is_vf) &&
+ (is_netdev == f->is_netdev)) {
+ f->counter--;
+ f->changed = true;
+ changed = 1;
+ }
+ }
+ if (changed) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
@@ -1547,9 +1583,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
- i40e_sync_vsi_filters(vsi, false);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
return 0;
}
@@ -1935,11 +1973,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
/* Now process 'del_list' outside the lock */
if (!list_empty(&tmp_del_list)) {
+ int del_list_size;
+
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
- GFP_KERNEL);
+ del_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kzalloc(del_list_size, GFP_KERNEL);
if (!del_list) {
i40e_cleanup_add_list(&tmp_add_list);
@@ -1971,7 +2011,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
NULL);
aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- memset(del_list, 0, sizeof(*del_list));
+ memset(del_list, 0, del_list_size);
if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_err(&pf->pdev->dev,
@@ -2004,13 +2044,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}
if (!list_empty(&tmp_add_list)) {
+ int add_list_size;
/* do all the adds now */
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data),
- add_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_add_macvlan_element_data),
- GFP_KERNEL);
+ add_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kzalloc(add_list_size, GFP_KERNEL);
if (!add_list) {
/* Purge element from temporary lists */
i40e_cleanup_add_list(&tmp_add_list);
@@ -2048,7 +2089,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
if (ret)
break;
- memset(add_list, 0, sizeof(*add_list));
+ memset(add_list, 0, add_list_size);
}
/* Entries from tmp_add_list were cloned from MAC
* filter list, hence clean those cloned entries
@@ -2112,12 +2153,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- if (grab_rtnl)
- i40e_do_reset_safe(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
- else
- i40e_do_reset(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
} else {
ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2377,16 +2413,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- /* Make sure to release before sync_vsi_filter because that
- * function will lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}
/**
@@ -2459,16 +2492,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- /* Make sure to release before sync_vsi_filter because that
- * function with lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}
/**
@@ -2711,6 +2741,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
free_cpumask_var(mask);
}
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
}
/**
@@ -6685,6 +6720,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
i40e_status ret;
+ u32 val;
u32 v;
/* Now we wait for GRST to settle out.
@@ -6823,6 +6859,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+#define I40E_REG_MSS 0x000E64DC
+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
+#define I40E_64BYTE_MSS 0x400000
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
@@ -10183,6 +10233,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 link_status;
int err;
u32 len;
+ u32 val;
u32 i;
u8 set_fc_aq_fail;
@@ -10493,6 +10544,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 635b3ac17877..26c55bba4bf3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}
+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}
@@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}
+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}
@@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
}
}
+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}
@@ -526,11 +535,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
- else
- dev_kfree_skb_any(tx_buffer->skb);
-
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -542,6 +547,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
+
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -1416,31 +1425,12 @@ checksum_fail:
}
/**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
- union i40e_rx_desc *rx_desc)
-{
- const __le64 rss_mask =
- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
- if ((ring->netdev->features & NETIF_F_RXHASH) &&
- (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- else
- return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1458,6 +1448,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
}
/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u8 rx_ptype)
+{
+ u32 hash;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ return;
+
+ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ }
+}
+
+/**
* i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -1606,8 +1620,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1736,8 +1750,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 44462b40f2d7..e116d9a99b8e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
- if (!f)
- dev_info(&pf->pdev->dev,
- "Could not allocate VF MAC addr\n");
+ if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+ f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
+ true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not add MAC filter %pM for VF %d\n",
+ vf->default_lan_addr.addr, vf->vf_id);
+ }
f = i40e_add_filter(vsi, brdcast,
vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
@@ -1680,8 +1683,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
spin_lock_bh(&vsi->mac_filter_list_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
- i40e_del_filter(vsi, al->list[i].addr,
- I40E_VLAN_ANY, true, false);
+ if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ goto error_param;
+ }
+
spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 47e9a90d6b10..39db70a597ed 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
- else
- dev_kfree_skb_any(tx_buffer->skb);
-
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
+
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -245,16 +245,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
- /* check to see if there are any non-cache aligned descriptors
- * waiting to be written back, and kick the hardware to force
- * them to be written back in case of napi polling
- */
- if (budget &&
- !((i & WB_STRIDE) == WB_STRIDE) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
-
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
total_packets, total_bytes);
@@ -889,31 +879,12 @@ checksum_fail:
}
/**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
- union i40e_rx_desc *rx_desc)
-{
- const __le64 rss_mask =
- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
- if ((ring->netdev->features & NETIF_F_RXHASH) &&
- (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- else
- return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -931,6 +902,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
}
/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u8 rx_ptype)
+{
+ u32 hash;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ return;
+
+ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ }
+}
+
+/**
* i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -1071,8 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1189,8 +1184,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1770,6 +1764,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
+ u16 desc_count = 0;
+ bool tail_bump = true;
+ bool do_rs = false;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1810,6 +1807,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -1829,6 +1828,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -1843,35 +1844,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* Place RS bit on last descriptor of any packet that spans across the
- * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
- */
#define WB_STRIDE 0x3
- if (((i & WB_STRIDE) != WB_STRIDE) &&
- (first <= &tx_ring->tx_bi[i]) &&
- (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
- I40E_TXD_QW1_CMD_SHIFT);
- } else {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD <<
- I40E_TXD_QW1_CMD_SHIFT);
- }
-
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -1881,15 +1854,78 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ /* Algorithm to optimize tail and RS bit setting:
+ * if xmit_more is supported
+ * if xmit_more is true
+ * do not update tail and do not mark RS bit.
+ * if xmit_more is false and last xmit_more was false
+ * if every packet spanned less than 4 desc
+ * then set RS bit on 4th packet and update tail
+ * on every packet
+ * else
+ * update tail and set RS bit on every packet.
+ * if xmit_more is false and last_xmit_more was true
+ * update tail and set RS bit.
+ * else (kernel < 3.18)
+ * if every packet spanned less than 4 desc
+ * then set RS bit on 4th packet and update tail
+ * on every packet
+ * else
+ * set RS bit on EOP for every packet and update tail
+ *
+ * Optimization: wmb to be issued only in case of tail update.
+ * Also optimize the Descriptor WB path for RS bit with the same
+ * algorithm.
+ *
+ * Note: If there are less than 4 packets
+ * pending and interrupts were disabled the service task will
+ * trigger a force WB.
+ */
+ if (skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index))) {
+ tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ tail_bump = false;
+ } else if (!skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)) &&
+ (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+ (tx_ring->packet_stride < WB_STRIDE) &&
+ (desc_count < WB_STRIDE)) {
+ tx_ring->packet_stride++;
+ } else {
+ tx_ring->packet_stride = 0;
+ tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ do_rs = true;
+ }
+ if (do_rs)
+ tx_ring->packet_stride = 0;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+ I40E_TX_DESC_CMD_EOP) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+
/* notify HW of packet */
- if (!skb->xmit_more ||
- netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)))
- writel(i, tx_ring->tail);
- else
+ if (!tail_bump)
prefetchw(tx_desc + 1);
+ if (tail_bump) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, tx_ring->tail);
+ }
+
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index ebc1bf77f036..998976844e4e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -267,6 +267,8 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u8 packet_stride;
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 4790437a50ac..2ac62efc36f7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -477,54 +477,30 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- default:
+ else
return -EINVAL;
- }
break;
case TCP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- default:
+ else
return -EINVAL;
- }
break;
case UDP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- default:
+ } else {
return -EINVAL;
}
break;
case UDP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
+ } else {
return -EINVAL;
}
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 99d2cffae0cd..5f03ab3dfa19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1864,6 +1864,9 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
{
int i;
+ if (!adapter->tx_rings)
+ return;
+
for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->tx_rings[i]->desc)
i40evf_free_tx_resources(adapter->tx_rings[i]);
@@ -1932,6 +1935,9 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
{
int i;
+ if (!adapter->rx_rings)
+ return;
+
for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->rx_rings[i]->desc)
i40evf_free_rx_resources(adapter->rx_rings[i]);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 32e620e1eb5c..5de3f52fd31f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -391,6 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
struct i40e_virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0;
struct i40evf_mac_filter *f;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -415,7 +416,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ more = true;
}
veal = kzalloc(len, GFP_ATOMIC);
@@ -431,7 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
f->add = false;
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
(u8 *)veal, len);
kfree(veal);
@@ -450,6 +454,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
struct i40e_virtchnl_ether_addr_list *veal;
struct i40evf_mac_filter *f, *ftmp;
int len, i = 0, count = 0;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -474,7 +479,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ more = true;
}
veal = kzalloc(len, GFP_ATOMIC);
if (!veal)
@@ -490,7 +497,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
kfree(f);
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
(u8 *)veal, len);
kfree(veal);
@@ -509,6 +517,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
struct i40e_virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct i40evf_vlan_filter *f;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -534,7 +543,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl)
@@ -549,7 +560,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
f->add = false;
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -567,6 +579,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
struct i40e_virtchnl_vlan_filter_list *vvfl;
struct i40evf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -592,7 +605,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl)
@@ -608,7 +623,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
kfree(f);
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7a73510e547c..97bf0c3d5c69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
case I210_I_PHY_ID:
phy->type = e1000_phy_i210;
phy->ops.check_polarity = igb_check_polarity_m88;
+ phy->ops.get_cfg_done = igb_get_cfg_done_i210;
phy->ops.get_phy_info = igb_get_phy_info_m88;
phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 65d931669f81..29f59c76878a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -900,3 +900,30 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
wr32(E1000_MDICNFG, mdicnfg);
return ret_val;
}
+
+/**
+ * igb_get_cfg_done_i210 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so an error is *ONLY* logged and returns
+ * 0. If we were to return with error, EEPROM-less silicon
+ * would not be able to be reset or change link.
+ **/
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ while (timeout) {
+ if (rd32(E1000_EEMNGCTL_I210) & mask)
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ if (!timeout)
+ hw_dbg("MNG configuration cycle has not completed.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 3442b6357d01..eaa68a50cb3b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
bool igb_get_flash_presence_i210(struct e1000_hw *hw);
s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 4af2870e49f8..0fdcd4d1b982 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */
#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 1a2f1cc44b28..e3cb93bdb21a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -389,6 +389,8 @@ struct igb_adapter {
u16 link_speed;
u16 link_duplex;
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+
struct work_struct reset_task;
struct work_struct watchdog_task;
bool fc_autoneg;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ea7b09887245..fa3b4cbea23b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2294,9 +2294,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
err = -EIO;
- hw->hw_addr = pci_iomap(pdev, 0, 0);
- if (!hw->hw_addr)
+ adapter->io_addr = pci_iomap(pdev, 0, 0);
+ if (!adapter->io_addr)
goto err_ioremap;
+ /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
+ hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igb_netdev_ops;
igb_set_ethtool_ops(netdev);
@@ -2656,7 +2658,7 @@ err_sw_init:
#ifdef CONFIG_PCI_IOV
igb_disable_sriov(pdev);
#endif
- pci_iounmap(pdev, hw->hw_addr);
+ pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -2823,7 +2825,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_clear_interrupt_scheme(adapter);
- pci_iounmap(pdev, hw->hw_addr);
+ pci_iounmap(pdev, adapter->io_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev,
@@ -2856,6 +2858,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
return;
+ /* Of the below we really only want the effect of getting
+ * IGB_FLAG_HAS_MSIX set (if available), without which
+ * igb_enable_sriov() has no effect.
+ */
+ igb_set_interrupt_capability(adapter, true);
+ igb_reset_interrupt_capability(adapter);
+
pci_sriov_set_totalvfs(pdev, 7);
igb_enable_sriov(pdev, max_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index aed8d029b23d..cd9b284bc83b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
ixgbe_for_each_ring(ring, q_vector->tx)
clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
- if (!ixgbe_qv_lock_napi(q_vector))
+ /* Exit if we are called by netpoll or busy polling is active */
+ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
return budget;
/* attempt to distribute budget to each queue fairly, but don't allow
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a4ac6fedac75..71ec9cb08e06 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -226,7 +226,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2e022e900939..7cc9df717323 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -399,6 +399,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ return -ENOTSUPP;
+
coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
@@ -416,11 +419,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
int tc;
int i;
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -ENOTSUPP;
+
+ mutex_lock(&priv->state_lock);
priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
for (i = 0; i < priv->params.num_channels; ++i) {
c = priv->channel[i];
@@ -436,6 +446,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
coal->rx_max_coalesced_frames);
}
+out:
+ mutex_unlock(&priv->state_lock);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cbd17e25beeb..90e876ecc720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -863,12 +863,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
if (err)
goto err_destroy_cq;
- err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation_usecs,
- moderation_frames);
- if (err)
- goto err_destroy_cq;
-
+ if (MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
return 0;
err_destroy_cq:
@@ -1963,6 +1961,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_warn(mdev, "CQ moderation is not supported\n");
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3dd548ab8df1..40365cb1abe6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -794,13 +794,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
- bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+ __set_bit(pos, p_spq->p_comp_bitmap);
while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
- bitmap_clear(p_spq->p_comp_bitmap,
- p_spq->comp_bitmap_idx,
- SPQ_RING_SIZE);
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 174e06ec7c2f..e5bb870b5461 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2390,8 +2390,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -2984,6 +2982,9 @@ ppp_disconnect_channel(struct channel *pch)
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+
atomic_dec(&channel_count);
if (!pch->file.dead) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index 7e74ac3ad815..bcf29bf6f727 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -3401,10 +3401,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
goto err;
}
- /* Allow full data communication using DPC from now on. */
- brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
- bcmerror = 0;
-
err:
brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
sdio_release_host(bus->sdiodev->func[1]);
@@ -4112,6 +4108,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
}
if (err == 0) {
+ /* Allow full data communication using DPC from now on. */
+ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+
err = brcmf_sdiod_intr_register(sdiodev);
if (err != 0)
brcmf_err("intr register failed:%d\n", err);
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 1e56d445c6e1..f53ed2693879 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -2472,7 +2472,7 @@ void *cnss_pci_get_virt_ramdump_mem(unsigned long *size)
void cnss_pci_device_crashed(void)
{
if (penv && penv->subsys) {
- subsys_set_crash_status(penv->subsys, true);
+ subsys_set_crash_status(penv->subsys, CRASH_STATUS_ERR_FATAL);
subsystem_restart_dev(penv->subsys);
}
}
@@ -2491,7 +2491,7 @@ EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
void cnss_device_crashed(void)
{
if (penv && penv->subsys) {
- subsys_set_crash_status(penv->subsys, true);
+ subsys_set_crash_status(penv->subsys, CRASH_STATUS_ERR_FATAL);
subsystem_restart_dev(penv->subsys);
}
}
diff --git a/drivers/net/wireless/cnss/cnss_sdio.c b/drivers/net/wireless/cnss/cnss_sdio.c
index 01b969ec627f..ce7dbc64c4c3 100644
--- a/drivers/net/wireless/cnss/cnss_sdio.c
+++ b/drivers/net/wireless/cnss/cnss_sdio.c
@@ -605,7 +605,8 @@ void cnss_sdio_device_crashed(void)
return;
ssr_info = &cnss_pdata->ssr_info;
if (ssr_info->subsys) {
- subsys_set_crash_status(ssr_info->subsys, true);
+ subsys_set_crash_status(ssr_info->subsys,
+ CRASH_STATUS_ERR_FATAL);
subsystem_restart_dev(ssr_info->subsys);
}
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0c67b57be83c..c851bc53831c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2672,10 +2672,10 @@ static int nvme_dev_add(struct nvme_dev *dev)
return 0;
}
-static int nvme_dev_map(struct nvme_dev *dev)
+static int nvme_pci_enable(struct nvme_dev *dev)
{
u64 cap;
- int bars, result = -ENOMEM;
+ int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pci_enable_device_mem(pdev))
@@ -2683,24 +2683,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (!bars)
- goto disable_pci;
-
- if (pci_request_selected_regions(pdev, bars, "nvme"))
- goto disable_pci;
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
goto disable;
- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
- if (!dev->bar)
- goto disable;
-
if (readl(&dev->bar->csts) == -1) {
result = -ENODEV;
- goto unmap;
+ goto disable;
}
/*
@@ -2710,7 +2700,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!pdev->irq) {
result = pci_enable_msix(pdev, dev->entry, 1);
if (result < 0)
- goto unmap;
+ goto disable;
}
cap = lo_hi_readq(&dev->bar->cap);
@@ -2734,18 +2724,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
return 0;
- unmap:
- iounmap(dev->bar);
- dev->bar = NULL;
disable:
- pci_release_regions(pdev);
- disable_pci:
pci_disable_device(pdev);
+
return result;
}
static void nvme_dev_unmap(struct nvme_dev *dev)
{
+ if (dev->bar)
+ iounmap(dev->bar);
+ pci_release_regions(to_pci_dev(dev->dev));
+}
+
+static void nvme_pci_disable(struct nvme_dev *dev)
+{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pdev->msi_enabled)
@@ -2753,12 +2746,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
else if (pdev->msix_enabled)
pci_disable_msix(pdev);
- if (dev->bar) {
- iounmap(dev->bar);
- dev->bar = NULL;
- pci_release_regions(pdev);
- }
-
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
}
@@ -2962,7 +2949,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_list_remove(dev);
- if (dev->bar) {
+ if (pci_is_enabled(to_pci_dev(dev->dev))) {
nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
}
@@ -2976,7 +2963,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_shutdown_ctrl(dev);
nvme_disable_queue(dev, 0);
}
- nvme_dev_unmap(dev);
+ nvme_pci_disable(dev);
for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
@@ -3136,7 +3123,7 @@ static void nvme_probe_work(struct work_struct *work)
bool start_thread = false;
int result;
- result = nvme_dev_map(dev);
+ result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -3292,6 +3279,27 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+ int bars;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (!bars)
+ return -ENODEV;
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ return -ENODEV;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar)
+ goto release;
+
+ return 0;
+release:
+ pci_release_regions(pdev);
+ return -ENODEV;
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int node, result = -ENOMEM;
@@ -3317,6 +3325,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&dev->reset_work, nvme_reset_work);
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
+
+ result = nvme_dev_map(dev);
+ if (result)
+ goto free;
+
result = nvme_set_instance(dev);
if (result)
goto put_pci;
@@ -3355,6 +3368,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_release_instance(dev);
put_pci:
put_device(dev->dev);
+ nvme_dev_unmap(dev);
free:
kfree(dev->queues);
kfree(dev->entry);
@@ -3398,6 +3412,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_queues(dev, 0);
nvme_release_cmb(dev);
nvme_release_prp_pools(dev);
+ nvme_dev_unmap(dev);
kref_put(&dev->kref, nvme_free_dev);
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 017dd94f16ea..31341290cd91 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -112,6 +112,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
}
+/* always return newly allocated name, caller must free after use */
static const char *safe_name(struct kobject *kobj, const char *orig_name)
{
const char *name = orig_name;
@@ -126,9 +127,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name)
name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
}
- if (name != orig_name)
+ if (name == orig_name) {
+ name = kstrdup(orig_name, GFP_KERNEL);
+ } else {
pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
kobject_name(kobj), name);
+ }
return name;
}
@@ -159,6 +163,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
int __of_attach_node_sysfs(struct device_node *np)
{
const char *name;
+ struct kobject *parent;
struct property *pp;
int rc;
@@ -171,15 +176,16 @@ int __of_attach_node_sysfs(struct device_node *np)
np->kobj.kset = of_kset;
if (!np->parent) {
/* Nodes without parents are new top level trees */
- rc = kobject_add(&np->kobj, NULL, "%s",
- safe_name(&of_kset->kobj, "base"));
+ name = safe_name(&of_kset->kobj, "base");
+ parent = NULL;
} else {
name = safe_name(&np->parent->kobj, kbasename(np->full_name));
- if (!name || !name[0])
- return -EINVAL;
-
- rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
+ parent = &np->parent->kobj;
}
+ if (!name)
+ return -ENOMEM;
+ rc = kobject_add(&np->kobj, parent, "%s", name);
+ kfree(name);
if (rc)
return rc;
@@ -1753,6 +1759,12 @@ int __of_remove_property(struct device_node *np, struct property *prop)
return 0;
}
+void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
+{
+ sysfs_remove_bin_file(&np->kobj, &prop->attr);
+ kfree(prop->attr.attr.name);
+}
+
void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
{
if (!IS_ENABLED(CONFIG_SYSFS))
@@ -1760,7 +1772,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
/* at early boot, bail here and defer setup to of_init() */
if (of_kset && of_node_is_attached(np))
- sysfs_remove_bin_file(&np->kobj, &prop->attr);
+ __of_sysfs_remove_bin_file(np, prop);
}
/**
@@ -1830,7 +1842,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop
return;
if (oldprop)
- sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
+ __of_sysfs_remove_bin_file(np, oldprop);
__of_add_property_sysfs(np, newprop);
}
@@ -2241,20 +2253,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
struct of_endpoint endpoint;
- struct device_node *node, *prev_node = NULL;
-
- while (1) {
- node = of_graph_get_next_endpoint(parent, prev_node);
- of_node_put(prev_node);
- if (!node)
- break;
+ struct device_node *node = NULL;
+ for_each_endpoint_of_node(parent, node) {
of_graph_parse_endpoint(node, &endpoint);
if (((port_reg == -1) || (endpoint.port == port_reg)) &&
((reg == -1) || (endpoint.id == reg)))
return node;
-
- prev_node = node;
}
return NULL;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 53826b84e0ec..2d72ddcf534f 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -55,7 +55,7 @@ void __of_detach_node_sysfs(struct device_node *np)
/* only remove properties if on sysfs */
if (of_node_is_attached(np)) {
for_each_property_of_node(np, pp)
- sysfs_remove_bin_file(&np->kobj, &pp->attr);
+ __of_sysfs_remove_bin_file(np, pp);
kobject_del(&np->kobj);
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 8e882e706cd8..46ddbee22ce3 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -81,6 +81,9 @@ extern int __of_attach_node_sysfs(struct device_node *np);
extern void __of_detach_node(struct device_node *np);
extern void __of_detach_node_sysfs(struct device_node *np);
+extern void __of_sysfs_remove_bin_file(struct device_node *np,
+ struct property *prop);
+
/* iterators for transactions, used for overlays */
/* forward iterator */
#define for_each_transaction_entry(_oft, _te) \
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7eaa4c87fec7..10a6a8e5db88 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1278,6 +1278,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index eead54cd01b2..d7508704c992 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1372,10 +1372,10 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return -EACCES;
- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
- retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
- else
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+ else
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
if (retval)
goto err;
@@ -1427,10 +1427,10 @@ err_rom_file:
err_resource_files:
pci_remove_resource_files(pdev);
err_config_file:
- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
- else
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+ else
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
err:
return retval;
}
@@ -1464,10 +1464,10 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
pci_remove_capabilities_sysfs(pdev);
- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
- else
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+ else
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
pci_remove_resource_files(pdev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7e327309cf69..42774bc39786 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -287,6 +287,18 @@ static void quirk_citrine(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
+/*
+ * This chip can cause bus lockups if config addresses above 0x600
+ * are read or written.
+ */
+static void quirk_nfp6000(struct pci_dev *dev)
+{
+ dev->cfg_size = 0x600;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
+
/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
@@ -3115,13 +3127,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
}
/*
- * Atheros AR93xx chips do not behave after a bus reset. The device will
- * throw a Link Down error on AER-capable systems and regardless of AER,
- * config space of the device is never accessible again and typically
- * causes the system to hang or reset when access is attempted.
+ * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
+ * The device will throw a Link Down error on AER-capable systems and
+ * regardless of AER, config space of the device is never accessible again
+ * and typically causes the system to hang or reset when access is attempted.
* http://www.spinics.net/lists/linux-pci/msg34797.html
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 1029aa7889b5..398ec45aadef 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -207,9 +207,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
pin_reg = &info->pin_regs[pin_id];
if (pin_reg->mux_reg == -1) {
- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
info->pins[pin_id].name);
- return -EINVAL;
+ continue;
}
if (info->flags & SHARE_MUX_CONF_REG) {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 84936bae6e5e..4e377599d266 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -160,7 +160,6 @@ struct chv_pin_context {
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
* @regs: MMIO registers
- * @lock: Lock to serialize register accesses
* @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
* offset (in GPIO number space)
* @community: Community this pinctrl instance represents
@@ -174,7 +173,6 @@ struct chv_pinctrl {
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
void __iomem *regs;
- raw_spinlock_t lock;
unsigned intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
@@ -659,6 +657,17 @@ static const struct chv_community *chv_communities[] = {
&southeast_community,
};
+/*
+ * Lock to serialize register accesses
+ *
+ * Due to a silicon issue, a shared lock must be used to prevent
+ * concurrent accesses across the 4 GPIO controllers.
+ *
+ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
+ * errata #CHT34, for further information.
+ */
+static DEFINE_RAW_SPINLOCK(chv_lock);
+
static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
unsigned reg)
{
@@ -720,13 +729,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
u32 ctrl0, ctrl1;
bool locked;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
locked = chv_pad_locked(pctrl, offset);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
seq_puts(s, "GPIO ");
@@ -789,14 +798,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
grp = &pctrl->community->groups[group];
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
/* Check first that the pad is not locked */
for (i = 0; i < grp->npins; i++) {
if (chv_pad_locked(pctrl, grp->pins[i])) {
dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
grp->pins[i]);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EBUSY;
}
}
@@ -839,7 +848,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
}
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -853,13 +862,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
void __iomem *reg;
u32 value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
if (chv_pad_locked(pctrl, offset)) {
value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
if (!(value & CHV_PADCTRL0_GPIOEN)) {
/* Locked so cannot enable */
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EBUSY;
}
} else {
@@ -899,7 +908,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
chv_writel(value, reg);
}
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -913,13 +922,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
void __iomem *reg;
u32 value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
chv_writel(value, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -931,7 +940,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned long flags;
u32 ctrl0;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
if (input)
@@ -940,7 +949,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
chv_writel(ctrl0, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -965,10 +974,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
u16 arg = 0;
u32 term;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
@@ -1042,7 +1051,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
unsigned long flags;
u32 ctrl0, pull;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(reg);
switch (param) {
@@ -1065,7 +1074,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
@@ -1083,7 +1092,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
@@ -1091,12 +1100,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
break;
default:
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
chv_writel(ctrl0, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -1162,9 +1171,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
u32 ctrl0, cfg;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1182,7 +1191,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
void __iomem *reg;
u32 ctrl0;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
ctrl0 = readl(reg);
@@ -1194,7 +1203,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
chv_writel(ctrl0, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -1204,9 +1213,9 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
u32 ctrl0, direction;
unsigned long flags;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1244,14 +1253,14 @@ static void chv_gpio_irq_ack(struct irq_data *d)
int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
u32 intr_line;
- raw_spin_lock(&pctrl->lock);
+ raw_spin_lock(&chv_lock);
intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
- raw_spin_unlock(&pctrl->lock);
+ raw_spin_unlock(&chv_lock);
}
static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -1262,7 +1271,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
u32 value, intr_line;
unsigned long flags;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
@@ -1275,7 +1284,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
value |= BIT(intr_line);
chv_writel(value, pctrl->regs + CHV_INTMASK);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static void chv_gpio_irq_mask(struct irq_data *d)
@@ -1309,7 +1318,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
unsigned long flags;
u32 intsel, value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1324,7 +1333,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
irq_set_handler_locked(d, handler);
pctrl->intr_lines[intsel] = offset;
}
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
chv_gpio_irq_unmask(d);
@@ -1340,7 +1349,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
/*
* Pins which can be used as shared interrupt are configured in
@@ -1389,7 +1398,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -1501,7 +1510,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
if (i == ARRAY_SIZE(chv_communities))
return -ENODEV;
- raw_spin_lock_init(&pctrl->lock);
pctrl->dev = &pdev->dev;
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 3318f1d6193c..7340ff78839a 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -48,17 +48,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
- /*
- * Suppose BIOS or Bootloader sets specific debounce for the
- * GPIO. if not, set debounce to be 2.75ms and remove glitch.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
-
pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -331,15 +320,6 @@ static void amd_gpio_irq_enable(struct irq_data *d)
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
- /*
- Suppose BIOS or Bootloader sets specific debounce for the
- GPIO. if not, set debounce to be 2.75ms.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 23b6b8c29a99..73d8d47ea465 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1576,6 +1576,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
else
mask &= ~soc_mask;
pcs->write(mask, pcswi->reg);
+
+ /* flush posted write */
+ mask = pcs->read(pcswi->reg);
raw_spin_unlock(&pcs->lock);
}
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 6b83b50d382e..68546eec7f61 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -127,4 +127,11 @@ config PINCTRL_WCD
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
WCD gpio controller block.
+config PINCTRL_LPI
+ tristate "Qualcomm Technologies, Inc LPI pin controller driver"
+ depends on GPIOLIB && OF
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ LPI gpio controller block.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 7d9cc7a9eb43..fa228c7243e2 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o
obj-$(CONFIG_PINCTRL_MSMFALCON) += pinctrl-msmfalcon.o
obj-$(CONFIG_PINCTRL_WCD) += pinctrl-wcd.o
+obj-$(CONFIG_PINCTRL_LPI) += pinctrl-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpi.c
new file mode 100644
index 000000000000..4829ba7f4bf0
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-lpi.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define LPI_ADDRESS_SIZE 0xC000
+
+#define LPI_GPIO_REG_VAL_CTL 0x00
+#define LPI_GPIO_REG_DIR_CTL 0x04
+
+#define LPI_GPIO_REG_PULL_SHIFT 0x0
+#define LPI_GPIO_REG_PULL_MASK 0x3
+
+#define LPI_GPIO_REG_FUNCTION_SHIFT 0x2
+#define LPI_GPIO_REG_FUNCTION_MASK 0x3C
+
+#define LPI_GPIO_REG_OUT_STRENGTH_SHIFT 0x6
+#define LPI_GPIO_REG_OUT_STRENGTH_MASK 0x1C0
+
+#define LPI_GPIO_REG_OE_SHIFT 0x9
+#define LPI_GPIO_REG_OE_MASK 0x200
+
+#define LPI_GPIO_REG_DIR_SHIFT 0x1
+#define LPI_GPIO_REG_DIR_MASK 0x2
+
+#define LPI_GPIO_BIAS_DISABLE 0x0
+#define LPI_GPIO_PULL_DOWN 0x1
+#define LPI_GPIO_KEEPER 0x2
+#define LPI_GPIO_PULL_UP 0x3
+
+#define LPI_GPIO_FUNC_GPIO "gpio"
+#define LPI_GPIO_FUNC_FUNC1 "func1"
+#define LPI_GPIO_FUNC_FUNC2 "func2"
+#define LPI_GPIO_FUNC_FUNC3 "func3"
+#define LPI_GPIO_FUNC_FUNC4 "func4"
+#define LPI_GPIO_FUNC_FUNC5 "func5"
+
+static bool lpi_dev_up;
+/* The index of each function in lpi_gpio_functions[] array */
+enum lpi_gpio_func_index {
+ LPI_GPIO_FUNC_INDEX_GPIO = 0x00,
+ LPI_GPIO_FUNC_INDEX_FUNC1 = 0x01,
+ LPI_GPIO_FUNC_INDEX_FUNC2 = 0x02,
+ LPI_GPIO_FUNC_INDEX_FUNC3 = 0x03,
+ LPI_GPIO_FUNC_INDEX_FUNC4 = 0x04,
+ LPI_GPIO_FUNC_INDEX_FUNC5 = 0x05,
+};
+
+/**
+ * struct lpi_gpio_pad - keep current GPIO settings
+ * @offset: Nth GPIO in supported GPIOs.
+ * @output_enabled: Set to true if GPIO output logic is enabled.
+ * @value: value of a pin
+ * @base: Address base of LPI GPIO PAD.
+ * @pullup: Constant current which flow through GPIO output buffer.
+ * @strength: No, Low, Medium, High
+ * @function: See lpi_gpio_functions[]
+ */
+struct lpi_gpio_pad {
+ u16 offset;
+ bool output_enabled;
+ bool value;
+ char __iomem *base;
+ unsigned int pullup;
+ unsigned int strength;
+ unsigned int function;
+};
+
+struct lpi_gpio_state {
+ struct device *dev;
+ struct pinctrl_dev *ctrl;
+ struct gpio_chip chip;
+ char __iomem *base;
+};
+
+static const char *const lpi_gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31",
+};
+
+static const u32 lpi_offset[] = {
+ 0x00000000,
+ 0x00001000,
+ 0x00002000,
+ 0x00003000,
+ 0x00003010,
+ 0x00004000,
+ 0x00004010,
+ 0x00005000,
+ 0x00005010,
+ 0x00005020,
+ 0x00005030,
+ 0x00005040,
+ 0x00005050,
+ 0x00006000,
+ 0x00006010,
+ 0x00007000,
+ 0x00007010,
+ 0x00008000,
+ 0x00008010,
+ 0x00008020,
+ 0x00008030,
+ 0x00008040,
+ 0x00008050,
+ 0x00008060,
+ 0x00008070,
+ 0x00009000,
+ 0x00009010,
+ 0x0000A000,
+ 0x0000A010,
+ 0x0000B000,
+ 0x0000B010,
+};
+
+static const char *const lpi_gpio_functions[] = {
+ [LPI_GPIO_FUNC_INDEX_GPIO] = LPI_GPIO_FUNC_GPIO,
+ [LPI_GPIO_FUNC_INDEX_FUNC1] = LPI_GPIO_FUNC_FUNC1,
+ [LPI_GPIO_FUNC_INDEX_FUNC2] = LPI_GPIO_FUNC_FUNC2,
+ [LPI_GPIO_FUNC_INDEX_FUNC3] = LPI_GPIO_FUNC_FUNC3,
+ [LPI_GPIO_FUNC_INDEX_FUNC4] = LPI_GPIO_FUNC_FUNC4,
+ [LPI_GPIO_FUNC_INDEX_FUNC5] = LPI_GPIO_FUNC_FUNC5,
+};
+
+static inline struct lpi_gpio_state *to_gpio_state(struct gpio_chip *chip)
+{
+ return container_of(chip, struct lpi_gpio_state, chip);
+};
+
+static int lpi_gpio_read(struct lpi_gpio_pad *pad, unsigned int addr)
+{
+ int ret;
+
+ if (!lpi_dev_up) {
+ pr_err_ratelimited("%s: ADSP is down due to SSR, return\n",
+ __func__);
+ return 0;
+ }
+
+ ret = ioread32(pad->base + pad->offset + addr);
+ if (ret < 0)
+ pr_err("%s: read 0x%x failed\n", __func__, addr);
+
+ return ret;
+}
+
+static int lpi_gpio_write(struct lpi_gpio_pad *pad, unsigned int addr,
+ unsigned int val)
+{
+ if (!lpi_dev_up) {
+ pr_err_ratelimited("%s: ADSP is down due to SSR, return\n",
+ __func__);
+ return 0;
+ }
+
+ iowrite32(val, pad->base + pad->offset + addr);
+ return 0;
+}
+
/* pinctrl_ops .get_groups_count: every pin is its own single-pin group. */
static int lpi_gpio_get_groups_count(struct pinctrl_dev *pctldev)
{
	/* Every PIN is a group */
	return pctldev->desc->npins;
}
+
/* pinctrl_ops .get_group_name: group name is the pin's own name. */
static const char *lpi_gpio_get_group_name(struct pinctrl_dev *pctldev,
					   unsigned pin)
{
	return pctldev->desc->pins[pin].name;
}
+
+static int lpi_gpio_get_group_pins(struct pinctrl_dev *pctldev, unsigned pin,
+ const unsigned **pins, unsigned *num_pins)
+{
+ *pins = &pctldev->desc->pins[pin].number;
+ *num_pins = 1;
+ return 0;
+}
+
/* Pin-control callbacks; DT parsing uses the generic group-based helpers. */
static const struct pinctrl_ops lpi_gpio_pinctrl_ops = {
	.get_groups_count = lpi_gpio_get_groups_count,
	.get_group_name = lpi_gpio_get_group_name,
	.get_group_pins = lpi_gpio_get_group_pins,
	.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
	.dt_free_map = pinctrl_utils_dt_free_map,
};
+
/* pinmux_ops .get_functions_count: fixed table (gpio + func1..func5). */
static int lpi_gpio_get_functions_count(struct pinctrl_dev *pctldev)
{
	return ARRAY_SIZE(lpi_gpio_functions);
}
+
/* pinmux_ops .get_function_name: index into lpi_gpio_functions[]. */
static const char *lpi_gpio_get_function_name(struct pinctrl_dev *pctldev,
					      unsigned function)
{
	return lpi_gpio_functions[function];
}
+
+static int lpi_gpio_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char *const **groups,
+ unsigned *const num_qgroups)
+{
+ *groups = lpi_gpio_groups;
+ *num_qgroups = pctldev->desc->npins;
+ return 0;
+}
+
+static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned pin)
+{
+ struct lpi_gpio_pad *pad;
+ unsigned int val;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ pad->function = function;
+
+ val = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);
+ val &= ~(LPI_GPIO_REG_FUNCTION_MASK);
+ val |= pad->function << LPI_GPIO_REG_FUNCTION_SHIFT;
+ lpi_gpio_write(pad, LPI_GPIO_REG_VAL_CTL, val);
+ return 0;
+}
+
/* Pin-mux callbacks: function enumeration and per-pin mux programming. */
static const struct pinmux_ops lpi_gpio_pinmux_ops = {
	.get_functions_count = lpi_gpio_get_functions_count,
	.get_function_name = lpi_gpio_get_function_name,
	.get_function_groups = lpi_gpio_get_function_groups,
	.set_mux = lpi_gpio_set_mux,
};
+
+static int lpi_config_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ unsigned param = pinconf_to_config_param(*config);
+ struct lpi_gpio_pad *pad;
+ unsigned arg;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = pad->pullup = LPI_GPIO_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = pad->pullup == LPI_GPIO_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ arg = pad->pullup = LPI_GPIO_KEEPER;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = pad->pullup == LPI_GPIO_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT:
+ arg = pad->output_enabled;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static unsigned lpi_drive_to_regval(u32 arg)
+{
+ return (arg/2 - 1);
+}
+
/*
 * lpi_config_set() - apply generic pinconf settings to one pin.
 * @pctldev: pin controller
 * @pin:     pin number
 * @configs: packed (param, arg) pairs to apply
 * @nconfs:  number of entries in @configs
 *
 * Two phases: first cache every requested setting on the pad, then
 * compose and write VAL_CTL (pull, drive strength, output value) and
 * DIR_CTL (direction) in one go.
 *
 * Returns 0 on success, -EINVAL on the first unsupported parameter
 * (settings cached before that point remain cached but are NOT written).
 */
static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
			  unsigned long *configs, unsigned nconfs)
{
	struct lpi_gpio_pad *pad;
	unsigned param, arg;
	int i, ret = 0, val;

	pad = pctldev->desc->pins[pin].drv_data;

	/* Phase 1: cache the requested settings on the pad. */
	for (i = 0; i < nconfs; i++) {
		param = pinconf_to_config_param(configs[i]);
		arg = pinconf_to_config_argument(configs[i]);

		dev_dbg(pctldev->dev, "%s: param: %d arg: %d pin: %d\n",
			__func__, param, arg, pin);

		switch (param) {
		case PIN_CONFIG_BIAS_DISABLE:
			pad->pullup = LPI_GPIO_BIAS_DISABLE;
			break;
		case PIN_CONFIG_BIAS_PULL_DOWN:
			pad->pullup = LPI_GPIO_PULL_DOWN;
			break;
		case PIN_CONFIG_BIAS_BUS_HOLD:
			pad->pullup = LPI_GPIO_KEEPER;
			break;
		case PIN_CONFIG_BIAS_PULL_UP:
			pad->pullup = LPI_GPIO_PULL_UP;
			break;
		case PIN_CONFIG_INPUT_ENABLE:
			pad->output_enabled = false;
			break;
		case PIN_CONFIG_OUTPUT:
			pad->output_enabled = true;
			pad->value = arg;
			break;
		case PIN_CONFIG_DRIVE_STRENGTH:
			pad->strength = arg;
			break;
		default:
			ret = -EINVAL;
			goto done;
		}
	}

	/* Phase 2: compose VAL_CTL from the cached state and write it. */
	val = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);
	val &= ~(LPI_GPIO_REG_PULL_MASK | LPI_GPIO_REG_OUT_STRENGTH_MASK |
		 LPI_GPIO_REG_OE_MASK);
	val |= pad->pullup << LPI_GPIO_REG_PULL_SHIFT;
	/*
	 * NOTE(review): if PIN_CONFIG_DRIVE_STRENGTH was never applied,
	 * pad->strength is 0 and lpi_drive_to_regval(0) wraps around
	 * (unsigned 0/2 - 1) — confirm callers always set drive strength
	 * before other configs.
	 */
	val |= lpi_drive_to_regval(pad->strength) <<
		LPI_GPIO_REG_OUT_STRENGTH_SHIFT;
	if (pad->output_enabled)
		val |= pad->value << LPI_GPIO_REG_OE_SHIFT;

	lpi_gpio_write(pad, LPI_GPIO_REG_VAL_CTL, val);
	lpi_gpio_write(pad, LPI_GPIO_REG_DIR_CTL,
		       pad->output_enabled << LPI_GPIO_REG_DIR_SHIFT);
done:
	return ret;
}
+
/* Pin-config callbacks; generic pinconf params, per-group (== per-pin). */
static const struct pinconf_ops lpi_gpio_pinconf_ops = {
	.is_generic = true,
	.pin_config_group_get = lpi_config_get,
	.pin_config_group_set = lpi_config_set,
};
+
+static int lpi_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
+{
+ struct lpi_gpio_state *state = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
+
+ return lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int lpi_gpio_direction_output(struct gpio_chip *chip,
+ unsigned pin, int val)
+{
+ struct lpi_gpio_state *state = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+
+ return lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
/*
 * gpio_chip .get callback.
 *
 * NOTE(review): this returns the raw VAL_CTL register contents rather
 * than a masked 0/1 pin level — confirm callers expect the full register
 * value here.
 */
static int lpi_gpio_get(struct gpio_chip *chip, unsigned pin)
{
	struct lpi_gpio_state *state = to_gpio_state(chip);
	struct lpi_gpio_pad *pad;
	int value;

	pad = state->ctrl->desc->pins[pin].drv_data;

	value = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);
	return value;
}
+
+static void lpi_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct lpi_gpio_state *state = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+
+ lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int lpi_notifier_service_cb(struct notifier_block *this,
+ unsigned long opcode, void *ptr)
+{
+ pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
+
+ switch (opcode) {
+ case AUDIO_NOTIFIER_SERVICE_DOWN:
+ lpi_dev_up = false;
+ break;
+ case AUDIO_NOTIFIER_SERVICE_UP:
+ lpi_dev_up = true;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
/* Lowest priority: run after every other ADSP-domain notifier. */
static struct notifier_block service_nb = {
	.notifier_call  = lpi_notifier_service_cb,
	.priority = -INT_MAX,
};
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static unsigned lpi_regval_to_drive(u32 val)
+{
+ return (val + 1) * 2;
+}
+
/*
 * lpi_gpio_dbg_show_one() - print one pin's state on the debugfs dump.
 * @s:       output seq_file
 * @pctldev: pin controller whose pins[] table is read (must be non-NULL)
 * @chip:    gpio chip (currently unused)
 * @offset:  pin index within @pctldev
 * @gpio:    global gpio number (currently unused)
 *
 * Decodes direction from DIR_CTL and function / drive strength / pull
 * from VAL_CTL.
 */
static void lpi_gpio_dbg_show_one(struct seq_file *s,
				  struct pinctrl_dev *pctldev,
				  struct gpio_chip *chip,
				  unsigned offset,
				  unsigned gpio)
{
	struct pinctrl_pin_desc pindesc;
	struct lpi_gpio_pad *pad;
	unsigned func;
	int is_out;
	int drive;
	int pull;
	u32 ctl_reg;

	static const char * const pulls[] = {
		"no pull",
		"pull down",
		"keeper",
		"pull up"
	};

	pindesc = pctldev->desc->pins[offset];
	pad = pctldev->desc->pins[offset].drv_data;
	ctl_reg = lpi_gpio_read(pad, LPI_GPIO_REG_DIR_CTL);
	is_out = (ctl_reg & LPI_GPIO_REG_DIR_MASK) >> LPI_GPIO_REG_DIR_SHIFT;
	ctl_reg = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);

	func = (ctl_reg & LPI_GPIO_REG_FUNCTION_MASK) >>
		LPI_GPIO_REG_FUNCTION_SHIFT;
	drive = (ctl_reg & LPI_GPIO_REG_OUT_STRENGTH_MASK) >>
		LPI_GPIO_REG_OUT_STRENGTH_SHIFT;
	/* pull is masked to 0..3, so the pulls[] lookup is always in range */
	pull = (ctl_reg & LPI_GPIO_REG_PULL_MASK) >> LPI_GPIO_REG_PULL_SHIFT;

	seq_printf(s, " %-8s: %-3s %d",
		   pindesc.name, is_out ? "out" : "in", func);
	seq_printf(s, " %dmA", lpi_regval_to_drive(drive));
	seq_printf(s, " %s", pulls[pull]);
}
+
+static void lpi_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned gpio = chip->base;
+ unsigned i;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ lpi_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ seq_puts(s, "\n");
+ }
+}
+
+#else
+#define lpi_gpio_dbg_show NULL
+#endif
+
/* Template copied into state->chip at probe; per-device fields
 * (dev, base, ngpio, label) are filled in by lpi_pinctrl_probe(). */
static const struct gpio_chip lpi_gpio_template = {
	.direction_input	= lpi_gpio_direction_input,
	.direction_output	= lpi_gpio_direction_output,
	.get			= lpi_gpio_get,
	.set			= lpi_gpio_set,
	.request		= gpiochip_generic_request,
	.free			= gpiochip_generic_free,
	.dbg_show		= lpi_gpio_dbg_show,
};
+
+static int lpi_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pinctrl_pin_desc *pindesc;
+ struct pinctrl_desc *pctrldesc;
+ struct lpi_gpio_pad *pad, *pads;
+ struct lpi_gpio_state *state;
+ int ret, npins, i;
+ char __iomem *lpi_base;
+ u32 reg;
+
+ ret = of_property_read_u32(dev->of_node, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "missing base address\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "qcom,num-gpios", &npins);
+ if (ret < 0)
+ return ret;
+
+ WARN_ON(npins > ARRAY_SIZE(lpi_gpio_groups));
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, state);
+
+ state->dev = &pdev->dev;
+
+ pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
+ if (!pindesc)
+ return -ENOMEM;
+
+ pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return -ENOMEM;
+
+ pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
+ if (!pctrldesc)
+ return -ENOMEM;
+
+ pctrldesc->pctlops = &lpi_gpio_pinctrl_ops;
+ pctrldesc->pmxops = &lpi_gpio_pinmux_ops;
+ pctrldesc->confops = &lpi_gpio_pinconf_ops;
+ pctrldesc->owner = THIS_MODULE;
+ pctrldesc->name = dev_name(dev);
+ pctrldesc->pins = pindesc;
+ pctrldesc->npins = npins;
+
+ lpi_base = ioremap(reg, LPI_ADDRESS_SIZE);
+ if (lpi_base == NULL) {
+ dev_err(state->dev, "%s ioremap failed\n", __func__);
+ return -ENOMEM;
+ }
+ state->base = lpi_base;
+ for (i = 0; i < npins; i++, pindesc++) {
+ pad = &pads[i];
+ pindesc->drv_data = pad;
+ pindesc->number = i;
+ pindesc->name = lpi_gpio_groups[i];
+
+ pad->base = lpi_base;
+ pad->offset = lpi_offset[i];
+ }
+
+ state->chip = lpi_gpio_template;
+ state->chip.dev = dev;
+ state->chip.base = -1;
+ state->chip.ngpio = npins;
+ state->chip.label = dev_name(dev);
+ state->chip.of_gpio_n_cells = 2;
+ state->chip.can_sleep = false;
+
+ state->ctrl = pinctrl_register(pctrldesc, dev, state);
+ if (IS_ERR(state->ctrl)) {
+ iounmap(state->base);
+ return PTR_ERR(state->ctrl);
+ }
+
+ ret = gpiochip_add(&state->chip);
+ if (ret) {
+ dev_err(state->dev, "can't add gpio chip\n");
+ goto err_chip;
+ }
+
+ ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
+ if (ret) {
+ dev_err(dev, "failed to add pin range\n");
+ goto err_range;
+ }
+
+ lpi_dev_up = false;
+ ret = audio_notifier_register("lpi_tlmm", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &service_nb);
+ if (ret < 0) {
+ pr_err("%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
+ goto err_range;
+ }
+
+ return 0;
+
+err_range:
+ gpiochip_remove(&state->chip);
+err_chip:
+ pinctrl_unregister(state->ctrl);
+ iounmap(state->base);
+ return ret;
+}
+
+static int lpi_pinctrl_remove(struct platform_device *pdev)
+{
+ struct lpi_gpio_state *state = platform_get_drvdata(pdev);
+
+ iounmap(state->base);
+ gpiochip_remove(&state->chip);
+ pinctrl_unregister(state->ctrl);
+ return 0;
+}
+
/* DT match table; bound via MODULE_DEVICE_TABLE below. */
static const struct of_device_id lpi_pinctrl_of_match[] = {
	{ .compatible = "qcom,lpi-pinctrl" }, /* Generic */
	{ },
};
+
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
/* Platform driver glue; registered by module_platform_driver below. */
static struct platform_driver lpi_pinctrl_driver = {
	.driver = {
		   .name = "qcom-lpi-pinctrl",
		   .of_match_table = lpi_pinctrl_of_match,
	},
	.probe = lpi_pinctrl_probe,
	.remove = lpi_pinctrl_remove,
};
+
+module_platform_driver(lpi_pinctrl_driver);
+
+MODULE_DESCRIPTION("QTI LPI GPIO pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index d45cd254ed1c..2b331d5b9e79 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -147,13 +147,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
goto exit;
}
+ if (u_cmd.outsize != s_cmd->outsize ||
+ u_cmd.insize != s_cmd->insize) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
- if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 990308ca384f..92430f781eb7 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -380,3 +380,20 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
return ret;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
/**
 * cros_ec_cmd_xfer_status() - transfer an EC command and fold the EC's
 * own result code into the return value.
 * @ec_dev: EC device
 * @msg:    command message (msg->result is checked after the transfer)
 *
 * Return: the (non-negative) cros_ec_cmd_xfer() result on success,
 * a negative errno on transfer failure, or -EPROTO when the transfer
 * succeeded but the EC reported a non-success msg->result.
 */
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
			    struct cros_ec_command *msg)
{
	int ret;

	ret = cros_ec_cmd_xfer(ec_dev, msg);
	if (ret < 0) {
		dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
	} else if (msg->result != EC_RES_SUCCESS) {
		/* EC-level failure: surface it as a protocol error. */
		dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result);
		return -EPROTO;
	}

	return ret;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 159452c6e3bd..4dc3910737e1 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1634,6 +1634,25 @@ u16 ipa_get_smem_restr_bytes(void)
EXPORT_SYMBOL(ipa_get_smem_restr_bytes);
/**
+ * ipa_broadcast_wdi_quota_reach_ind() - quota reach
+ * @uint32_t fid: [in] input netdev ID
+ * @uint64_t num_bytes: [in] used bytes
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+ uint64_t num_bytes)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_broadcast_wdi_quota_reach_ind,
+ fid, num_bytes);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind);
+
+/**
* ipa_uc_wdi_get_dbpa() - To retrieve
* doorbell physical address of wlan pipes
* @param: [in/out] input/output parameters
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 78fcdeb4b7a0..69bc4ae1fa6a 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -183,6 +183,9 @@ struct ipa_api_controller {
u16 (*ipa_get_smem_restr_bytes)(void);
+ int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
+ uint64_t num_bytes);
+
int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index d51e9ac97fe0..a7f1f9a040f9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2328,6 +2328,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
unsigned int used = *(unsigned int *)skb->cb;
unsigned int used_align = ALIGN(used, 32);
unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+ u32 skb2_len;
IPA_DUMP_BUFF(skb->data, 0, skb->len);
@@ -2510,8 +2511,9 @@ begin:
sys->drop_packet = true;
}
- skb2 = ipa_skb_copy_for_client(skb,
- status->pkt_len + IPA_PKT_STATUS_SIZE);
+ skb2_len = status->pkt_len + IPA_PKT_STATUS_SIZE;
+ skb2_len = min(skb2_len, skb->len);
+ skb2 = ipa_skb_copy_for_client(skb, skb2_len);
if (likely(skb2)) {
if (skb->len < len + IPA_PKT_STATUS_SIZE) {
IPADBG("SPL skb len %d len %d\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 3a666419385e..b4f447f56d1c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -963,6 +963,10 @@ struct ipa3_uc_wdi_ctx {
struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
void *priv;
ipa_uc_ready_cb uc_ready_cb;
+ /* for AP+STA stats update */
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+ ipa_wdi_meter_notifier_cb stats_notify;
+#endif
};
/**
@@ -1674,6 +1678,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl);
int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
@@ -1714,6 +1719,9 @@ enum ipacm_client_enum ipa3_get_client(int pipe_idx);
bool ipa3_get_client_uplink(int pipe_idx);
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
/*
* IPADMA
*/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index a6b075583162..bf794faa7f6a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -880,7 +880,8 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
qmi_ind.apn.mux_id,
(unsigned long int) qmi_ind.apn.num_Mbytes);
- ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+ ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
+ IPA_UPSTEAM_MODEM);
}
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index e0126ec392c3..044200485878 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -181,7 +181,8 @@ int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats
int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
-void ipa3_broadcast_quota_reach_ind(uint32_t mux_id);
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+ enum ipa_upstream_type upstream_type);
int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
*data);
@@ -189,6 +190,8 @@ int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
bool reset);
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -283,7 +286,8 @@ static inline int rmnet_ipa3_set_data_quota(
return -EPERM;
}
-static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id) { }
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+ enum ipa_upstream_type upstream_type) { }
static inline int ipa3_qmi_get_data_stats(
struct ipa_get_data_stats_req_msg_v01 *req,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index e38f6f2860cf..8cb6935cd720 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -13,6 +13,7 @@
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include "ipa_qmi_service.h"
#define IPA_HOLB_TMR_DIS 0x0
@@ -1185,6 +1186,12 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
ep->client_notify = in->sys.notify;
ep->priv = in->sys.priv;
+ /* for AP+STA stats update */
+ if (in->wdi_notify)
+ ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify;
+ else
+ IPADBG("in->wdi_notify is null\n");
+
if (!ep->skip_ep_cfg) {
if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
@@ -1276,6 +1283,12 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+ /* for AP+STA stats update */
+ if (ipa3_ctx->uc_wdi_ctx.stats_notify)
+ ipa3_ctx->uc_wdi_ctx.stats_notify = NULL;
+ else
+ IPADBG("uc_wdi_ctx.stats_notify already null\n");
+
uc_timeout:
return result;
}
@@ -1618,6 +1631,23 @@ uc_timeout:
return result;
}
+/**
+ * ipa_broadcast_wdi_quota_reach_ind() - quota reach
+ * @uint32_t fid: [in] input netdev ID
+ * @uint64_t num_bytes: [in] used bytes
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid,
+ uint64_t num_bytes)
+{
+ IPAERR("Quota reached indication on fis(%d) Mbytes(%lu)\n",
+ fid,
+ (unsigned long int) num_bytes);
+ ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN);
+ return 0;
+}
+
int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
{
int result = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c5b5d1892485..683777d4cacd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1000,11 +1000,39 @@ void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
}
}
+/* ipa3_get_wlan_stats() - get ipa wifi stats
+ *
+ * Return value: success or failure
+ */
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
+{
+ if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+ ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
+ wdi_sap_stats);
+ } else {
+ IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
+{
+ if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+ ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
+ wdi_quota);
+ } else {
+ IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
/**
* ipa3_get_client() - provide client mapping
* @client: client type
*
- * Return value: none
+ * Return value: client mapping enum
*/
enum ipacm_client_enum ipa3_get_client(int pipe_idx)
{
@@ -3122,6 +3150,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+ api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
+ ipa3_broadcast_wdi_quota_reach_ind;
api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 0419249890e9..ff197705d845 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -52,6 +52,7 @@
#define DEFAULT_OUTSTANDING_LOW 32
#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
@@ -783,6 +784,22 @@ static int find_vchannel_name_index(const char *vchannel_name)
return MAX_NUM_OF_MUX_CHANNEL;
}
+static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+ if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+ upstreamIface) == 0)
+ return IPA_UPSTEAM_MODEM;
+ }
+
+ if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
+ return IPA_UPSTEAM_WLAN;
+ else
+ return MAX_NUM_OF_MUX_CHANNEL;
+}
+
static int ipa3_wwan_register_to_ipa(int index)
{
struct ipa_tx_intf tx_properties = {0};
@@ -2598,10 +2615,10 @@ int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
}
/**
- * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
* @data - IOCTL data
*
- * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
* It translates the given interface name to the Modem MUX ID and
* sends the request of the quota to the IPA Modem driver via QMI.
*
@@ -2610,12 +2627,17 @@ int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
* -EFAULT: Invalid interface name provided
* other: See ipa_qmi_set_data_quota
*/
-int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+static int rmnet_ipa3_set_data_quota_modem(
+ struct wan_ioctl_set_data_quota *data)
{
u32 mux_id;
int index;
struct ipa_set_data_usage_quota_req_msg_v01 req;
+ /* stop quota */
+ if (!data->set_quota)
+ ipa3_qmi_stop_data_qouta();
+
index = find_vchannel_name_index(data->interface_name);
IPAWANERR("iface name %s, quota %lu\n",
data->interface_name,
@@ -2639,6 +2661,64 @@ int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
return ipa3_qmi_set_data_quota(&req);
}
+static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
+{
+ struct ipa_set_wifi_quota wifi_quota;
+ int rc = 0;
+
+ memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
+ wifi_quota.set_quota = data->set_quota;
+ wifi_quota.quota_bytes = data->quota_mbytes;
+ IPAWANERR("iface name %s, quota %lu\n",
+ data->interface_name,
+ (unsigned long int) data->quota_mbytes);
+
+ rc = ipa3_set_wlan_quota(&wifi_quota);
+ /* check if wlan-fw takes this quota-set */
+ if (!wifi_quota.set_valid)
+ rc = -EFAULT;
+ return rc;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->interface_name);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR("Wrong interface_name name %s\n", data->interface_name);
+		return -EFAULT; /* honor the documented -EFAULT contract */
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		rc = rmnet_ipa3_set_data_quota_wifi(data);
+		if (rc) {
+			IPAWANERR("set quota on wifi failed\n");
+			return rc;
+		}
+	} else {
+		rc = rmnet_ipa3_set_data_quota_modem(data);
+		if (rc) {
+			IPAWANERR("set quota on modem failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
/* rmnet_ipa_set_tether_client_pipe() -
* @data - IOCTL data
*
@@ -2686,8 +2766,61 @@ int rmnet_ipa3_set_tether_client_pipe(
return 0;
}
-int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
- bool reset)
+static int rmnet_ipa3_query_tethering_stats_wifi(
+ struct wan_ioctl_query_tether_stats *data, bool reset)
+{
+ struct ipa_get_wdi_sap_stats *sap_stats;
+ int rc;
+
+ sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
+ GFP_KERNEL);
+ if (!sap_stats) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return -ENOMEM;
+ }
+ memset(sap_stats, 0, sizeof(struct ipa_get_wdi_sap_stats));
+
+ sap_stats->reset_stats = reset;
+ IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
+
+ rc = ipa3_get_wlan_stats(sap_stats);
+ if (rc) {
+ IPAWANERR("can't get ipa3_get_wlan_stats\n");
+ kfree(sap_stats);
+ return rc;
+ } else if (reset) {
+ kfree(sap_stats);
+ return 0;
+ }
+
+ if (sap_stats->stats_valid) {
+ data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
+ data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
+ data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
+ data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
+ data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
+ data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
+ data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
+ data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
+ }
+
+ IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+ (unsigned long int) data->ipv4_rx_packets,
+ (unsigned long int) data->ipv6_rx_packets,
+ (unsigned long int) data->ipv4_rx_bytes,
+ (unsigned long int) data->ipv6_rx_bytes);
+ IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->ipv4_tx_packets,
+ (unsigned long int) data->ipv6_tx_packets,
+ (unsigned long int) data->ipv4_tx_bytes,
+ (unsigned long int) data->ipv6_tx_bytes);
+
+ kfree(sap_stats);
+ return rc;
+}
+
+static int rmnet_ipa3_query_tethering_stats_modem(
+ struct wan_ioctl_query_tether_stats *data, bool reset)
{
struct ipa_get_data_stats_req_msg_v01 *req;
struct ipa_get_data_stats_resp_msg_v01 *resp;
@@ -2774,7 +2907,7 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
}
}
}
- IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+ IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
(unsigned long int) data->ipv4_rx_packets,
(unsigned long int) data->ipv6_rx_packets,
(unsigned long int) data->ipv4_rx_bytes,
@@ -2824,7 +2957,7 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
}
}
}
- IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+ IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
(unsigned long int) data->ipv4_tx_packets,
(unsigned long int) data->ipv6_tx_packets,
(unsigned long int) data->ipv4_tx_bytes,
@@ -2834,6 +2967,69 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
return 0;
}
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->upstreamIface);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface name %s\n", data->upstreamIface);
+		return -EFAULT; /* don't return stale stats for unknown iface */
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		IPAWANDBG_LOW(" query wifi-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_wifi(
+			data, false);
+		if (rc) {
+			IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+			return rc;
+		}
+	} else {
+		IPAWANDBG_LOW(" query modem-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_modem(
+			data, false);
+		if (rc) {
+			IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->upstreamIface);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface name %s\n", data->upstreamIface);
+		return -EFAULT; /* nothing was reset; tell the caller */
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		IPAWANERR(" reset wifi-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_wifi(
+			NULL, true);
+		if (rc) {
+			IPAWANERR("reset WLAN stats failed\n");
+			return rc;
+		}
+	} else {
+		IPAWANERR(" reset modem-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_modem(
+			NULL, true);
+		if (rc) {
+			IPAWANERR("reset MODEM stats failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
/**
* ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
* @mux_id - The MUX ID on which the quota has been reached
@@ -2843,23 +3039,28 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
* on the specific interface which matches the mux_id has been reached.
*
*/
-void ipa3_broadcast_quota_reach_ind(u32 mux_id)
+void ipa3_broadcast_quota_reach_ind(u32 mux_id,
+ enum ipa_upstream_type upstream_type)
{
char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
char *envp[IPA_UEVENT_NUM_EVNP] = {
- alert_msg, iface_name_l, iface_name_m, NULL };
+ alert_msg, iface_name_l, iface_name_m, NULL};
int res;
int index;
- index = ipa3_find_mux_channel_index(mux_id);
-
- if (index == MAX_NUM_OF_MUX_CHANNEL) {
- IPAWANERR("%u is an mux ID\n", mux_id);
+	/* check upstream_type */
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface type %d\n", upstream_type);
 return;
+	} else if (upstream_type == IPA_UPSTEAM_MODEM) {
+		index = ipa3_find_mux_channel_index(mux_id);
+		if (index == MAX_NUM_OF_MUX_CHANNEL) {
+			IPAWANERR("%u is an invalid mux ID\n", mux_id);
+			return;
+		}
}
-
res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
"ALERT_NAME=%s", "quotaReachedAlert");
if (IPA_QUOTA_REACH_ALERT_MAX_SIZE <= res) {
@@ -2867,15 +3068,25 @@ void ipa3_broadcast_quota_reach_ind(u32 mux_id)
return;
}
/* posting msg for L-release for CNE */
+ if (upstream_type == IPA_UPSTEAM_MODEM) {
res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
"UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ } else {
+ res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+ }
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
}
/* posting msg for M-release for CNE */
+ if (upstream_type == IPA_UPSTEAM_MODEM) {
res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
"INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ } else {
+ res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+ }
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 92636cba0f1c..ffd127a21ed1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -279,8 +279,9 @@ static long ipa3_wan_ioctl(struct file *filp,
break;
}
- if (rmnet_ipa3_query_tethering_stats(NULL, true)) {
- IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+ if (rmnet_ipa3_reset_tethering_stats(
+ (struct wan_ioctl_reset_tether_stats *)param)) {
+ IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n");
retval = -EFAULT;
break;
}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index fb4dd7b3ee71..af2046c87806 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -723,6 +723,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
if (err)
return err;
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
+ sizeof(wireless), 0);
+ if (err)
+ return err;
+
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
RFKILL_TYPE_WLAN,
@@ -910,7 +915,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
gps_rfkill = NULL;
rfkill2_count = 0;
- if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
+ if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
err = device_create_file(&device->dev, &dev_attr_display);
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 943c1cb9566c..d28e3ab9479c 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -342,7 +342,9 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
/* Device IDs of parts that have 32KB MCH space */
static const unsigned int mch_quirk_devices[] = {
0x0154, /* Ivy Bridge */
+ 0x0a04, /* Haswell-ULT */
0x0c00, /* Haswell */
+ 0x1604, /* Broadwell */
};
static struct pci_dev *get_intel_host(void)
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 456987c88baa..b13cd074c52a 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
WARN_ON(tzd == NULL);
psy = tzd->devdata;
- ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ if (ret)
+ return ret;
/* Convert tenths of degree Celsius to milli degree Celsius. */
- if (!ret)
- *temp = val.intval * 100;
+ *temp = val.intval * 100;
return ret;
}
@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index e73ed2f1d288..543189ae5498 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -210,6 +210,13 @@ static struct smb_params v1_params = {
.max_u = 2000,
.step_u = 200,
},
+ .freq_boost = {
+ .name = "boost switching frequency",
+ .reg = CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG,
+ .min_u = 600,
+ .max_u = 2000,
+ .step_u = 200,
+ },
};
#define STEP_CHARGING_MAX_STEPS 5
@@ -218,6 +225,7 @@ struct smb_dt_props {
int usb_icl_ua;
int otg_cl_ua;
int dc_icl_ua;
+ int boost_threshold_ua;
int fv_uv;
int wipower_max_uw;
u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
@@ -243,6 +251,7 @@ module_param_named(
);
#define MICRO_1P5A 1500000
+#define MICRO_P1A 100000
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -304,6 +313,12 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chip->dt.dc_icl_ua = -EINVAL;
+ rc = of_property_read_u32(node,
+ "qcom,boost-threshold-ua",
+ &chip->dt.boost_threshold_ua);
+ if (rc < 0)
+ chip->dt.boost_threshold_ua = MICRO_P1A;
+
rc = of_property_read_u32(node, "qcom,wipower-max-uw",
&chip->dt.wipower_max_uw);
if (rc < 0)
@@ -370,6 +385,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_BOOST_CURRENT,
POWER_SUPPLY_PROP_PE_START,
};
@@ -436,6 +452,9 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
rc = smblib_get_prop_usb_current_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_BOOST_CURRENT:
+ val->intval = chg->boost_current_ua;
+ break;
case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
rc = smblib_get_prop_pd_in_hard_reset(chg, val);
break;
@@ -490,6 +509,9 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
chg->system_suspend_supported = val->intval;
break;
+ case POWER_SUPPLY_PROP_BOOST_CURRENT:
+ rc = smblib_set_prop_boost_current(chg, val);
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -1073,6 +1095,7 @@ static int smb2_init_hw(struct smb2 *chip)
chg->otg_cl_ua = chip->dt.otg_cl_ua;
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+ chg->boost_threshold_ua = chip->dt.boost_threshold_ua;
rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
if (rc < 0) {
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 987e8258d301..9348091ec8dd 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -780,16 +780,6 @@ static int smblib_otg_cl_config(struct smb_charger *chg, int otg_cl_ua)
return rc;
}
- /* configure PFM/PWM mode for OTG regulator */
- rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG3_REG,
- ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT,
- otg_cl_ua > MICRO_250MA ? 1 : 0);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't write DC_ENG_SSUPPLY_CFG3_REG rc=%d\n", rc);
- return rc;
- }
-
return rc;
}
@@ -1155,32 +1145,9 @@ int smblib_get_prop_batt_capacity(struct smb_charger *chg,
int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val)
{
- union power_supply_propval pval = {0, };
- bool usb_online, dc_online;
u8 stat;
int rc;
- rc = smblib_get_prop_usb_online(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get usb online property rc=%d\n",
- rc);
- return rc;
- }
- usb_online = (bool)pval.intval;
-
- rc = smblib_get_prop_dc_online(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get dc online property rc=%d\n",
- rc);
- return rc;
- }
- dc_online = (bool)pval.intval;
-
- if (!usb_online && !dc_online) {
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- return rc;
- }
-
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
@@ -1498,7 +1465,7 @@ int smblib_get_prop_dc_online(struct smb_charger *chg,
stat);
val->intval = (stat & USE_DCIN_BIT) &&
- (stat & VALID_INPUT_POWER_SOURCE_BIT);
+ (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
return rc;
}
@@ -1511,7 +1478,7 @@ int smblib_get_prop_dc_current_max(struct smb_charger *chg,
}
/*******************
- * USB PSY SETTERS *
+ * DC PSY SETTERS *
* *****************/
int smblib_set_prop_dc_current_max(struct smb_charger *chg,
@@ -1564,7 +1531,7 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
stat);
val->intval = (stat & USE_USBIN_BIT) &&
- (stat & VALID_INPUT_POWER_SOURCE_BIT);
+ (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
return rc;
}
@@ -1893,6 +1860,25 @@ int smblib_set_prop_usb_current_max(struct smb_charger *chg,
return rc;
}
+#define FSW_2MHZ 2000
+#define FSW_800KHZ_RESET 800
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.freq_boost,
+ val->intval <= chg->boost_threshold_ua ?
+ FSW_2MHZ : FSW_800KHZ_RESET);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
+ return rc;
+ }
+
+ chg->boost_current_ua = val->intval;
+ return rc;
+}
+
int smblib_set_prop_typec_power_role(struct smb_charger *chg,
const union power_supply_propval *val)
{
@@ -2763,6 +2749,12 @@ static void typec_sink_insertion(struct smb_charger *chg)
false, 0);
}
+static void typec_sink_removal(struct smb_charger *chg)
+{
+ smblib_set_charge_param(chg, &chg->param.freq_boost, FSW_800KHZ_RESET);
+ chg->boost_current_ua = 0;
+}
+
static void smblib_handle_typec_removal(struct smb_charger *chg)
{
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
@@ -2782,6 +2774,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
typec_source_removal(chg);
+ typec_sink_removal(chg);
smblib_update_usb_type(chg);
}
@@ -2799,6 +2792,7 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg,
typec_sink_insertion(chg);
} else {
typec_source_insertion(chg);
+ typec_sink_removal(chg);
}
vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 001b62ad5b02..089c6a6fe1b2 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -110,6 +110,7 @@ struct smb_params {
struct smb_chg_param step_soc;
struct smb_chg_param step_cc_delta[5];
struct smb_chg_param freq_buck;
+ struct smb_chg_param freq_boost;
};
struct parallel_params {
@@ -198,6 +199,7 @@ struct smb_charger {
int voltage_max_uv;
int pd_active;
bool system_suspend_supported;
+ int boost_threshold_ua;
int system_temp_level;
int thermal_levels;
@@ -216,6 +218,7 @@ struct smb_charger {
/* workaround flag */
u32 wa_flags;
enum cc2_sink_type cc2_sink_detach_flag;
+ int boost_current_ua;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -347,6 +350,8 @@ int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_set_prop_typec_power_role(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_pd_active(struct smb_charger *chg,
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index ed43051adab6..d9d12c9d7cf6 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -1000,5 +1000,6 @@ enum {
#define SYSOK_OPTIONS_MASK GENMASK(2, 0)
#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG (MISC_BASE + 0xA0)
+#define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG (MISC_BASE + 0xA1)
#endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/qcom-charger/smb1351-charger.c b/drivers/power/qcom-charger/smb1351-charger.c
index 79fbe33acf5d..e9d8c0e08447 100644
--- a/drivers/power/qcom-charger/smb1351-charger.c
+++ b/drivers/power/qcom-charger/smb1351-charger.c
@@ -3087,7 +3087,7 @@ static int smb1351_main_charger_probe(struct i2c_client *client,
chip->adc_param.low_temp = chip->batt_cool_decidegc;
chip->adc_param.high_temp = chip->batt_warm_decidegc;
}
- chip->adc_param.timer_interval = ADC_MEAS2_INTERVAL_1S;
+ chip->adc_param.timer_interval = ADC_MEAS1_INTERVAL_500MS;
chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
chip->adc_param.btm_ctx = chip;
chip->adc_param.threshold_notification =
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 38a8bbe74810..83797d89c30f 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
- if (strcmp(pardev->name, KBUILD_MODNAME))
+ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index f9dfc8b6407a..7225ac6b3df5 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -80,7 +80,6 @@ struct fsl_pwm_chip {
struct mutex lock;
- unsigned int use_count;
unsigned int cnt_select;
unsigned int clk_ps;
@@ -300,9 +299,6 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
{
int ret;
- if (fpc->use_count++ != 0)
- return 0;
-
/* select counter clock source */
regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
FTM_SC_CLK(fpc->cnt_select));
@@ -334,25 +330,6 @@ static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return ret;
}
-static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
-{
- /*
- * already disabled, do nothing
- */
- if (fpc->use_count == 0)
- return;
-
- /* there are still users, so can't disable yet */
- if (--fpc->use_count > 0)
- return;
-
- /* no users left, disable PWM counter clock */
- regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0);
-
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
- clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
-}
-
static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
@@ -362,7 +339,8 @@ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
BIT(pwm->hwpwm));
- fsl_counter_clock_disable(fpc);
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
regmap_read(fpc->regmap, FTM_OUTMASK, &val);
if ((val & 0xFF) == 0xFF)
@@ -492,17 +470,24 @@ static int fsl_pwm_remove(struct platform_device *pdev)
static int fsl_pwm_suspend(struct device *dev)
{
struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
- u32 val;
+ int i;
regcache_cache_only(fpc->regmap, true);
regcache_mark_dirty(fpc->regmap);
- /* read from cache */
- regmap_read(fpc->regmap, FTM_OUTMASK, &val);
- if ((val & 0xFF) != 0xFF) {
+ for (i = 0; i < fpc->chip.npwm; i++) {
+ struct pwm_device *pwm = &fpc->chip.pwms[i];
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ continue;
+
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+
+ if (!pwm_is_enabled(pwm))
+ continue;
+
clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
}
return 0;
@@ -511,12 +496,19 @@ static int fsl_pwm_suspend(struct device *dev)
static int fsl_pwm_resume(struct device *dev)
{
struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
- u32 val;
+ int i;
+
+ for (i = 0; i < fpc->chip.npwm; i++) {
+ struct pwm_device *pwm = &fpc->chip.pwms[i];
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ continue;
- /* read from cache */
- regmap_read(fpc->regmap, FTM_OUTMASK, &val);
- if ((val & 0xFF) != 0xFF) {
clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+
+ if (!pwm_is_enabled(pwm))
+ continue;
+
clk_prepare_enable(fpc->clk[fpc->cnt_select]);
clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
}
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 9fde60ce8e7b..6e203a65effb 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -24,9 +24,7 @@ struct lpc32xx_pwm_chip {
void __iomem *base;
};
-#define PWM_ENABLE (1 << 31)
-#define PWM_RELOADV(x) (((x) & 0xFF) << 8)
-#define PWM_DUTY(x) ((x) & 0xFF)
+#define PWM_ENABLE BIT(31)
#define to_lpc32xx_pwm_chip(_chip) \
container_of(_chip, struct lpc32xx_pwm_chip, chip)
@@ -38,40 +36,27 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long long c;
int period_cycles, duty_cycles;
u32 val;
-
- c = clk_get_rate(lpc32xx->clk) / 256;
- c = c * period_ns;
- do_div(c, NSEC_PER_SEC);
-
- /* Handle high and low extremes */
- if (c == 0)
- c = 1;
- if (c > 255)
- c = 0; /* 0 set division by 256 */
- period_cycles = c;
-
- /* The duty-cycle value is as follows:
- *
- * DUTY-CYCLE HIGH LEVEL
- * 1 99.9%
- * 25 90.0%
- * 128 50.0%
- * 220 10.0%
- * 255 0.1%
- * 0 0.0%
- *
- * In other words, the register value is duty-cycle % 256 with
- * duty-cycle in the range 1-256.
- */
- c = 256 * duty_ns;
- do_div(c, period_ns);
- if (c > 255)
- c = 255;
- duty_cycles = 256 - c;
+ c = clk_get_rate(lpc32xx->clk);
+
+ /* The highest acceptable divisor is 256, which is represented by 0 */
+ period_cycles = div64_u64(c * period_ns,
+ (unsigned long long)NSEC_PER_SEC * 256);
+ if (!period_cycles)
+ period_cycles = 1;
+ if (period_cycles > 255)
+ period_cycles = 0;
+
+ /* Compute 256 x #duty/period value and care for corner cases */
+ duty_cycles = div64_u64((unsigned long long)(period_ns - duty_ns) * 256,
+ period_ns);
+ if (!duty_cycles)
+ duty_cycles = 1;
+ if (duty_cycles > 255)
+ duty_cycles = 255;
val = readl(lpc32xx->base + (pwm->hwpwm << 2));
val &= ~0xFFFF;
- val |= PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles);
+ val |= (period_cycles << 8) | duty_cycles;
writel(val, lpc32xx->base + (pwm->hwpwm << 2));
return 0;
@@ -134,7 +119,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.dev = &pdev->dev;
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
- lpc32xx->chip.npwm = 2;
+ lpc32xx->chip.npwm = 1;
lpc32xx->chip.base = -1;
ret = pwmchip_add(&lpc32xx->chip);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 63cd5e68c864..3a6d0290c54c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
sreg->sel = 22;
- if (!sreg->sel) {
+ if (!sreg->bypass && !sreg->sel) {
dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
return -EINVAL;
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9e03d158f411..4f7ce0097191 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1239,11 +1239,6 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
- /* expose to rproc_get_by_phandle users */
- mutex_lock(&rproc_list_mutex);
- list_add(&rproc->node, &rproc_list);
- mutex_unlock(&rproc_list_mutex);
-
dev_info(dev, "%s is available\n", rproc->name);
dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
@@ -1251,8 +1246,16 @@ int rproc_add(struct rproc *rproc)
/* create debugfs entries */
rproc_create_debug_dir(rproc);
+ ret = rproc_add_virtio_devices(rproc);
+ if (ret < 0)
+ return ret;
- return rproc_add_virtio_devices(rproc);
+ /* expose to rproc_get_by_phandle users */
+ mutex_lock(&rproc_list_mutex);
+ list_add(&rproc->node, &rproc_list);
+ mutex_unlock(&rproc_list_mutex);
+
+ return 0;
}
EXPORT_SYMBOL(rproc_add);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index ffb860d18701..f92528822f06 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
if (!is_power_of_2(freq))
return -EINVAL;
+ s3c_rtc_enable_clk(info);
spin_lock_irq(&info->pie_lock);
if (info->data->set_freq)
info->data->set_freq(info, freq);
spin_unlock_irq(&info->pie_lock);
+ s3c_rtc_disable_clk(info);
return 0;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4abfbdb285ec..84c13dffa3a8 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1584,9 +1584,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
unsigned long long now;
int expires;
+ cqr = (struct dasd_ccw_req *) intparm;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
+ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
+ device = (struct dasd_device *) cqr->startdev;
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_device_clear_timer(device);
+ wake_up(&dasd_flush_wq);
+ dasd_schedule_device_bh(device);
+ return;
+ }
break;
case -ETIMEDOUT:
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
@@ -1602,7 +1611,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
now = get_tod_clock();
- cqr = (struct dasd_ccw_req *) intparm;
/* check for conditions that should be handled immediately */
if (!cqr ||
!(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
+ unsigned long copied;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+ copied = PAGE_SIZE -
+ copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+ if (offsetof(struct sccb_header, length) +
+ sizeof(sccb->length) > copied || sccb->length > copied) {
rc = -EFAULT;
goto out_free;
}
- if (sccb->length > PAGE_SIZE || sccb->length < 8)
- return -EINVAL;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
- rc = -EFAULT;
+ if (sccb->length < 8) {
+ rc = -EINVAL;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index c692dfebd0ba..50597f9522fe 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
- if (!chp->cmg_chars)
+ if (chp->cmg == -1)
return 0;
- return memory_read_from_buffer(buf, count, &off,
- chp->cmg_chars, sizeof(struct cmg_chars));
+ return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+ sizeof(chp->cmg_chars));
}
static struct bin_attribute chp_measurement_chars_attr = {
@@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
* chp_update_desc - update channel-path description
* @chp - channel-path
*
- * Update the channel-path description of the specified channel-path.
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
* Return zero on success, non-zero otherwise.
*/
int chp_update_desc(struct channel_path *chp)
@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
return rc;
rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+ if (rc)
+ return rc;
- return rc;
+ return chsc_get_channel_measurement_chars(chp);
}
/**
@@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
ret = -ENODEV;
goto out_free;
}
- /* Get channel-measurement characteristics. */
- if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
- ret = chsc_get_channel_measurement_chars(chp);
- if (ret)
- goto out_free;
- } else {
- chp->cmg = -1;
- }
dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* make it known to the system */
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 4efd5b867cc3..af0232290dc4 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -48,7 +48,7 @@ struct channel_path {
/* Channel-measurement related stuff: */
int cmg;
int shared;
- void *cmg_chars;
+ struct cmg_chars cmg_chars;
};
/* Return channel_path struct for given chpid. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a831d18596a5..c424c0c7367e 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <asm/cio.h>
@@ -224,8 +225,9 @@ out_unreg:
void chsc_chp_offline(struct chp_id chpid)
{
- char dbf_txt[15];
+ struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
+ char dbf_txt[15];
sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
void chsc_chp_online(struct chp_id chpid)
{
- char dbf_txt[15];
+ struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
+ char dbf_txt[15];
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
css_schedule_reprobe();
@@ -967,22 +980,19 @@ static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
- struct cmg_chars *cmg_chars;
int i, mask;
- cmg_chars = chp->cmg_chars;
for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
mask = 0x80 >> (i + 3);
if (cmcv & mask)
- cmg_chars->values[i] = chars->values[i];
+ chp->cmg_chars.values[i] = chars->values[i];
else
- cmg_chars->values[i] = 0;
+ chp->cmg_chars.values[i] = 0;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
- struct cmg_chars *cmg_chars;
int ccode, ret;
struct {
@@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
- chp->cmg_chars = NULL;
- cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
- if (!cmg_chars)
- return -ENOMEM;
+ chp->shared = -1;
+ chp->cmg = -1;
+
+ if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+ return 0;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
scmc_area->response.code);
goto out;
}
- if (scmc_area->not_valid) {
- chp->cmg = -1;
- chp->shared = -1;
+ if (scmc_area->not_valid)
goto out;
- }
+
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
if (chp->cmg != 2 && chp->cmg != 3) {
/* No cmg-dependent data. */
goto out;
}
- chp->cmg_chars = cmg_chars;
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data);
out:
spin_unlock_irq(&chsc_page_lock);
- if (!chp->cmg_chars)
- kfree(cmg_chars);
-
return ret;
}
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b2afad5a5682..2a34eb5f6161 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device *cdev)
cmf_generic_reset(cdev);
}
+static int cmf_enabled(struct ccw_device *cdev)
+{
+ int enabled;
+
+ spin_lock_irq(cdev->ccwlock);
+ enabled = !!cdev->private->cmb;
+ spin_unlock_irq(cdev->ccwlock);
+
+ return enabled;
+}
+
static struct attribute_group cmf_attr_group;
static struct cmb_operations cmbops_basic = {
@@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct device *dev,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
- int enabled;
- spin_lock_irq(cdev->ccwlock);
- enabled = !!cdev->private->cmb;
- spin_unlock_irq(cdev->ccwlock);
-
- return sprintf(buf, "%d\n", enabled);
+ return sprintf(buf, "%d\n", cmf_enabled(cdev));
}
static ssize_t cmb_enable_store(struct device *dev,
@@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable)
* @cdev: The ccw device to be enabled
*
* Returns %0 for success or a negative error value.
- *
+ * Note: If this is called on a device for which channel measurement is already
+ * enabled a reset of the measurement data is triggered.
* Context:
* non-atomic
*/
int enable_cmf(struct ccw_device *cdev)
{
- int ret;
+ int ret = 0;
device_lock(&cdev->dev);
+ if (cmf_enabled(cdev)) {
+ cmbops->reset(cdev);
+ goto out_unlock;
+ }
get_device(&cdev->dev);
ret = cmbops->alloc(cdev);
if (ret)
@@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev)
out:
if (ret)
put_device(&cdev->dev);
-
+out_unlock:
device_unlock(&cdev->dev);
return ret;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8f1b091e1732..df036b872b05 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
@@ -1126,6 +1127,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
qeth_l2_request_initial_mac(card);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
+ netif_carrier_off(card->dev);
return register_netdev(card->dev);
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 543960e96b42..cc4d3c3d8cc5 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3220,6 +3220,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
+ netif_carrier_off(card->dev);
return register_netdev(card->dev);
}
@@ -3246,6 +3247,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 54195a117f72..f78cc943d230 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
struct fib *fibptr;
struct hw_fib * hw_fib = (struct hw_fib *)0;
dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
- unsigned size;
+ unsigned int size, osize;
int retval;
if (dev->in_reset) {
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
* will not overrun the buffer when we copy the memory. Return
* an error if we would.
*/
- size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+ osize = size = le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr);
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
goto cleanup;
}
+ /* Sanity check the second copy */
+ if ((osize != le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr))
+ || (size < le16_to_cpu(kfib->header.SenderSize))) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 333db5953607..41f9a00e4f74 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2664,7 +2664,7 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
- return false;
+ goto err_free_dma;
}
count = 8;
while (count){
@@ -2694,19 +2694,23 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
acb->firm_model,
acb->firm_version);
- acb->signature = readl(&reg->message_rwbuffer[1]);
+ acb->signature = readl(&reg->message_rwbuffer[0]);
/*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
+ acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
/*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
+ acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
/*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
+ acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
/*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
+ acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
/*firm_ide_channels,4,16-19*/
acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
/*firm_ide_channels,4,16-19*/
return true;
+err_free_dma:
+ dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+ acb->dma_coherent2, acb->dma_coherent_handle2);
+ return false;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
@@ -2880,15 +2884,15 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
iop_device_map++;
count--;
}
- acb->signature = readl(&reg->msgcode_rwbuffer[1]);
+ acb->signature = readl(&reg->msgcode_rwbuffer[0]);
/*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
+ acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
/*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
+ acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
/*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
+ acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
/*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
+ acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
/*firm_hd_channels,4,16-19*/
acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index fa09d4be2b53..2b456ca69d5c 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1181,8 +1181,9 @@ static const char * const snstext[] = {
/* Get sense key string or NULL if not available */
const char *
-scsi_sense_key_string(unsigned char key) {
- if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+ if (key < ARRAY_SIZE(snstext))
return snstext[key];
return NULL;
}
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index c11cd193f896..5ada9268a450 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -165,6 +165,8 @@ struct afu {
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
+ struct kref mapcount;
+
ctx_hndl_t ctx_hndl; /* master's context handle */
u64 *hrrq_start;
u64 *hrrq_end;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 1e5bf0ca81da..c86847c68448 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
atomic64_set(&afu->room, room);
if (room)
goto write_rrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
if (rrin != 0x1)
break;
/* Double delay each time */
- udelay(2 << nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
}
@@ -338,7 +338,7 @@ retry:
atomic64_set(&afu->room, room);
if (room)
goto write_ioarrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
* afu->room.
*/
if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(nretry);
+ udelay(1 << nretry);
goto retry;
}
@@ -368,6 +368,7 @@ out:
no_room:
afu->read_room = true;
+ kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
@@ -473,6 +474,16 @@ out:
return rc;
}
+static void afu_unmap(struct kref *ref)
+{
+ struct afu *afu = container_of(ref, struct afu, mapcount);
+
+ if (likely(afu->afu_map)) {
+ cxl_psa_unmap((void __iomem *)afu->afu_map);
+ afu->afu_map = NULL;
+ }
+}
+
/**
* cxlflash_driver_info() - information handler for this host driver
* @host: SCSI host associated with device.
@@ -503,6 +514,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
ulong lock_flags;
short lflag = 0;
int rc = 0;
+ int kref_got = 0;
dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
"cdb=(%08X-%08X-%08X-%08X)\n",
@@ -547,6 +559,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
goto out;
}
+ kref_get(&cfg->afu->mapcount);
+ kref_got = 1;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
@@ -587,6 +602,8 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
}
out:
+ if (kref_got)
+ kref_put(&afu->mapcount, afu_unmap);
pr_devel("%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -632,20 +649,36 @@ static void free_mem(struct cxlflash_cfg *cfg)
* @cfg: Internal structure associated with the host.
*
* Safe to call with AFU in a partially allocated/initialized state.
+ *
+ * Cleans up all state associated with the command queue, and unmaps
+ * the MMIO space.
+ *
+ * - complete() will take care of commands we initiated (they'll be checked
+ * in as part of the cleanup that occurs after the completion)
+ *
+ * - cmd_checkin() will take care of entries that we did not initiate and that
+ * have not (and will not) complete because they are sitting on a [now stale]
+ * hardware queue
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
int i;
struct afu *afu = cfg->afu;
+ struct afu_cmd *cmd;
if (likely(afu)) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
- complete(&afu->cmd[i].cevent);
+ for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+ cmd = &afu->cmd[i];
+ complete(&cmd->cevent);
+ if (!atomic_read(&cmd->free))
+ cmd_checkin(cmd);
+ }
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
}
+ kref_put(&afu->mapcount, afu_unmap);
}
}
@@ -731,8 +764,8 @@ static void cxlflash_remove(struct pci_dev *pdev)
scsi_remove_host(cfg->host);
/* fall through */
case INIT_STATE_AFU:
- term_afu(cfg);
cancel_work_sync(&cfg->work_q);
+ term_afu(cfg);
case INIT_STATE_PCI:
pci_release_regions(cfg->dev);
pci_disable_device(pdev);
@@ -1108,7 +1141,7 @@ static const struct asyc_intr_info ainfo[] = {
{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
- {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
+ {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
@@ -1316,6 +1349,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
__func__, port);
cfg->lr_state = LINK_RESET_REQUIRED;
cfg->lr_port = port;
+ kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
}
@@ -1336,6 +1370,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
if (info->action & SCAN_HOST) {
atomic_inc(&cfg->scan_host_needed);
+ kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
}
}
@@ -1731,6 +1766,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
rc = -ENOMEM;
goto err1;
}
+ kref_init(&afu->mapcount);
/* No byte reverse on reading afu_version or string will be backwards */
reg = readq(&afu->afu_map->global.regs.afu_version);
@@ -1765,8 +1801,7 @@ out:
return rc;
err2:
- cxl_psa_unmap((void __iomem *)afu->afu_map);
- afu->afu_map = NULL;
+ kref_put(&afu->mapcount, afu_unmap);
err1:
term_mc(cfg, UNDO_START);
goto out;
@@ -2114,6 +2149,16 @@ static ssize_t lun_mode_store(struct device *dev,
rc = kstrtouint(buf, 10, &lun_mode);
if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
afu->internal_lun = lun_mode;
+
+ /*
+ * When configured for internal LUN, there is only one channel,
+ * channel number 0, else there will be 2 (default).
+ */
+ if (afu->internal_lun)
+ shost->max_channel = 0;
+ else
+ shost->max_channel = NUM_FC_PORTS - 1;
+
afu_reset(cfg);
scsi_scan_host(cfg->host);
}
@@ -2274,6 +2319,7 @@ static struct scsi_host_template driver_template = {
* Device dependent values
*/
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
+static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };
/*
* PCI device binding table
@@ -2281,6 +2327,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
static struct pci_device_id cxlflash_pci_table[] = {
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
+ {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
{}
};
@@ -2339,6 +2387,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
scsi_scan_host(cfg->host);
+ kref_put(&afu->mapcount, afu_unmap);
}
/**
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 60324566c14f..3d2d606fafb3 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -24,8 +24,8 @@
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
#define CXLFLASH_DRIVER_DATE "(August 13, 2015)"
-#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
-#define CXLFLASH_SUBS_DEV_ID 0x04F0
+#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
+#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
/* Since there is only one target, make it 0 */
#define CXLFLASH_TARGET 0
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index cac2e6a50efd..babe7ccc1777 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1380,7 +1380,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
}
ctxid = cxl_process_element(ctx);
- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
goto err2;
@@ -1508,7 +1508,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
}
ctxid = cxl_process_element(ctx);
- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
goto err1;
@@ -1590,6 +1590,13 @@ err1:
* place at the same time and the failure was due to CXL services being
* unable to keep up.
*
+ * As this routine is called on ioctl context, it holds the ioctl r/w
+ * semaphore that is used to drain ioctls in recovery scenarios. The
+ * implementation to achieve the pacing described above (a local mutex)
+ * requires that the ioctl r/w semaphore be dropped and reacquired to
+ * avoid a 3-way deadlock when multiple process recoveries operate in
+ * parallel.
+ *
* Because a user can detect an error condition before the kernel, it is
* quite possible for this routine to act as the kernel's EEH detection
* source (MMIO read of mbox_r). Because of this, there is a window of
@@ -1617,9 +1624,17 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
int rc = 0;
atomic_inc(&cfg->recovery_threads);
+ up_read(&cfg->ioctl_rwsem);
rc = mutex_lock_interruptible(mutex);
+ down_read(&cfg->ioctl_rwsem);
if (rc)
goto out;
+ rc = check_state(cfg);
+ if (rc) {
+ dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
+ rc = -ENODEV;
+ goto out;
+ }
dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
__func__, recover->reason, rctxid);
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index a53f583e2d7b..50f8e9300770 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -1008,6 +1008,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
virt->last_lba = last_lba;
virt->rsrc_handle = rsrc_handle;
+ if (lli->port_sel == BOTH_PORTS)
+ virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
out:
if (likely(ctxi))
put_context(ctxi);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 43ac62623bf2..7a58128a0000 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -10095,6 +10095,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b0e6fe46448d..80d3c740a8a8 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -72,6 +72,7 @@ void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b6fa257ea3e0..59ced8864b2f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -455,9 +455,9 @@ int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- LPFC_MBOXQ_t *mboxq;
+ LPFC_MBOXQ_t *mboxq = NULL;
struct lpfc_nodelist *ndlp;
- struct lpfc_dmabuf *dmabuf;
+ struct lpfc_dmabuf *dmabuf = NULL;
int rc = 0;
/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
@@ -471,25 +471,33 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
}
}
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!dmabuf) {
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
rc = -ENOMEM;
goto fail;
}
- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
- if (!dmabuf->virt) {
- rc = -ENOMEM;
- goto fail_free_dmabuf;
- }
- mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq) {
- rc = -ENOMEM;
- goto fail_free_coherent;
+ /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
+ if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+ if (!dmabuf->virt) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ memcpy(dmabuf->virt, &phba->fc_fabparam,
+ sizeof(struct serv_parm));
}
+
vport->port_state = LPFC_FABRIC_CFG_LINK;
- memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
- lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+ if (dmabuf)
+ lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+ else
+ lpfc_reg_vfi(mboxq, vport, 0);
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
@@ -497,17 +505,19 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = -ENXIO;
- goto fail_free_mbox;
+ goto fail;
}
return 0;
-fail_free_mbox:
- mempool_free(mboxq, phba->mbox_mem_pool);
-fail_free_coherent:
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
-fail_free_dmabuf:
- kfree(dmabuf);
fail:
+ if (mboxq)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (dmabuf) {
+ if (dmabuf->virt)
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0289 Issue Register VFI failed: Err %d\n", rc);
@@ -711,9 +721,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* For FC we need to do some special processing because of the SLI
* Port's default settings of the Common Service Parameters.
*/
- if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
- if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
+ if (fabric_param_changed)
lpfc_unregister_fcf_prep(phba);
/* This should just update the VFI CSPs*/
@@ -824,13 +835,21 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
- phba->fc_edtov = FF_DEF_EDTOV;
- phba->fc_ratov = FF_DEF_RATOV;
+ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+ lpfc_unregister_fcf_prep(phba);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ phba->fc_topology_changed = 0;
+ }
+
rc = memcmp(&vport->fc_portname, &sp->portName,
sizeof(vport->fc_portname));
- memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
if (rc >= 0) {
/* This side will initiate the PLOGI */
@@ -839,38 +858,14 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
/*
- * N_Port ID cannot be 0, set our to LocalID the other
- * side will be RemoteID.
+ * N_Port ID cannot be 0, set our Id to LocalID
+ * the other side will be RemoteID.
*/
/* not equal */
if (rc)
vport->fc_myDID = PT2PT_LocalID;
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
- }
-
- /*
- * For SLI4, the VFI/VPI are registered AFTER the
- * Nport with the higher WWPN sends the PLOGI with
- * an assigned NPortId.
- */
-
- /* not equal */
- if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
- lpfc_issue_reg_vfi(vport);
-
/* Decrement ndlp reference count indicating that ndlp can be
* safely released when other references to it are done.
*/
@@ -912,29 +907,20 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* If we are pt2pt with another NPort, force NPIV off! */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT;
- spin_unlock_irq(shost->host_lock);
- /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
- if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
- lpfc_unregister_fcf_prep(phba);
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
- /* The FC_VFI_REGISTERED flag will get clear in the cmpl
- * handler for unreg_vfi, but if we don't force the
- * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
- * built with the update bit set instead of just the vp bit to
- * change the Nport ID. We need to have the vp set and the
- * Upd cleared on topology changes.
- */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
- phba->fc_topology_changed = 0;
- lpfc_issue_reg_vfi(vport);
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
}
- /* Start discovery - this should just do CLEAR_LA */
- lpfc_disc_start(vport);
return 0;
fail:
return -ENXIO;
@@ -1157,6 +1143,7 @@ flogifail:
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
+
lpfc_nlp_put(ndlp);
if (!lpfc_error_lost_link(irsp)) {
@@ -3792,14 +3779,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_REG_LOGIN_ISSUE);
}
+
+ ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED)
goto out;
- else
- /* Decrement the ndlp reference count we
- * set for this failed mailbox command.
- */
- lpfc_nlp_put(ndlp);
+
+ /* Decrement the ndlp reference count we
+ * set for this failed mailbox command.
+ */
+ lpfc_nlp_put(ndlp);
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
/* ELS rsp: Cannot issue reg_login for <NPortid> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -3856,6 +3846,7 @@ out:
* the routine lpfc_els_free_iocb.
*/
cmdiocb->context1 = NULL;
+
}
lpfc_els_free_iocb(phba, cmdiocb);
@@ -3898,6 +3889,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
+ struct serv_parm *sp;
uint16_t cmdsize;
int rc;
ELS_PKT *els_pkt_ptr;
@@ -3927,6 +3919,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
"Issue ACC: did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
+ case ELS_CMD_FLOGI:
case ELS_CMD_PLOGI:
cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
@@ -3944,10 +3937,34 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
- memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+ sp = (struct serv_parm *)pcmd;
+
+ if (flag == ELS_CMD_FLOGI) {
+ /* Copy the received service parameters back */
+ memcpy(sp, &phba->fc_fabparam,
+ sizeof(struct serv_parm));
+
+ /* Clear the F_Port bit */
+ sp->cmn.fPort = 0;
+
+ /* Mark all class service parameters as invalid */
+ sp->cls1.classValid = 0;
+ sp->cls2.classValid = 0;
+ sp->cls3.classValid = 0;
+ sp->cls4.classValid = 0;
+
+ /* Copy our worldwide names */
+ memcpy(&sp->portName, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ } else {
+ memcpy(pcmd, &vport->fc_sparam,
+ sizeof(struct serv_parm));
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC PLOGI: did:x%x flg:x%x",
+ "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
case ELS_CMD_PRLO:
@@ -4681,28 +4698,25 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
- switch (phba->sli4_hba.link_state.speed) {
- case LPFC_FC_LA_SPEED_1G:
+ switch (phba->fc_linkspeed) {
+ case LPFC_LINK_SPEED_1GHZ:
rdp_speed = RDP_PS_1GB;
break;
- case LPFC_FC_LA_SPEED_2G:
+ case LPFC_LINK_SPEED_2GHZ:
rdp_speed = RDP_PS_2GB;
break;
- case LPFC_FC_LA_SPEED_4G:
+ case LPFC_LINK_SPEED_4GHZ:
rdp_speed = RDP_PS_4GB;
break;
- case LPFC_FC_LA_SPEED_8G:
+ case LPFC_LINK_SPEED_8GHZ:
rdp_speed = RDP_PS_8GB;
break;
- case LPFC_FC_LA_SPEED_10G:
+ case LPFC_LINK_SPEED_10GHZ:
rdp_speed = RDP_PS_10GB;
break;
- case LPFC_FC_LA_SPEED_16G:
+ case LPFC_LINK_SPEED_16GHZ:
rdp_speed = RDP_PS_16GB;
break;
- case LPFC_FC_LA_SPEED_32G:
- rdp_speed = RDP_PS_32GB;
- break;
default:
rdp_speed = RDP_PS_UNKNOWN;
break;
@@ -5739,7 +5753,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
IOCB_t *icmd = &cmdiocb->iocb;
struct serv_parm *sp;
LPFC_MBOXQ_t *mbox;
- struct ls_rjt stat;
uint32_t cmd, did;
int rc;
uint32_t fc_flag = 0;
@@ -5765,135 +5778,92 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
}
- if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
- /* For a FLOGI we accept, then if our portname is greater
- * then the remote portname we initiate Nport login.
- */
+ (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
- rc = memcmp(&vport->fc_portname, &sp->portName,
- sizeof(struct lpfc_name));
- if (!rc) {
- if (phba->sli_rev < LPFC_SLI_REV4) {
- mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!mbox)
- return 1;
- lpfc_linkdown(phba);
- lpfc_init_link(phba, mbox,
- phba->cfg_topology,
- phba->cfg_link_speed);
- mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- MBX_NOWAIT);
- lpfc_set_loopback_flag(phba);
- if (rc == MBX_NOT_FINISHED)
- mempool_free(mbox, phba->mbox_mem_pool);
- return 1;
- } else {
- /* abort the flogi coming back to ourselves
- * due to external loopback on the port.
- */
- lpfc_els_abort_flogi(phba);
- return 0;
- }
- } else if (rc > 0) { /* greater than */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(shost->host_lock);
+ /*
+ * If our portname is greater than the remote portname,
+ * then we initiate Nport login.
+ */
- /* If we have the high WWPN we can assign our own
- * myDID; otherwise, we have to WAIT for a PLOGI
- * from the remote NPort to find out what it
- * will be.
- */
- vport->fc_myDID = PT2PT_LocalID;
- } else
- vport->fc_myDID = PT2PT_RemoteID;
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
- /*
- * The vport state should go to LPFC_FLOGI only
- * AFTER we issue a FLOGI, not receive one.
+ if (!rc) {
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mbox)
+ return 1;
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT);
+ lpfc_set_loopback_flag(phba);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+
+ /* abort the flogi coming back to ourselves
+ * due to external loopback on the port.
*/
+ lpfc_els_abort_flogi(phba);
+ return 0;
+
+ } else if (rc > 0) { /* greater than */
spin_lock_irq(shost->host_lock);
- fc_flag = vport->fc_flag;
- port_state = vport->port_state;
- vport->fc_flag |= FC_PT2PT;
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
spin_unlock_irq(shost->host_lock);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3311 Rcv Flogi PS x%x new PS x%x "
- "fc_flag x%x new fc_flag x%x\n",
- port_state, vport->port_state,
- fc_flag, vport->fc_flag);
- /*
- * We temporarily set fc_myDID to make it look like we are
- * a Fabric. This is done just so we end up with the right
- * did / sid on the FLOGI ACC rsp.
+ /* If we have the high WWPN we can assign our own
+ * myDID; otherwise, we have to WAIT for a PLOGI
+ * from the remote NPort to find out what it
+ * will be.
*/
- did = vport->fc_myDID;
- vport->fc_myDID = Fabric_DID;
-
+ vport->fc_myDID = PT2PT_LocalID;
} else {
- /* Reject this request because invalid parameters */
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
- stat.un.b.vendorUnique = 0;
-
- /*
- * We temporarily set fc_myDID to make it look like we are
- * a Fabric. This is done just so we end up with the right
- * did / sid on the FLOGI LS_RJT rsp.
- */
- did = vport->fc_myDID;
- vport->fc_myDID = Fabric_DID;
-
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
- NULL);
+ vport->fc_myDID = PT2PT_RemoteID;
+ }
- /* Now lets put fc_myDID back to what its supposed to be */
- vport->fc_myDID = did;
+ /*
+ * The vport state should go to LPFC_FLOGI only
+ * AFTER we issue a FLOGI, not receive one.
+ */
+ spin_lock_irq(shost->host_lock);
+ fc_flag = vport->fc_flag;
+ port_state = vport->port_state;
+ vport->fc_flag |= FC_PT2PT;
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3311 Rcv Flogi PS x%x new PS x%x "
+ "fc_flag x%x new fc_flag x%x\n",
+ port_state, vport->port_state,
+ fc_flag, vport->fc_flag);
- return 1;
- }
+ /*
+ * We temporarily set fc_myDID to make it look like we are
+ * a Fabric. This is done just so we end up with the right
+ * did / sid on the FLOGI ACC rsp.
+ */
+ did = vport->fc_myDID;
+ vport->fc_myDID = Fabric_DID;
- /* send our FLOGI first */
- if (vport->port_state < LPFC_FLOGI) {
- vport->fc_myDID = 0;
- lpfc_initial_flogi(vport);
- vport->fc_myDID = Fabric_DID;
- }
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
/* Send back ACC */
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
/* Now lets put fc_myDID back to what its supposed to be */
vport->fc_myDID = did;
- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
- }
- }
-
return 0;
-fail:
- return 1;
}
/**
@@ -7345,7 +7315,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* reject till our FLOGI completes */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
- (cmd != ELS_CMD_FLOGI)) {
+ (cmd != ELS_CMD_FLOGI)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
@@ -7381,6 +7351,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
+
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bfc2442dd74a..d3668aa555d5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1083,7 +1083,7 @@ out:
}
-static void
+void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -1113,8 +1113,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
+ if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
+ else if (vport->fc_flag & FC_PT2PT)
+ lpfc_disc_start(vport);
return;
out:
@@ -2963,8 +2965,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
out_free_mem:
mempool_free(mboxq, phba->mbox_mem_pool);
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
+ if (dmabuf) {
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
return;
}
@@ -3448,10 +3452,10 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
spin_unlock_irq(shost->host_lock);
- } else
- /* Good status, call state machine */
- lpfc_disc_state_machine(vport, ndlp, pmb,
- NLP_EVT_CMPL_REG_LOGIN);
+ }
+
+ /* Call state machine */
+ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b0d92b84bcdc..c14ab6c3ae40 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8834,9 +8834,12 @@ found:
* already mapped to this phys_id.
*/
if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
- chann[saved_chann] =
- cpup->channel_id;
- saved_chann++;
+ if (saved_chann <=
+ LPFC_FCP_IO_CHAN_MAX) {
+ chann[saved_chann] =
+ cpup->channel_id;
+ saved_chann++;
+ }
goto out;
}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f87f90e9b7df..1e34b5408a29 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
reg_vfi->e_d_tov = phba->fc_edtov;
reg_vfi->r_a_tov = phba->fc_ratov;
- reg_vfi->bde.addrHigh = putPaddrHigh(phys);
- reg_vfi->bde.addrLow = putPaddrLow(phys);
- reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
- reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ if (phys) {
+ reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+ reg_vfi->bde.addrLow = putPaddrLow(phys);
+ reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+ reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ }
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
/* Only FC supports upd bit */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index ed9a2c80c4aa..193733e8c823 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -280,38 +280,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t *lp;
IOCB_t *icmd;
struct serv_parm *sp;
+ uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
- if (vport->port_state <= LPFC_FDISC) {
- /* Before responding to PLOGI, check for pt2pt mode.
- * If we are pt2pt, with an outstanding FLOGI, abort
- * the FLOGI and resend it first.
- */
- if (vport->fc_flag & FC_PT2PT) {
- lpfc_els_abort_flogi(phba);
- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
- /* If the other side is supposed to initiate
- * the PLOGI anyway, just ACC it now and
- * move on with discovery.
- */
- phba->fc_edtov = FF_DEF_EDTOV;
- phba->fc_ratov = FF_DEF_RATOV;
- /* Start discovery - this should just do
- CLEAR_LA */
- lpfc_disc_start(vport);
- } else
- lpfc_initial_flogi(vport);
- } else {
- stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, NULL);
- return 0;
- }
- }
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
@@ -404,30 +378,46 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
-
/* rcv'ed PLOGI decides what our NPortId will be */
vport->fc_myDID = icmd->un.rcvels.parmRo;
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
- goto out;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
+
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) {
+ /* E_D_TOV ticks are in nanoseconds */
+ ed_tov = (phba->fc_edtov + 999999) / 1000000;
}
+
/*
- * For SLI4, the VFI/VPI are registered AFTER the
- * Nport with the higher WWPN sends us a PLOGI with
- * our assigned NPortId.
+ * For pt-to-pt, use the larger EDTOV
+ * RATOV = 2 * EDTOV
*/
+ if (ed_tov > phba->fc_edtov)
+ phba->fc_edtov = ed_tov;
+ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ /* Issue config_link / reg_vfi to account for updated TOV's */
+
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
+ else {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox == NULL)
+ goto out;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
lpfc_can_disctmo(vport);
}
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto out;
@@ -1038,7 +1028,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
uint32_t *lp;
IOCB_t *irsp;
struct serv_parm *sp;
+ uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
+ int rc;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1094,18 +1086,63 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+ if ((vport->fc_flag & FC_PT2PT) &&
+ (vport->fc_flag & FC_PT2PT_PLOGI)) {
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) {
+ /* E_D_TOV ticks are in nanoseconds */
+ ed_tov = (phba->fc_edtov + 999999) / 1000000;
+ }
+
+ /*
+ * Use the larger EDTOV
+ * RATOV = 2 * EDTOV for pt-to-pt
+ */
+ if (ed_tov > phba->fc_edtov)
+ phba->fc_edtov = ed_tov;
+ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ /* Issue config_link / reg_vfi to account for updated TOV's */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_issue_reg_vfi(vport);
+ } else {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0133 PLOGI: no memory "
+ "for config_link "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+ goto out;
+ }
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
+ }
+
+ lpfc_unreg_rpi(vport, ndlp);
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0133 PLOGI: no memory for reg_login "
- "Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_state,
- ndlp->nlp_flag, ndlp->nlp_rpi);
+ "0018 PLOGI: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
goto out;
}
- lpfc_unreg_rpi(vport, ndlp);
-
if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
switch (ndlp->nlp_DID) {
@@ -2299,6 +2336,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
if (vport->phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+ lpfc_unreg_rpi(vport, ndlp);
+ }
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
lpfc_drop_node(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4679ed4444a7..bae36cc3740b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3859,7 +3859,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
uint32_t tag;
uint16_t hwq;
- if (shost_use_blk_mq(cmnd->device->host)) {
+ if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
tag = blk_mq_unique_tag(cmnd->request);
hwq = blk_mq_unique_tag_to_hwq(tag);
@@ -3908,9 +3908,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t logit = LOG_FCP;
/* Sanity check on return of outstanding command */
- if (!(lpfc_cmd->pCmd))
- return;
cmd = lpfc_cmd->pCmd;
+ if (!cmd)
+ return;
shost = cmd->device->host;
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f9585cdd8933..92dfd6a5178c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -14842,10 +14842,12 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
struct lpfc_dmabuf *h_buf;
struct hbq_dmabuf *seq_dmabuf = NULL;
struct hbq_dmabuf *temp_dmabuf = NULL;
+ uint8_t found = 0;
INIT_LIST_HEAD(&dmabuf->dbuf.list);
dmabuf->time_stamp = jiffies;
new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+
/* Use the hdr_buf to find the sequence that this frame belongs to */
list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
temp_hdr = (struct fc_frame_header *)h_buf->virt;
@@ -14885,7 +14887,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
return seq_dmabuf;
}
/* find the correct place in the sequence to insert this frame */
- list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+ d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
+ while (!found) {
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
/*
@@ -14895,9 +14898,17 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
if (be16_to_cpu(new_hdr->fh_seq_cnt) >
be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
- return seq_dmabuf;
+ found = 1;
+ break;
}
+
+ if (&d_buf->list == &seq_dmabuf->dbuf.list)
+ break;
+ d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
}
+
+ if (found)
+ return seq_dmabuf;
return NULL;
}
@@ -16173,7 +16184,7 @@ fail_fcf_read:
}
/**
- * lpfc_check_next_fcf_pri
+ * lpfc_check_next_fcf_pri_level
* phba pointer to the lpfc_hba struct for this port.
* This routine is called from the lpfc_sli4_fcf_rr_next_index_get
* routine when the rr_bmask is empty. The FCF indecies are put into the
@@ -16329,8 +16340,12 @@ next_priority:
if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
- LPFC_FCF_FLOGI_FAILED)
+ LPFC_FCF_FLOGI_FAILED) {
+ if (list_is_singular(&phba->fcf.fcf_pri_list))
+ return LPFC_FCOE_FCF_NEXT_NONE;
+
goto next_priority;
+ }
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2845 Get next roundrobin failover FCF (x%x)\n",
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index c0f7c8ce54aa..ef4ff03242ea 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1083,6 +1083,8 @@ struct megasas_ctrl_info {
#define VD_EXT_DEBUG 0
+#define SCAN_PD_CHANNEL 0x1
+#define SCAN_VD_CHANNEL 0x2
enum MR_SCSI_CMD_TYPE {
READ_WRITE_LDIO = 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 00ce3e269a43..3f8d357b1bac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -735,6 +735,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -4669,7 +4670,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- if (pci_request_selected_regions(instance->pdev, instance->bar,
+ if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
"megasas: LSI")) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
return -EBUSY;
@@ -4960,7 +4961,7 @@ fail_ready_state:
iounmap(instance->reg_set);
fail_ioremap:
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
return -EINVAL;
}
@@ -4981,7 +4982,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
@@ -5476,7 +5477,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->aen_mutex);
mutex_init(&instance->reset_mutex);
/*
@@ -6443,10 +6443,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
- mutex_lock(&instance->aen_mutex);
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
aen.class_locale_word);
- mutex_unlock(&instance->aen_mutex);
+ mutex_unlock(&instance->reset_mutex);
return error;
}
@@ -6477,9 +6477,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
int i;
int error = 0;
compat_uptr_t ptr;
- unsigned long local_raw_ptr;
u32 local_sense_off;
u32 local_sense_len;
+ u32 user_sense_off;
if (clear_user(ioc, sizeof(*ioc)))
return -EFAULT;
@@ -6497,17 +6497,16 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
* sense_len is not null, so prepare the 64bit value under
* the same condition.
*/
- if (get_user(local_raw_ptr, ioc->frame.raw) ||
- get_user(local_sense_off, &ioc->sense_off) ||
- get_user(local_sense_len, &ioc->sense_len))
+ if (get_user(local_sense_off, &ioc->sense_off) ||
+ get_user(local_sense_len, &ioc->sense_len) ||
+ get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
-
if (local_sense_len) {
void __user **sense_ioc_ptr =
- (void __user **)((u8*)local_raw_ptr + local_sense_off);
+ (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
compat_uptr_t *sense_cioc_ptr =
- (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
if (get_user(ptr, sense_cioc_ptr) ||
put_user(compat_ptr(ptr), sense_ioc_ptr))
return -EFAULT;
@@ -6648,6 +6647,7 @@ megasas_aen_polling(struct work_struct *work)
int i, j, doscan = 0;
u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
int error;
+ u8 dcmd_ret = 0;
if (!instance) {
printk(KERN_ERR "invalid instance!\n");
@@ -6660,16 +6660,7 @@ megasas_aen_polling(struct work_struct *work)
wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
/* Don't run the event workqueue thread if OCR is running */
- for (i = 0; i < wait_time; i++) {
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
- break;
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "%s waiting for "
- "controller reset to finish for scsi%d\n",
- __func__, instance->host->host_no);
- }
- msleep(1000);
- }
+ mutex_lock(&instance->reset_mutex);
instance->ev = NULL;
host = instance->host;
@@ -6677,212 +6668,127 @@ megasas_aen_polling(struct work_struct *work)
megasas_decode_evt(instance);
switch (le32_to_cpu(instance->evt_detail->code)) {
- case MR_EVT_PD_INSERTED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (!sdev1)
- scsi_add_device(host, i, j, 0);
-
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- break;
+ case MR_EVT_PD_INSERTED:
case MR_EVT_PD_REMOVED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- }
- doscan = 0;
+ dcmd_ret = megasas_get_pd_list(instance);
+ if (dcmd_ret == 0)
+ doscan = SCAN_PD_CHANNEL;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- }
- break;
case MR_EVT_LD_CREATED:
if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- doscan = 0;
- }
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret == 0)
+ doscan = SCAN_VD_CHANNEL;
+
break;
+
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
- doscan = 1;
+ dcmd_ret = megasas_get_pd_list(instance);
+
+ if (dcmd_ret != 0)
+ break;
+
+ if (!instance->requestorId ||
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret != 0)
+ break;
+
+ doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
+ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+ instance->host->host_no);
break;
+
case MR_EVT_CTRL_PROP_CHANGED:
- megasas_get_ctrl_info(instance);
- break;
+ dcmd_ret = megasas_get_ctrl_info(instance);
+ break;
default:
doscan = 0;
break;
}
} else {
dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
return;
}
- if (doscan) {
- dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
- instance->host->host_no);
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
- sdev1 = scsi_device_lookup(host, i, j, 0);
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- if (!sdev1) {
- scsi_add_device(host, i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ mutex_unlock(&instance->reset_mutex);
+
+ if (doscan & SCAN_PD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1)
+ scsi_add_device(host, i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
+ }
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- else
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ if (doscan & SCAN_VD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index] != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
}
- if (instance->aen_cmd != NULL) {
- kfree(ev);
- return ;
- }
-
- seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ if (dcmd_ret == 0)
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ else
+ seq_num = instance->last_seq_num;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
- mutex_lock(&instance->aen_mutex);
+
+ if (instance->aen_cmd != NULL) {
+ kfree(ev);
+ return;
+ }
+
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, seq_num,
class_locale.word);
- mutex_unlock(&instance->aen_mutex);
-
if (error)
- dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
+ dev_err(&instance->pdev->dev,
+ "register aen failed error %x\n", error);
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 8d630a552b07..021b994fdae8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -201,6 +201,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
@@ -2437,7 +2438,7 @@ megasas_release_fusion(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 11393ebf1a68..5b2c37f1e908 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2020,8 +2020,10 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->msix96_vector)
+ if (ioc->msix96_vector) {
kfree(ioc->replyPostRegisterIndex);
+ ioc->replyPostRegisterIndex = NULL;
+ }
if (ioc->chip_phys) {
iounmap(ioc->chip);
@@ -2155,6 +2157,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
} else
ioc->msix96_vector = 0;
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+ &ioc->chip->ReplyPostHostIndex;
+
+ for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+ ioc->reply_post_host_index[i] =
+ (resource_size_t __iomem *)
+ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ * 4)));
+ }
+
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -2229,6 +2242,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
/**
* mpt3sas_base_get_smid - obtain a free smid from internal queue
* @ioc: per adapter object
@@ -2289,6 +2308,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
+ request->msix_io = _base_get_msix_index(ioc);
list_del(&request->tracker_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return smid;
@@ -2411,12 +2431,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
}
#endif
-static inline u8
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
-{
- return ioc->cpu_msix_table[raw_smp_processor_id()];
-}
-
/**
* mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
@@ -2470,18 +2484,19 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
* @ioc: per adapter object
* @smid: system request message index
- *
+ * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
* Return nothing.
*/
void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
descriptor.HighPriority.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.MSIxIndex = msix_task;
descriptor.HighPriority.SMID = cpu_to_le16(smid);
descriptor.HighPriority.LMID = 0;
descriptor.HighPriority.Reserved1 = 0;
@@ -5201,17 +5216,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_free_resources;
- if (ioc->is_warpdrive) {
- ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
- &ioc->chip->ReplyPostHostIndex;
-
- for (i = 1; i < ioc->cpu_msix_table_sz; i++)
- ioc->reply_post_host_index[i] =
- (resource_size_t __iomem *)
- ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
- * 4)));
- }
-
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
if (r)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 5ad271efbd45..92648a5ea2d2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -643,6 +643,7 @@ struct chain_tracker {
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
* @tracker_list: list of free request (ioc->free_list)
+ * @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
@@ -651,6 +652,7 @@ struct scsiio_tracker {
u8 direct_io;
struct list_head chain_list;
struct list_head tracker_list;
+ u16 msix_io;
};
/**
@@ -1213,7 +1215,8 @@ void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 msix_task);
void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d8366b056b70..4ccde5a05b70 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -817,7 +817,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 9ab77b06434d..6180f7970bbf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2193,6 +2193,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
unsigned long timeleft;
struct scsiio_tracker *scsi_lookup = NULL;
int rc;
+ u16 msix_task = 0;
if (m_type == TM_MUTEX_ON)
mutex_lock(&ioc->tm_cmds.mutex);
@@ -2256,7 +2257,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
+ (scsi_lookup->msix_io < ioc->reply_queue_count))
+ msix_task = scsi_lookup->msix_io;
+ else
+ msix_task = 0;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -3151,7 +3157,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3332,7 +3338,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
}
/**
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 75514a15bea0..f57d96984ae4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1578,7 +1578,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
else {
- if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 93cbefa75b26..11cdb172cfaf 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -426,7 +426,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* here, and we don't know what device it is
* trying to work with, leave it as-is.
*/
- vmax = 8; /* max length of vendor */
+ vmax = sizeof(devinfo->vendor);
vskip = vendor;
while (vmax > 0 && *vskip == ' ') {
vmax--;
@@ -436,7 +436,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
while (vmax > 0 && vskip[vmax - 1] == ' ')
--vmax;
- mmax = 16; /* max length of model */
+ mmax = sizeof(devinfo->model);
mskip = model;
while (mmax > 0 && *mskip == ' ') {
mmax--;
@@ -452,10 +452,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* Behave like the older version of get_device_flags.
*/
if (memcmp(devinfo->vendor, vskip, vmax) ||
- devinfo->vendor[vmax])
+ (vmax < sizeof(devinfo->vendor) &&
+ devinfo->vendor[vmax]))
continue;
if (memcmp(devinfo->model, mskip, mmax) ||
- devinfo->model[mmax])
+ (mmax < sizeof(devinfo->model) &&
+ devinfo->model[mmax]))
continue;
return devinfo;
} else {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d47624000edf..98cabf409bf0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1059,11 +1059,12 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
}
error = scsi_dh_add_device(sdev);
- if (error) {
+ if (error)
+ /*
+ * device_handler is optional, so any error can be ignored
+ */
sdev_printk(KERN_INFO, sdev,
"failed to add device handler: %d\n", error);
- return error;
- }
device_enable_async_suspend(&sdev->sdev_dev);
error = device_add(&sdev->sdev_dev);
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 1ba4f2bafba3..070d27df6b49 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -339,7 +339,8 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
req = cmd->request;
if (req->bio)
- lba = req->bio->bi_iter.bi_sector;
+ lba = (req->bio->bi_iter.bi_sector) >>
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
slot = req->tag;
if (slot < 0 || slot > qcom_host->hba->nutrs) {
@@ -390,6 +391,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
bypass = ice_set.decr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
UFS_QCOM_ICE_DISABLE_BYPASS;
+
/* Configure ICE index */
ctrl_info_val =
(ice_set.crypto_data.key_index &
@@ -398,8 +400,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
/* Configure data unit size of transfer request */
ctrl_info_val |=
- (UFS_QCOM_ICE_TR_DATA_UNIT_4_KB &
- MASK_UFS_QCOM_ICE_CTRL_INFO_CDU)
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB
<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU;
/* Configure ICE bypass mode */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 1e200370deea..2b0731b8358c 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -99,13 +99,10 @@ static int ufs_qcom_host_clk_get(struct device *dev,
int err = 0;
clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk))
err = PTR_ERR(clk);
- dev_err(dev, "%s: failed to get %s err %d",
- __func__, name, err);
- } else {
+ else
*clk_out = clk;
- }
return err;
}
@@ -184,20 +181,29 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_get(dev,
"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
err = ufs_qcom_host_clk_get(dev,
"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
/* The tx lane1 clk could be muxed, hence keep this optional */
ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
@@ -822,7 +828,8 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
/* Use request LBA as the DUN value */
if (req->bio)
- *dun = req->bio->bi_iter.bi_sector;
+ *dun = (req->bio->bi_iter.bi_sector) >>
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2c86606ecd2e..6e88e4b11273 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,8 @@
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
+#include <linux/blkdev.h>
+
#include "ufshcd.h"
#include "ufshci.h"
#include "ufs_quirks.h"
@@ -350,7 +352,7 @@ static inline bool ufshcd_is_valid_pm_lvl(int lvl)
}
static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -2466,9 +2468,6 @@ static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
goto out;
req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
- if (lrbp->cmd->request && lrbp->cmd->request->bio)
- dun = lrbp->cmd->request->bio->bi_iter.bi_sector;
-
req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
out:
@@ -2846,6 +2845,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Vote PM QoS for the request */
ufshcd_vops_pm_qos_req_start(hba, cmd->request);
+ /* IO svc time latency histogram */
+ if (hba != NULL && cmd->request != NULL) {
+ if (hba->latency_hist_enabled &&
+ (cmd->request->cmd_type == REQ_TYPE_FS)) {
+ cmd->request->lat_hist_io_start = ktime_get();
+ cmd->request->lat_hist_enabled = 1;
+ } else
+ cmd->request->lat_hist_enabled = 0;
+ }
+
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -5228,19 +5239,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ }
+ return retval;
}
/**
@@ -5313,6 +5334,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
struct scsi_cmnd *cmd;
int result;
int index;
+ struct request *req;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
@@ -5342,6 +5364,23 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp, cmd->request);
}
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ req = cmd->request;
+ if (req) {
+ /* Update IO svc time latency histogram */
+ if (req->lat_hist_enabled) {
+ ktime_t completion;
+ u_int64_t delta_us;
+
+ completion = ktime_get();
+ delta_us = ktime_us_delta(completion,
+ req->lat_hist_io_start);
+ /* rq_data_dir() => true if WRITE */
+ blk_update_latency_hist(&hba->io_lat_s,
+ (rq_data_dir(req) == READ),
+ delta_us);
+ }
+ }
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
@@ -5367,8 +5406,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
u32 tr_doorbell;
@@ -5386,7 +5429,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
@@ -5964,15 +6012,20 @@ static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
@@ -5994,61 +6047,79 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
}
}
}
+ retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg)
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- } else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ } else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg &
+ UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg) {
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg) {
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg) {
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
queue_eh_work = true;
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
@@ -6073,6 +6144,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
hba->ufshcd_state = UFSHCD_STATE_ERROR;
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -6080,28 +6152,44 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
ufsdbg_error_inject_dispatcher(hba,
ERR_INJECT_INTR, intr_status, &intr_status);
@@ -6109,16 +6197,18 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (hba->errors || hba->ce_error)
- ufshcd_check_errors(hba);
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
@@ -6126,27 +6216,44 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ /*
+ * There could be max of hba->nutrs reqs in flight and in worst case
+ * if the reqs get finished 1 by 1 after the interrupt status is
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_hex_dump("host regs: ", hba->mmio_base,
+ UFSHCI_REG_SPACE_SIZE);
}
+
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -8835,6 +8942,54 @@ out:
}
EXPORT_SYMBOL(ufshcd_shutdown);
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ long value;
+
+ if (kstrtol(buf, 0, &value))
+ return -EINVAL;
+ if (value == BLK_IO_LAT_HIST_ZERO)
+ blk_zero_latency_hist(&hba->io_lat_s);
+ else if (value == BLK_IO_LAT_HIST_ENABLE ||
+ value == BLK_IO_LAT_HIST_DISABLE)
+ hba->latency_hist_enabled = value;
+ return count;
+}
+
+ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return blk_latency_hist_show(&hba->io_lat_s, buf);
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+ latency_hist_show, latency_hist_store);
+
+static void
+ufshcd_init_latency_hist(struct ufs_hba *hba)
+{
+ if (device_create_file(hba->dev, &dev_attr_latency_hist))
+ dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
+}
+
+static void
+ufshcd_exit_latency_hist(struct ufs_hba *hba)
+{
+ device_create_file(hba->dev, &dev_attr_latency_hist);
+}
+
/**
* ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
@@ -8851,6 +9006,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_exit_hibern8_on_idle(hba);
if (ufshcd_is_clkscaling_supported(hba)) {
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ ufshcd_exit_latency_hist(hba);
devfreq_remove_device(hba->devfreq);
}
ufshcd_hba_exit(hba);
@@ -9514,6 +9670,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
+ ufshcd_init_latency_hist(hba);
+
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.
@@ -9534,6 +9692,7 @@ out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
ufshcd_exit_clk_gating(hba);
+ ufshcd_exit_latency_hist(hba);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 81eab2cbb6cb..b20afd85beab 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -907,6 +907,9 @@ struct ufs_hba {
bool full_init_linereset;
struct pinctrl *pctrl;
+
+ int latency_hist_enabled;
+ struct io_latency_state io_lat_s;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 7067c5733773..49510bfd6b24 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -65,126 +65,6 @@ module_param(qmi_timeout, ulong, 0600);
#define NUM_REG_LOG_PAGES 4
#define ICNSS_MAGIC 0x5abc5abc
-/*
- * Registers: MPM2_PSHOLD
- * Base Address: 0x10AC000
- */
-#define MPM_WCSSAON_CONFIG_OFFSET 0x18
-#define MPM_WCSSAON_CONFIG_ARES_N BIT(0)
-#define MPM_WCSSAON_CONFIG_WLAN_DISABLE BIT(1)
-#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD BIT(6)
-#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL BIT(7)
-#define MPM_WCSSAON_CONFIG_FORCE_ACTIVE BIT(14)
-#define MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE BIT(19)
-#define MPM_WCSSAON_CONFIG_DISCONNECT_CLR BIT(21)
-#define MPM_WCSSAON_CONFIG_M2W_CLAMP_EN BIT(22)
-
-/*
- * Registers: WCSS_SR_SHADOW_REGISTERS
- * Base Address: 0x18820000
- */
-#define SR_WCSSAON_SR_LSB_OFFSET 0x22070
-#define SR_WCSSAON_SR_LSB_RETENTION_STATUS BIT(20)
-
-#define SR_PMM_SR_MSB 0x2206C
-#define SR_PMM_SR_MSB_AHB_CLOCK_MASK GENMASK(26, 22)
-#define SR_PMM_SR_MSB_XO_CLOCK_MASK GENMASK(31, 27)
-
-/*
- * Registers: WCSS_HM_A_WCSS_CLK_CTL_WCSS_CC_REG
- * Base Address: 0x189D0000
- */
-#define WCSS_WLAN1_GDSCR_OFFSET 0x1D3004
-#define WCSS_WLAN1_GDSCR_SW_COLLAPSE BIT(0)
-#define WCSS_WLAN1_GDSCR_HW_CONTROL BIT(1)
-#define WCSS_WLAN1_GDSCR_PWR_ON BIT(31)
-
-#define WCSS_RFACTRL_GDSCR_OFFSET 0x1D60C8
-#define WCSS_RFACTRL_GDSCR_SW_COLLAPSE BIT(0)
-#define WCSS_RFACTRL_GDSCR_HW_CONTROL BIT(1)
-#define WCSS_RFACTRL_GDSCR_PWR_ON BIT(31)
-
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET 0x1D1004
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_SW_COLLAPSE BIT(0)
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_HW_CONTROL BIT(1)
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON BIT(31)
-
-#define WCSS_CLK_CTL_NOC_CMD_RCGR_OFFSET 0x1D1030
-#define WCSS_CLK_CTL_NOC_CMD_RCGR_UPDATE BIT(0)
-
-#define WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET 0x1D1034
-#define WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL GENMASK(10, 8)
-
-#define WCSS_CLK_CTL_REF_CMD_RCGR_OFFSET 0x1D602C
-#define WCSS_CLK_CTL_REF_CMD_RCGR_UPDATE BIT(0)
-
-#define WCSS_CLK_CTL_REF_CFG_RCGR_OFFSET 0x1D6030
-#define WCSS_CLK_CTL_REF_CFG_RCGR_SRC_SEL GENMASK(10, 8)
-
-/*
- * Registers: WCSS_HM_A_WIFI_APB_3_A_WCMN_MAC_WCMN_REG
- * Base Address: 0x18AF0000
- */
-#define WCMN_PMM_WLAN1_CFG_REG1_OFFSET 0x2F0804
-#define WCMN_PMM_WLAN1_CFG_REG1_RFIF_ADC_PORDN_N BIT(9)
-#define WCMN_PMM_WLAN1_CFG_REG1_ADC_DIGITAL_CLAMP BIT(10)
-
-/*
- * Registers: WCSS_HM_A_PMM_PMM
- * Base Address: 0x18880000
- */
-#define WCSS_HM_A_PMM_ROOT_CLK_ENABLE 0x80010
-#define PMM_TCXO_CLK_ENABLE BIT(13)
-
-#define PMM_COMMON_IDLEREQ_CSR_OFFSET 0x80120
-#define PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET BIT(16)
-#define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK BIT(26)
-#define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLE BIT(27)
-
-#define PMM_RFACTRL_IDLEREQ_CSR_OFFSET 0x80164
-#define PMM_RFACTRL_IDLEREQ_CSR_SW_RFACTRL_IDLEREQ_SET BIT(16)
-#define PMM_RFACTRL_IDLEREQ_CSR_RFACTRL_IDLETACK BIT(26)
-
-#define PMM_WSI_CMD_OFFSET 0x800E0
-#define PMM_WSI_CMD_USE_WLAN1_WSI BIT(0)
-#define PMM_WSI_CMD_SW_USE_PMM_WSI BIT(2)
-#define PMM_WSI_CMD_SW_BUS_SYNC BIT(3)
-#define PMM_WSI_CMD_SW_RF_RESET BIT(4)
-#define PMM_WSI_CMD_SW_REG_READ BIT(5)
-#define PMM_WSI_CMD_SW_XO_DIS BIT(8)
-#define PMM_WSI_CMD_SW_FORCE_IDLE BIT(9)
-#define PMM_WSI_CMD_PMM_WSI_SM GENMASK(24, 16)
-#define PMM_WSI_CMD_RF_CMD_IP BIT(31)
-
-#define PMM_REG_RW_ADDR_OFFSET 0x800F0
-#define PMM_REG_RW_ADDR_SW_REG_RW_ADDR GENMASK(15, 0)
-
-#define PMM_REG_READ_DATA_OFFSET 0x800F8
-
-#define PMM_RF_VAULT_REG_ADDR_OFFSET 0x800FC
-#define PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR GENMASK(15, 0)
-
-#define PMM_RF_VAULT_REG_DATA_OFFSET 0x80100
-#define PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA GENMASK(31, 0)
-
-#define PMM_XO_DIS_ADDR_OFFSET 0x800E8
-#define PMM_XO_DIS_ADDR_XO_DIS_ADDR GENMASK(15, 0)
-
-#define PMM_XO_DIS_DATA_OFFSET 0x800EC
-#define PMM_XO_DIS_DATA_XO_DIS_DATA GENMASK(31, 0)
-
-#define PMM_RF_RESET_ADDR_OFFSET 0x80104
-#define PMM_RF_RESET_ADDR_RF_RESET_ADDR GENMASK(15, 0)
-
-#define PMM_RF_RESET_DATA_OFFSET 0x80108
-#define PMM_RF_RESET_DATA_RF_RESET_DATA GENMASK(31, 0)
-
-#define ICNSS_HW_REG_RETRY 10
-
-#define WCSS_HM_A_PMM_HW_VERSION_V10 0x40000000
-#define WCSS_HM_A_PMM_HW_VERSION_V20 0x40010000
-#define WCSS_HM_A_PMM_HW_VERSION_Q10 0x40010001
-
#define ICNSS_SERVICE_LOCATION_CLIENT_NAME "ICNSS-WLAN"
#define ICNSS_WLAN_SERVICE_NAME "wlan/fw"
@@ -326,38 +206,6 @@ struct ce_irq_list {
irqreturn_t (*handler)(int, void *);
};
-struct icnss_vreg_info {
- struct regulator *reg;
- const char *name;
- u32 min_v;
- u32 max_v;
- u32 load_ua;
- unsigned long settle_delay;
- bool required;
-};
-
-struct icnss_clk_info {
- struct clk *handle;
- const char *name;
- u32 freq;
- bool required;
-};
-
-static struct icnss_vreg_info icnss_vreg_info[] = {
- {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, true},
- {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
- {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
- {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
-};
-
-#define ICNSS_VREG_INFO_SIZE ARRAY_SIZE(icnss_vreg_info)
-
-static struct icnss_clk_info icnss_clk_info[] = {
- {NULL, "cxo_ref_clk_pin", 0, false},
-};
-
-#define ICNSS_CLK_INFO_SIZE ARRAY_SIZE(icnss_clk_info)
-
struct icnss_stats {
struct {
uint32_t posted;
@@ -421,13 +269,9 @@ static struct icnss_priv {
struct platform_device *pdev;
struct icnss_driver_ops *ops;
struct ce_irq_list ce_irq_list[ICNSS_MAX_IRQ_REGISTRATIONS];
- struct icnss_vreg_info vreg_info[ICNSS_VREG_INFO_SIZE];
- struct icnss_clk_info clk_info[ICNSS_CLK_INFO_SIZE];
u32 ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
phys_addr_t mem_base_pa;
void __iomem *mem_base_va;
- phys_addr_t mpm_config_pa;
- void __iomem *mpm_config_va;
struct dma_iommu_mapping *smmu_mapping;
dma_addr_t smmu_iova_start;
size_t smmu_iova_len;
@@ -477,71 +321,6 @@ static struct icnss_priv {
struct icnss_wlan_mac_addr wlan_mac_addr;
} *penv;
-static void icnss_hw_write_reg(void *base, u32 offset, u32 val)
-{
- writel_relaxed(val, base + offset);
- wmb(); /* Ensure data is written to hardware register */
-}
-
-static u32 icnss_hw_read_reg(void *base, u32 offset)
-{
- u32 rdata = readl_relaxed(base + offset);
-
- icnss_reg_dbg(" READ: offset: 0x%06x 0x%08x\n", offset, rdata);
-
- return rdata;
-}
-
-static void icnss_hw_write_reg_field(void *base, u32 offset, u32 mask, u32 val)
-{
- u32 shift = find_first_bit((void *)&mask, 32);
- u32 rdata = readl_relaxed(base + offset);
-
- val = (rdata & ~mask) | (val << shift);
-
- icnss_reg_dbg("WRITE: offset: 0x%06x 0x%08x -> 0x%08x\n",
- offset, rdata, val);
-
- icnss_hw_write_reg(base, offset, val);
-}
-
-static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val,
- unsigned long usecs, int retry)
-{
- u32 shift;
- u32 rdata;
- int r = 0;
-
- shift = find_first_bit((void *)&mask, 32);
-
- val = val << shift;
-
- rdata = readl_relaxed(base + offset);
-
- icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
- offset, val, rdata, mask);
-
- while ((rdata & mask) != val) {
- if (retry != 0 && r >= retry) {
- icnss_pr_err("POLL FAILED: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
- offset, val, rdata, mask);
-
- return -EIO;
- }
-
- r++;
- udelay(usecs);
- rdata = readl_relaxed(base + offset);
-
- if (retry)
- icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
- offset, val, rdata, mask);
-
- }
-
- return 0;
-}
-
static void icnss_pm_stay_awake(struct icnss_priv *priv)
{
if (atomic_inc_return(&priv->pm_count) != 1)
@@ -886,683 +665,6 @@ out:
return ret;
}
-static int icnss_vreg_on(struct icnss_priv *priv)
-{
- int ret = 0;
- struct icnss_vreg_info *vreg_info;
- int i;
-
- for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
- vreg_info = &priv->vreg_info[i];
-
- if (!vreg_info->reg)
- continue;
-
- icnss_pr_dbg("Regulator %s being enabled\n", vreg_info->name);
-
- ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
- vreg_info->max_v);
-
- if (ret) {
- icnss_pr_err("Regulator %s, can't set voltage: min_v: %u, max_v: %u, ret: %d\n",
- vreg_info->name, vreg_info->min_v,
- vreg_info->max_v, ret);
- break;
- }
-
- if (vreg_info->load_ua) {
- ret = regulator_set_load(vreg_info->reg,
- vreg_info->load_ua);
-
- if (ret < 0) {
- icnss_pr_err("Regulator %s, can't set load: %u, ret: %d\n",
- vreg_info->name,
- vreg_info->load_ua, ret);
- break;
- }
- }
-
- ret = regulator_enable(vreg_info->reg);
- if (ret) {
- icnss_pr_err("Regulator %s, can't enable: %d\n",
- vreg_info->name, ret);
- break;
- }
-
- if (vreg_info->settle_delay)
- udelay(vreg_info->settle_delay);
- }
-
- if (!ret)
- return 0;
-
- for (; i >= 0; i--) {
- vreg_info = &priv->vreg_info[i];
-
- if (!vreg_info->reg)
- continue;
-
- regulator_disable(vreg_info->reg);
-
- regulator_set_load(vreg_info->reg, 0);
-
- regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
- }
-
- return ret;
-}
-
-static int icnss_vreg_off(struct icnss_priv *priv)
-{
- int ret = 0;
- struct icnss_vreg_info *vreg_info;
- int i;
-
- for (i = ICNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
- vreg_info = &priv->vreg_info[i];
-
- if (!vreg_info->reg)
- continue;
-
- icnss_pr_dbg("Regulator %s being disabled\n", vreg_info->name);
-
- ret = regulator_disable(vreg_info->reg);
- if (ret)
- icnss_pr_err("Regulator %s, can't disable: %d\n",
- vreg_info->name, ret);
-
- ret = regulator_set_load(vreg_info->reg, 0);
- if (ret < 0)
- icnss_pr_err("Regulator %s, can't set load: %d\n",
- vreg_info->name, ret);
-
- ret = regulator_set_voltage(vreg_info->reg, 0,
- vreg_info->max_v);
-
- if (ret)
- icnss_pr_err("Regulator %s, can't set voltage: %d\n",
- vreg_info->name, ret);
- }
-
- return ret;
-}
-
-static int icnss_clk_init(struct icnss_priv *priv)
-{
- struct icnss_clk_info *clk_info;
- int i;
- int ret = 0;
-
- for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
- clk_info = &priv->clk_info[i];
-
- if (!clk_info->handle)
- continue;
-
- icnss_pr_dbg("Clock %s being enabled\n", clk_info->name);
-
- if (clk_info->freq) {
- ret = clk_set_rate(clk_info->handle, clk_info->freq);
-
- if (ret) {
- icnss_pr_err("Clock %s, can't set frequency: %u, ret: %d\n",
- clk_info->name, clk_info->freq,
- ret);
- break;
- }
- }
-
- ret = clk_prepare_enable(clk_info->handle);
-
- if (ret) {
- icnss_pr_err("Clock %s, can't enable: %d\n",
- clk_info->name, ret);
- break;
- }
- }
-
- if (ret == 0)
- return 0;
-
- for (; i >= 0; i--) {
- clk_info = &priv->clk_info[i];
-
- if (!clk_info->handle)
- continue;
-
- clk_disable_unprepare(clk_info->handle);
- }
-
- return ret;
-}
-
-static int icnss_clk_deinit(struct icnss_priv *priv)
-{
- struct icnss_clk_info *clk_info;
- int i;
-
- for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
- clk_info = &priv->clk_info[i];
-
- if (!clk_info->handle)
- continue;
-
- icnss_pr_dbg("Clock %s being disabled\n", clk_info->name);
-
- clk_disable_unprepare(clk_info->handle);
- }
-
- return 0;
-}
-
-static void icnss_hw_top_level_release_reset(struct icnss_priv *priv)
-{
- icnss_pr_dbg("RESET: HW Release reset: state: 0x%lx\n", priv->state);
-
- icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_ARES_N, 1);
-
- icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_WLAN_DISABLE, 0x0);
-
- icnss_hw_poll_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_ARES_N, 1, 10,
- ICNSS_HW_REG_RETRY);
-}
-
-static void icnss_hw_top_level_reset(struct icnss_priv *priv)
-{
- icnss_pr_dbg("RESET: HW top level reset: state: 0x%lx\n", priv->state);
-
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_ARES_N, 0);
-
- icnss_hw_poll_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_ARES_N, 0, 10,
- ICNSS_HW_REG_RETRY);
-}
-
-static void icnss_hw_io_reset(struct icnss_priv *priv, bool on)
-{
- u32 hw_version = priv->soc_info.soc_id;
-
- if (on && !test_bit(ICNSS_FW_READY, &priv->state))
- return;
-
- icnss_pr_dbg("HW io reset: %s, SoC: 0x%x, state: 0x%lx\n",
- on ? "ON" : "OFF", priv->soc_info.soc_id, priv->state);
-
- if (hw_version == WCSS_HM_A_PMM_HW_VERSION_V10 ||
- hw_version == WCSS_HM_A_PMM_HW_VERSION_V20) {
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL, 0);
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD, on);
- } else if (hw_version == WCSS_HM_A_PMM_HW_VERSION_Q10) {
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_M2W_CLAMP_EN,
- on);
- }
-}
-
-static int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv)
-{
- u32 rdata;
-
- icnss_pr_dbg("RESET: WLAN SS power down, state: 0x%lx\n", priv->state);
-
- rdata = icnss_hw_read_reg(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET);
-
- if ((rdata & WCSS_WLAN1_GDSCR_PWR_ON) == 0)
- return 0;
-
- icnss_hw_write_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
- WCSS_WLAN1_GDSCR_HW_CONTROL, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
- WCSS_WLAN1_GDSCR_SW_COLLAPSE, 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
- WCSS_WLAN1_GDSCR_PWR_ON, 0, 10,
- ICNSS_HW_REG_RETRY);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCMN_PMM_WLAN1_CFG_REG1_OFFSET,
- WCMN_PMM_WLAN1_CFG_REG1_ADC_DIGITAL_CLAMP, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCMN_PMM_WLAN1_CFG_REG1_OFFSET,
- WCMN_PMM_WLAN1_CFG_REG1_RFIF_ADC_PORDN_N, 0);
-
- return 0;
-}
-
-static int icnss_hw_reset_common_ss_power_down(struct icnss_priv *priv)
-{
- u32 rdata;
-
- icnss_pr_dbg("RESET: Common SS power down, state: 0x%lx\n",
- priv->state);
-
- rdata = icnss_hw_read_reg(priv->mem_base_va,
- WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET);
-
- if ((rdata & WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON) == 0)
- return 0;
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- PMM_COMMON_IDLEREQ_CSR_OFFSET,
- PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET,
- 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va,
- PMM_COMMON_IDLEREQ_CSR_OFFSET,
- PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK,
- 1, 20, ICNSS_HW_REG_RETRY);
-
- icnss_hw_poll_reg_field(priv->mem_base_va,
- PMM_COMMON_IDLEREQ_CSR_OFFSET,
- PMM_COMMON_IDLEREQ_CSR_WNOC_IDLE,
- 1, 10, ICNSS_HW_REG_RETRY);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
- WCSS_CLK_CTL_WCSS_CSS_GDSCR_HW_CONTROL, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
- WCSS_CLK_CTL_WCSS_CSS_GDSCR_SW_COLLAPSE, 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va,
- WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
- WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON, 0, 10,
- ICNSS_HW_REG_RETRY);
-
- return 0;
-
-}
-
-static int icnss_hw_reset_wlan_rfactrl_power_down(struct icnss_priv *priv)
-{
- u32 rdata;
-
- icnss_pr_dbg("RESET: RFACTRL power down, state: 0x%lx\n", priv->state);
-
- rdata = icnss_hw_read_reg(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET);
-
- if ((rdata & WCSS_RFACTRL_GDSCR_PWR_ON) == 0)
- return 0;
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- PMM_RFACTRL_IDLEREQ_CSR_OFFSET,
- PMM_RFACTRL_IDLEREQ_CSR_SW_RFACTRL_IDLEREQ_SET,
- 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va,
- PMM_RFACTRL_IDLEREQ_CSR_OFFSET,
- PMM_RFACTRL_IDLEREQ_CSR_RFACTRL_IDLETACK,
- 1, 10, ICNSS_HW_REG_RETRY);
-
- icnss_hw_write_reg_field(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET,
- WCSS_RFACTRL_GDSCR_HW_CONTROL, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET,
- WCSS_RFACTRL_GDSCR_SW_COLLAPSE, 1);
-
- return 0;
-}
-
-static void icnss_hw_wsi_cmd_error_recovery(struct icnss_priv *priv)
-{
- icnss_pr_dbg("RESET: WSI CMD Error recovery, state: 0x%lx\n",
- priv->state);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_FORCE_IDLE, 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_PMM_WSI_SM, 1, 100, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_FORCE_IDLE, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_BUS_SYNC, 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_RF_CMD_IP, 0, 100, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_BUS_SYNC, 0);
-}
-
-static u32 icnss_hw_rf_register_read_command(struct icnss_priv *priv, u32 addr)
-{
- u32 rdata = 0;
- int ret;
- int i;
-
- icnss_pr_dbg("RF register read command, addr: 0x%04x, state: 0x%lx\n",
- addr, priv->state);
-
- for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_USE_WLAN1_WSI, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- PMM_REG_RW_ADDR_OFFSET,
- PMM_REG_RW_ADDR_SW_REG_RW_ADDR,
- addr & 0xFFFF);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_REG_READ, 1);
-
- ret = icnss_hw_poll_reg_field(priv->mem_base_va,
- PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_RF_CMD_IP, 0, 10,
- ICNSS_HW_REG_RETRY);
- if (ret == 0)
- break;
-
- icnss_hw_wsi_cmd_error_recovery(priv);
- }
-
-
- rdata = icnss_hw_read_reg(priv->mem_base_va, PMM_REG_READ_DATA_OFFSET);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_REG_READ, 0);
-
- icnss_pr_dbg("RF register read command, data: 0x%08x, state: 0x%lx\n",
- rdata, priv->state);
-
- return rdata;
-}
-
-static int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
-{
- u32 rdata;
- int ret;
-
- icnss_pr_dbg("RESET: RF reset command, state: 0x%lx\n", priv->state);
-
- rdata = icnss_hw_rf_register_read_command(priv, 0x5080);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_USE_WLAN1_WSI, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- PMM_RF_VAULT_REG_ADDR_OFFSET,
- PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR,
- 0x5082);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- PMM_RF_VAULT_REG_DATA_OFFSET,
- PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA,
- 0x12AB8FAD);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_RF_RESET_ADDR_OFFSET,
- PMM_RF_RESET_ADDR_RF_RESET_ADDR, 0x5080);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_RF_RESET_DATA_OFFSET,
- PMM_RF_RESET_DATA_RF_RESET_DATA,
- rdata & 0xBFFF);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_RF_RESET, 1);
-
- ret = icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_RF_CMD_IP, 0, 10,
- ICNSS_HW_REG_RETRY);
-
- if (ret) {
- icnss_pr_err("RESET: RF reset command failed, state: 0x%lx\n",
- priv->state);
- return ret;
- }
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_RF_RESET, 0);
-
- return 0;
-}
-
-static int icnss_hw_reset_switch_to_cxo(struct icnss_priv *priv)
-{
- u32 rdata;
-
- icnss_pr_dbg("RESET: Switch to CXO, state: 0x%lx\n", priv->state);
-
- rdata = icnss_hw_read_reg(priv->mem_base_va,
- WCSS_HM_A_PMM_ROOT_CLK_ENABLE);
-
- icnss_pr_dbg("RESET: PMM_TCXO_CLK_ENABLE : 0x%05lx\n",
- rdata & PMM_TCXO_CLK_ENABLE);
-
- if ((rdata & PMM_TCXO_CLK_ENABLE) == 0) {
- icnss_pr_dbg("RESET: Set PMM_TCXO_CLK_ENABLE to 1\n");
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCSS_HM_A_PMM_ROOT_CLK_ENABLE,
- PMM_TCXO_CLK_ENABLE, 1);
- icnss_hw_poll_reg_field(priv->mem_base_va,
- WCSS_HM_A_PMM_ROOT_CLK_ENABLE,
- PMM_TCXO_CLK_ENABLE, 1, 10,
- ICNSS_HW_REG_RETRY);
- }
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET,
- WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCSS_CLK_CTL_NOC_CMD_RCGR_OFFSET,
- WCSS_CLK_CTL_NOC_CMD_RCGR_UPDATE, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCSS_CLK_CTL_REF_CFG_RCGR_OFFSET,
- WCSS_CLK_CTL_REF_CFG_RCGR_SRC_SEL, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- WCSS_CLK_CTL_REF_CMD_RCGR_OFFSET,
- WCSS_CLK_CTL_REF_CMD_RCGR_UPDATE, 1);
-
- return 0;
-}
-
-static int icnss_hw_reset_xo_disable_cmd(struct icnss_priv *priv)
-{
- int ret;
-
- icnss_pr_dbg("RESET: XO disable command, state: 0x%lx\n", priv->state);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_USE_WLAN1_WSI, 1);
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- PMM_RF_VAULT_REG_ADDR_OFFSET,
- PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR,
- 0x5082);
-
- icnss_hw_write_reg_field(priv->mem_base_va,
- PMM_RF_VAULT_REG_DATA_OFFSET,
- PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA,
- 0x12AB8FAD);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_XO_DIS_ADDR_OFFSET,
- PMM_XO_DIS_ADDR_XO_DIS_ADDR, 0x5081);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_XO_DIS_DATA_OFFSET,
- PMM_XO_DIS_DATA_XO_DIS_DATA, 1);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_XO_DIS, 1);
-
- ret = icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_RF_CMD_IP, 0, 10,
- ICNSS_HW_REG_RETRY);
- if (ret) {
- icnss_pr_err("RESET: XO disable command failed, state: 0x%lx\n",
- priv->state);
- return ret;
- }
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
-
- icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
- PMM_WSI_CMD_SW_XO_DIS, 0);
-
- return 0;
-}
-
-static int icnss_hw_reset(struct icnss_priv *priv)
-{
- u32 rdata;
- u32 rdata1;
- int i;
- int ret = 0;
-
- if (test_bit(HW_ONLY_TOP_LEVEL_RESET, &quirks))
- goto top_level_reset;
-
- icnss_pr_dbg("RESET: START, state: 0x%lx\n", priv->state);
-
- icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
- SR_WCSSAON_SR_LSB_RETENTION_STATUS, 1, 200,
- ICNSS_HW_REG_RETRY);
-
- for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
- rdata = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
- udelay(10);
- rdata1 = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
-
- icnss_pr_dbg("RESET: XO: 0x%05lx/0x%05lx, AHB: 0x%05lx/0x%05lx\n",
- rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK,
- rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK,
- rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK,
- rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK);
-
- if ((rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK) !=
- (rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK) &&
- (rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK) !=
- (rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK))
- break;
-
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE,
- 0x1);
- usleep_range(2000, 3000);
- }
-
- if (i >= ICNSS_HW_REG_RETRY)
- goto top_level_reset;
-
- icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0x1);
-
- usleep_range(200, 300);
-
- icnss_hw_reset_wlan_ss_power_down(priv);
-
- icnss_hw_reset_common_ss_power_down(priv);
-
- icnss_hw_reset_wlan_rfactrl_power_down(priv);
-
- ret = icnss_hw_reset_rf_reset_cmd(priv);
- if (ret) {
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
- goto top_level_reset;
- }
-
- icnss_hw_reset_switch_to_cxo(priv);
-
- for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
- rdata = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
- usleep_range(5, 10);
- rdata1 = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
-
- icnss_pr_dbg("RESET: SR_PMM_SR_MSB: 0x%08x/0x%08x, XO: 0x%05lx/0x%05lx, AHB: 0x%05lx/0x%05lx\n",
- rdata, rdata1,
- rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK,
- rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK,
- rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK,
- rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK);
-
- if ((rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK) !=
- (rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK) &&
- (rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK) !=
- (rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK))
- break;
- usleep_range(5, 10);
- }
-
- ret = icnss_hw_reset_xo_disable_cmd(priv);
- if (ret) {
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
- icnss_hw_write_reg_field(priv->mpm_config_va,
- MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
- goto top_level_reset;
- }
-
- icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
-
- icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
-
- icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
- MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
-
- icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
- BIT(26), 1, 200, ICNSS_HW_REG_RETRY);
-
-top_level_reset:
- icnss_hw_top_level_reset(priv);
-
- icnss_pr_dbg("RESET: DONE, state: 0x%lx\n", priv->state);
-
- return 0;
-}
-
static int icnss_hw_power_on(struct icnss_priv *priv)
{
int ret = 0;
@@ -1578,21 +680,6 @@ static int icnss_hw_power_on(struct icnss_priv *priv)
set_bit(ICNSS_POWER_ON, &priv->state);
spin_unlock_irqrestore(&priv->on_off_lock, flags);
- ret = icnss_vreg_on(priv);
- if (ret)
- goto out;
-
- ret = icnss_clk_init(priv);
- if (ret)
- goto out;
-
- icnss_hw_top_level_release_reset(priv);
-
- icnss_hw_io_reset(penv, 1);
-
- return ret;
-out:
- clear_bit(ICNSS_POWER_ON, &priv->state);
return ret;
}
@@ -1614,19 +701,6 @@ static int icnss_hw_power_off(struct icnss_priv *priv)
clear_bit(ICNSS_POWER_ON, &priv->state);
spin_unlock_irqrestore(&priv->on_off_lock, flags);
- icnss_hw_io_reset(penv, 0);
-
- icnss_hw_reset(priv);
-
- icnss_clk_deinit(priv);
-
- ret = icnss_vreg_off(priv);
- if (ret)
- goto out;
-
- return ret;
-out:
- set_bit(ICNSS_POWER_ON, &priv->state);
return ret;
}
@@ -2822,7 +1896,8 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
icnss_pr_dbg("Modem-Notify: event %lu\n", code);
- if (code == SUBSYS_AFTER_SHUTDOWN) {
+ if (code == SUBSYS_AFTER_SHUTDOWN &&
+ notif->crashed != CRASH_STATUS_WDOG_BITE) {
icnss_remove_msa_permissions(priv);
icnss_pr_info("Collecting msa0 segment dump\n");
icnss_msa0_ramdump(priv);
@@ -3692,117 +2767,6 @@ static void icnss_smmu_deinit(struct icnss_priv *priv)
priv->smmu_mapping = NULL;
}
-static int icnss_get_vreg_info(struct device *dev,
- struct icnss_vreg_info *vreg_info)
-{
- int ret = 0;
- char prop_name[MAX_PROP_SIZE];
- struct regulator *reg;
- const __be32 *prop;
- int len = 0;
- int i;
-
- reg = devm_regulator_get_optional(dev, vreg_info->name);
-
- if (PTR_ERR(reg) == -EPROBE_DEFER) {
- icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
- vreg_info->name);
- ret = PTR_ERR(reg);
- goto out;
- }
-
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
-
- if (vreg_info->required) {
-
- icnss_pr_err("Regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- goto out;
- } else {
- icnss_pr_dbg("Optional regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- goto done;
- }
- }
-
- vreg_info->reg = reg;
-
- snprintf(prop_name, MAX_PROP_SIZE,
- "qcom,%s-config", vreg_info->name);
-
- prop = of_get_property(dev->of_node, prop_name, &len);
-
- icnss_pr_dbg("Got regulator config, prop: %s, len: %d\n",
- prop_name, len);
-
- if (!prop || len < (2 * sizeof(__be32))) {
- icnss_pr_dbg("Property %s %s\n", prop_name,
- prop ? "invalid format" : "doesn't exist");
- goto done;
- }
-
- for (i = 0; (i * sizeof(__be32)) < len; i++) {
- switch (i) {
- case 0:
- vreg_info->min_v = be32_to_cpup(&prop[0]);
- break;
- case 1:
- vreg_info->max_v = be32_to_cpup(&prop[1]);
- break;
- case 2:
- vreg_info->load_ua = be32_to_cpup(&prop[2]);
- break;
- case 3:
- vreg_info->settle_delay = be32_to_cpup(&prop[3]);
- break;
- default:
- icnss_pr_dbg("Property %s, ignoring value at %d\n",
- prop_name, i);
- break;
- }
- }
-
-done:
- icnss_pr_dbg("Regulator: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v,
- vreg_info->load_ua, vreg_info->settle_delay);
-
- return 0;
-
-out:
- return ret;
-}
-
-static int icnss_get_clk_info(struct device *dev,
- struct icnss_clk_info *clk_info)
-{
- struct clk *handle;
- int ret = 0;
-
- handle = devm_clk_get(dev, clk_info->name);
-
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- if (clk_info->required) {
- icnss_pr_err("Clock %s isn't available: %d\n",
- clk_info->name, ret);
- goto out;
- } else {
- icnss_pr_dbg("Ignoring clock %s: %d\n", clk_info->name,
- ret);
- ret = 0;
- goto out;
- }
- }
-
- icnss_pr_dbg("Clock: %s, freq: %u\n", clk_info->name, clk_info->freq);
-
- clk_info->handle = handle;
-out:
- return ret;
-}
-
static int icnss_test_mode_show(struct seq_file *s, void *data)
{
struct icnss_priv *priv = s->private;
@@ -4475,21 +3439,6 @@ static int icnss_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto out;
- memcpy(priv->vreg_info, icnss_vreg_info, sizeof(icnss_vreg_info));
- for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
- ret = icnss_get_vreg_info(dev, &priv->vreg_info[i]);
-
- if (ret)
- goto out;
- }
-
- memcpy(priv->clk_info, icnss_clk_info, sizeof(icnss_clk_info));
- for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
- ret = icnss_get_clk_info(dev, &priv->clk_info[i]);
- if (ret)
- goto out;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
if (!res) {
icnss_pr_err("Memory base not found in DT\n");
@@ -4509,26 +3458,6 @@ static int icnss_probe(struct platform_device *pdev)
icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%p\n", &priv->mem_base_pa,
priv->mem_base_va);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "mpm_config");
- if (!res) {
- icnss_pr_err("MPM Config not found\n");
- ret = -EINVAL;
- goto out;
- }
- priv->mpm_config_pa = res->start;
- priv->mpm_config_va = devm_ioremap(dev, priv->mpm_config_pa,
- resource_size(res));
- if (!priv->mpm_config_va) {
- icnss_pr_err("MPM Config ioremap failed, phy addr: %pa\n",
- &priv->mpm_config_pa);
- ret = -EINVAL;
- goto out;
- }
-
- icnss_pr_dbg("MPM_CONFIG pa: %pa, va: 0x%p\n", &priv->mpm_config_pa,
- priv->mpm_config_va);
-
for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i);
if (!res) {
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index dc803bdfd554..8ed98e2cbd5e 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -82,7 +82,7 @@ static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
pr_err("Fatal error on the modem.\n");
- subsys_set_crash_status(drv->subsys, true);
+ subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
restart_modem(drv);
return IRQ_HANDLED;
}
@@ -193,7 +193,7 @@ static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
!gpio_get_value(drv->subsys_desc.err_fatal_gpio))
panic("%s: System ramdump requested. Triggering device restart!\n",
__func__);
- subsys_set_crash_status(drv->subsys, true);
+ subsys_set_crash_status(drv->subsys, CRASH_STATUS_WDOG_BITE);
restart_modem(drv);
return IRQ_HANDLED;
}
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index 43e3adeb0732..a39c2b6aa672 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -198,9 +198,9 @@ void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
ret = readl_poll_timeout(halt_base + AXI_HALTACK,
status, status != 0, 50, HALT_ACK_TIMEOUT_US);
if (ret)
- dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
+ dev_warn(pil->dev, "Port %pK halt timeout\n", halt_base);
else if (!readl_relaxed(halt_base + AXI_IDLE))
- dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
+ dev_warn(pil->dev, "Port %pK halt failed\n", halt_base);
/* Clear halt request (port will remain halted until reset) */
writel_relaxed(0, halt_base + AXI_HALTREQ);
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index f2216f968319..045a5001fc9f 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -134,6 +134,7 @@ struct scm_response {
#define R3_STR "r3"
#define R4_STR "r4"
#define R5_STR "r5"
+#define R6_STR "r6"
#endif
@@ -481,6 +482,7 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
register u32 r3 asm("r3") = w3;
register u32 r4 asm("r4") = w4;
register u32 r5 asm("r5") = w5;
+ register u32 r6 asm("r6") = 0;
do {
asm volatile(
@@ -494,13 +496,14 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
__asmeq("%7", R3_STR)
__asmeq("%8", R4_STR)
__asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
- "r" (r5));
+ "r" (r5), "r" (r6));
} while (r0 == SCM_INTERRUPTED);
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 5fee83f8ada4..5b3e6c36810e 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -2127,6 +2127,11 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
return -ENOMEM;
actual_size = spcom_handle_read(ch, buf, size);
+ if ((actual_size <= 0) || (actual_size > size)) {
+ pr_err("invalid actual_size [%d].\n", actual_size);
+ kfree(buf);
+ return -EFAULT;
+ }
ret = copy_to_user(user_buff, buf, actual_size);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index e70a56e7ce2e..b8d096a9c057 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -876,7 +876,7 @@ static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
d->subsys_desc.name);
return IRQ_HANDLED;
}
- subsys_set_crash_status(d->subsys, true);
+ subsys_set_crash_status(d->subsys, CRASH_STATUS_ERR_FATAL);
log_failure_reason(d);
subsystem_restart_dev(d->subsys);
@@ -895,7 +895,7 @@ static irqreturn_t subsys_wdog_bite_irq_handler(int irq, void *dev_id)
!gpio_get_value(d->subsys_desc.err_fatal_gpio))
panic("%s: System ramdump requested. Triggering device restart!\n",
__func__);
- subsys_set_crash_status(d->subsys, true);
+ subsys_set_crash_status(d->subsys, CRASH_STATUS_WDOG_BITE);
log_failure_reason(d);
subsystem_restart_dev(d->subsys);
@@ -952,7 +952,7 @@ static void clear_wdog(struct pil_tz_data *d)
if (!subsys_get_crash_status(d->subsys)) {
pr_err("wdog bite received from %s!\n", d->subsys_desc.name);
__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
- subsys_set_crash_status(d->subsys, true);
+ subsys_set_crash_status(d->subsys, CRASH_STATUS_WDOG_BITE);
log_failure_reason(d);
subsystem_restart_dev(d->subsys);
}
diff --git a/drivers/soc/qcom/subsystem_notif.c b/drivers/soc/qcom/subsystem_notif.c
index 431bbd8cee6f..8b24008b5abe 100644
--- a/drivers/soc/qcom/subsystem_notif.c
+++ b/drivers/soc/qcom/subsystem_notif.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2013, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -196,7 +196,7 @@ static int subsys_notifier_test_call(struct notifier_block *this,
switch (code) {
default:
- printk(KERN_WARNING "%s: Notification %s from subsystem %p\n",
+ pr_warn("%s: Notification %s from subsystem %pK\n",
__func__, notif_to_string(code), data);
break;
@@ -212,7 +212,7 @@ static struct notifier_block nb = {
static void subsys_notif_reg_test_notifier(const char *subsys_name)
{
void *handle = subsys_notif_register_notifier(subsys_name, &nb);
- printk(KERN_WARNING "%s: Registered test notifier, handle=%p",
+ pr_warn("%s: Registered test notifier, handle=%pK",
__func__, handle);
}
#endif
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 015e60ac622c..77362912321d 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -178,7 +178,7 @@ struct subsys_device {
struct cdev char_dev;
dev_t dev_no;
struct completion err_ready;
- bool crashed;
+ enum crash_status crashed;
int notif_state;
struct list_head list;
};
@@ -598,10 +598,11 @@ static void subsystem_shutdown(struct subsys_device *dev, void *data)
{
const char *name = dev->desc->name;
- pr_info("[%p]: Shutting down %s\n", current, name);
+ pr_info("[%s:%d]: Shutting down %s\n",
+ current->comm, current->pid, name);
if (dev->desc->shutdown(dev->desc, true) < 0)
- panic("subsys-restart: [%p]: Failed to shutdown %s!",
- current, name);
+ panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
+ current->comm, current->pid, name);
dev->crash_count++;
subsys_set_state(dev, SUBSYS_OFFLINE);
disable_all_irqs(dev);
@@ -613,7 +614,8 @@ static void subsystem_ramdump(struct subsys_device *dev, void *data)
if (dev->desc->ramdump)
if (dev->desc->ramdump(is_ramdump_enabled(dev), dev->desc) < 0)
- pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+ pr_warn("%s[%s:%d]: Ramdump failed.\n",
+ name, current->comm, current->pid);
dev->do_ramdump_on_put = false;
}
@@ -628,13 +630,14 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
const char *name = dev->desc->name;
int ret;
- pr_info("[%p]: Powering up %s\n", current, name);
+ pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
init_completion(&dev->err_ready);
if (dev->desc->powerup(dev->desc) < 0) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- panic("[%p]: Powerup error: %s!", current, name);
+ panic("[%s:%d]: Powerup error: %s!",
+ current->comm, current->pid, name);
}
enable_all_irqs(dev);
@@ -642,11 +645,11 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
if (ret) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- panic("[%p]: Timed out waiting for error ready: %s!",
- current, name);
+ panic("[%s:%d]: Timed out waiting for error ready: %s!",
+ current->comm, current->pid, name);
}
subsys_set_state(dev, SUBSYS_ONLINE);
- subsys_set_crash_status(dev, false);
+ subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);
}
static int __find_subsys(struct device *dev, void *data)
@@ -952,8 +955,8 @@ static void subsystem_restart_wq_func(struct work_struct *work)
*/
mutex_lock(&soc_order_reg_lock);
- pr_debug("[%p]: Starting restart sequence for %s\n", current,
- desc->name);
+ pr_debug("[%s:%d]: Starting restart sequence for %s\n",
+ current->comm, current->pid, desc->name);
notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
for_each_subsys_device(list, count, NULL, subsystem_shutdown);
notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
@@ -974,8 +977,8 @@ static void subsystem_restart_wq_func(struct work_struct *work)
for_each_subsys_device(list, count, NULL, subsystem_powerup);
notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
- pr_info("[%p]: Restart sequence for %s completed.\n",
- current, desc->name);
+ pr_info("[%s:%d]: Restart sequence for %s completed.\n",
+ current->comm, current->pid, desc->name);
mutex_unlock(&soc_order_reg_lock);
mutex_unlock(&track->lock);
@@ -1126,12 +1129,13 @@ int subsystem_crashed(const char *name)
}
EXPORT_SYMBOL(subsystem_crashed);
-void subsys_set_crash_status(struct subsys_device *dev, bool crashed)
+void subsys_set_crash_status(struct subsys_device *dev,
+ enum crash_status crashed)
{
dev->crashed = crashed;
}
-bool subsys_get_crash_status(struct subsys_device *dev)
+enum crash_status subsys_get_crash_status(struct subsys_device *dev)
{
return dev->crashed;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 73c8ea0b1360..3cac73e4c3e4 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -548,7 +548,14 @@ static void reset_sccr1(struct driver_data *drv_data)
u32 sccr1_reg;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
- sccr1_reg &= ~SSCR1_RFT;
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
+ break;
+ default:
+ sccr1_reg &= ~SSCR1_RFT;
+ break;
+ }
sccr1_reg |= chip->threshold;
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index fbb0a4d74e91..39d7c7c70112 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -170,13 +170,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
{
struct sun4i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
/* We don't support transfer larger than the FIFO */
if (tfr->len > SUN4I_FIFO_DEPTH)
- return -EINVAL;
+ return -EMSGSIZE;
+
+ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+ return -EMSGSIZE;
reinit_completion(&sspi->done);
sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
- /* Fill the TX FIFO */
- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /*
+ * Fill the TX FIFO
+ * Filling the FIFO fully causes timeout for some reason
+ * at least on spi2 on A10s
+ */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index ac48f59705a8..e77add01b0e9 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 934ac0e642d9..a5d59e7f2d90 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,6 +45,8 @@
#include <linux/msm-bus-board.h>
#include "spi_qsd.h"
+#define SPI_MAX_BYTES_PER_WORD (4)
+
static int msm_spi_pm_resume_runtime(struct device *device);
static int msm_spi_pm_suspend_runtime(struct device *device);
static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
@@ -438,10 +440,12 @@ static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
u32 data_in;
int i;
int shift;
+ int read_bytes = (dd->pack_words ?
+ SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
if (dd->read_buf) {
- for (i = 0; (i < dd->bytes_per_word) &&
+ for (i = 0; (i < read_bytes) &&
dd->rx_bytes_remaining; i++) {
/* The data format depends on bytes_per_word:
4 bytes: 0x12345678
@@ -454,8 +458,8 @@ static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
dd->rx_bytes_remaining--;
}
} else {
- if (dd->rx_bytes_remaining >= dd->bytes_per_word)
- dd->rx_bytes_remaining -= dd->bytes_per_word;
+ if (dd->rx_bytes_remaining >= read_bytes)
+ dd->rx_bytes_remaining -= read_bytes;
else
dd->rx_bytes_remaining = 0;
}
@@ -552,7 +556,7 @@ msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
if (n != (*config & SPI_CFG_N))
*config = (*config & ~SPI_CFG_N) | n;
- if (dd->mode == SPI_BAM_MODE) {
+ if (dd->tx_mode == SPI_BAM_MODE) {
if (dd->read_buf == NULL)
*config |= SPI_NO_INPUT;
if (dd->write_buf == NULL)
@@ -617,25 +621,34 @@ static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
/*
- * n_words cannot exceed fifo_size, and only one READ COUNT
- * interrupt is generated per transaction, so for transactions
- * larger than fifo size READ COUNT must be disabled.
- * For those transactions we usually move to Data Mover mode.
+ * For FIFO mode:
+ * - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0
+ * - Set the READ/WRITE_COUNT registers to 0 (infinite mode)
+ * or num bytes (finite mode) if less than fifo worth of data.
+ * For Block mode:
+ * - Set the MX_OUTPUT/MX_INPUT_COUNT registers to num xfer bytes.
+ * - Set the READ/WRITE_COUNT registers to 0.
*/
- if (dd->mode == SPI_FIFO_MODE) {
- if (n_words <= dd->input_fifo_size) {
- writel_relaxed(n_words,
- dd->base + SPI_MX_READ_COUNT);
- msm_spi_set_write_count(dd, n_words);
- } else {
- writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
- msm_spi_set_write_count(dd, 0);
- }
- if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
- /* must be zero for FIFO */
- writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+ if (dd->tx_mode != SPI_BAM_MODE) {
+ if (dd->tx_mode == SPI_FIFO_MODE) {
+ if (n_words <= dd->input_fifo_size)
+ msm_spi_set_write_count(dd, n_words);
+ else
+ msm_spi_set_write_count(dd, 0);
writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
- }
+ } else
+ writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT);
+
+ if (dd->rx_mode == SPI_FIFO_MODE) {
+ if (n_words <= dd->input_fifo_size)
+ writel_relaxed(n_words,
+ dd->base + SPI_MX_READ_COUNT);
+ else
+ writel_relaxed(0,
+ dd->base + SPI_MX_READ_COUNT);
+ writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+ } else
+ writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT);
} else {
/* must be zero for BAM and DMOV */
writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
@@ -882,7 +895,7 @@ xfr_err:
static int
msm_spi_bam_next_transfer(struct msm_spi *dd)
{
- if (dd->mode != SPI_BAM_MODE)
+ if (dd->tx_mode != SPI_BAM_MODE)
return 0;
if (dd->tx_bytes_remaining > 0) {
@@ -901,7 +914,7 @@ msm_spi_bam_next_transfer(struct msm_spi *dd)
static int msm_spi_dma_send_next(struct msm_spi *dd)
{
int ret = 0;
- if (dd->mode == SPI_BAM_MODE)
+ if (dd->tx_mode == SPI_BAM_MODE)
ret = msm_spi_bam_next_transfer(dd);
return ret;
}
@@ -932,32 +945,38 @@ static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
}
op = readl_relaxed(dd->base + SPI_OPERATIONAL);
+ writel_relaxed(op, dd->base + SPI_OPERATIONAL);
+ /*
+ * Ensure service flag was cleared before further
+ * processing of interrupt.
+ */
+ mb();
if (op & SPI_OP_INPUT_SERVICE_FLAG) {
- writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
- dd->base + SPI_OPERATIONAL);
- /*
- * Ensure service flag was cleared before further
- * processing of interrupt.
- */
- mb();
ret |= msm_spi_input_irq(irq, dev_id);
}
if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
- writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
- dd->base + SPI_OPERATIONAL);
- /*
- * Ensure service flag was cleared before further
- * processing of interrupt.
- */
- mb();
ret |= msm_spi_output_irq(irq, dev_id);
}
- if (dd->done) {
+ if (dd->tx_mode != SPI_BAM_MODE) {
+ if (!dd->rx_done) {
+ if (dd->rx_bytes_remaining == 0)
+ dd->rx_done = true;
+ }
+ if (!dd->tx_done) {
+ if (!dd->tx_bytes_remaining &&
+ (op & SPI_OP_IP_FIFO_NOT_EMPTY)) {
+ dd->tx_done = true;
+ }
+ }
+ }
+ if (dd->tx_done && dd->rx_done) {
+ msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+ dd->tx_done = false;
+ dd->rx_done = false;
complete(&dd->rx_transfer_complete);
complete(&dd->tx_transfer_complete);
- dd->done = 0;
}
return ret;
}
@@ -968,17 +987,23 @@ static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
dd->stat_rx++;
- if (dd->mode == SPI_MODE_NONE)
+ if (dd->rx_mode == SPI_MODE_NONE)
return IRQ_HANDLED;
- if (dd->mode == SPI_FIFO_MODE) {
+ if (dd->rx_mode == SPI_FIFO_MODE) {
while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
SPI_OP_IP_FIFO_NOT_EMPTY) &&
(dd->rx_bytes_remaining > 0)) {
msm_spi_read_word_from_fifo(dd);
}
- if (dd->rx_bytes_remaining == 0)
- msm_spi_complete(dd);
+ } else if (dd->rx_mode == SPI_BLOCK_MODE) {
+ int count = 0;
+
+ while (dd->rx_bytes_remaining &&
+ (count < dd->input_block_size)) {
+ msm_spi_read_word_from_fifo(dd);
+ count += SPI_MAX_BYTES_PER_WORD;
+ }
}
return IRQ_HANDLED;
@@ -989,18 +1014,20 @@ static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
u32 word;
u8 byte;
int i;
+ int write_bytes =
+ (dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
word = 0;
if (dd->write_buf) {
- for (i = 0; (i < dd->bytes_per_word) &&
+ for (i = 0; (i < write_bytes) &&
dd->tx_bytes_remaining; i++) {
dd->tx_bytes_remaining--;
byte = *dd->write_buf++;
word |= (byte << (BITS_PER_BYTE * i));
}
} else
- if (dd->tx_bytes_remaining > dd->bytes_per_word)
- dd->tx_bytes_remaining -= dd->bytes_per_word;
+ if (dd->tx_bytes_remaining > write_bytes)
+ dd->tx_bytes_remaining -= write_bytes;
else
dd->tx_bytes_remaining = 0;
dd->write_xfr_cnt++;
@@ -1012,11 +1039,22 @@ static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
int count = 0;
- while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
- !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
- SPI_OP_OUTPUT_FIFO_FULL)) {
- msm_spi_write_word_to_fifo(dd);
- count++;
+ if (dd->tx_mode == SPI_FIFO_MODE) {
+ while ((dd->tx_bytes_remaining > 0) &&
+ (count < dd->input_fifo_size) &&
+ !(readl_relaxed(dd->base + SPI_OPERATIONAL)
+ & SPI_OP_OUTPUT_FIFO_FULL)) {
+ msm_spi_write_word_to_fifo(dd);
+ count++;
+ }
+ }
+
+ if (dd->tx_mode == SPI_BLOCK_MODE) {
+ while (dd->tx_bytes_remaining &&
+ (count < dd->output_block_size)) {
+ msm_spi_write_word_to_fifo(dd);
+ count += SPI_MAX_BYTES_PER_WORD;
+ }
}
}
@@ -1026,11 +1064,11 @@ static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
dd->stat_tx++;
- if (dd->mode == SPI_MODE_NONE)
+ if (dd->tx_mode == SPI_MODE_NONE)
return IRQ_HANDLED;
/* Output FIFO is empty. Transmit any outstanding write data. */
- if (dd->mode == SPI_FIFO_MODE)
+ if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE))
msm_spi_write_rmn_to_fifo(dd);
return IRQ_HANDLED;
@@ -1106,7 +1144,7 @@ error:
static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
int ret = 0;
- if (dd->mode == SPI_BAM_MODE)
+ if (dd->tx_mode == SPI_BAM_MODE)
ret = msm_spi_bam_map_buffers(dd);
return ret;
}
@@ -1135,7 +1173,7 @@ static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
{
- if (dd->mode == SPI_BAM_MODE)
+ if (dd->tx_mode == SPI_BAM_MODE)
msm_spi_bam_unmap_buffers(dd);
}
@@ -1197,9 +1235,11 @@ static void
msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
{
if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
- dd->mode = SPI_BAM_MODE;
+ dd->tx_mode = SPI_BAM_MODE;
+ dd->rx_mode = SPI_BAM_MODE;
} else {
- dd->mode = SPI_FIFO_MODE;
+ dd->rx_mode = SPI_FIFO_MODE;
+ dd->tx_mode = SPI_FIFO_MODE;
dd->read_len = dd->cur_transfer->len;
dd->write_len = dd->cur_transfer->len;
}
@@ -1215,14 +1255,19 @@ static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
/* Set input and output transfer mode: FIFO, DMOV, or BAM */
spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
- spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
- spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
- /* Turn on packing for data mover */
- if (dd->mode == SPI_BAM_MODE)
+ spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT));
+ spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT));
+ /* Always enable packing for all % 8 bits_per_word */
+ if (dd->cur_transfer->bits_per_word &&
+ ((dd->cur_transfer->bits_per_word == 8) ||
+ (dd->cur_transfer->bits_per_word == 16) ||
+ (dd->cur_transfer->bits_per_word == 32))) {
spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
- else {
+ dd->pack_words = true;
+ } else {
spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN;
+ dd->pack_words = false;
}
/*if (dd->mode == SPI_BAM_MODE) {
@@ -1280,7 +1325,7 @@ static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
{
/* mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status
* change in BAM mode */
- u32 mask = (dd->mode == SPI_BAM_MODE) ?
+ u32 mask = (dd->tx_mode == SPI_BAM_MODE) ?
QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
: 0;
writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
@@ -1321,6 +1366,8 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
dd->rx_bytes_remaining = dd->cur_msg_len;
dd->read_buf = dd->cur_transfer->rx_buf;
dd->write_buf = dd->cur_transfer->tx_buf;
+ dd->tx_done = false;
+ dd->rx_done = false;
init_completion(&dd->tx_transfer_complete);
init_completion(&dd->rx_transfer_complete);
if (dd->cur_transfer->bits_per_word)
@@ -1351,10 +1398,12 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
msm_spi_set_transfer_mode(dd, bpw, read_count);
msm_spi_set_mx_counts(dd, read_count);
- if (dd->mode == SPI_BAM_MODE) {
+ if (dd->tx_mode == SPI_BAM_MODE) {
ret = msm_spi_dma_map_buffers(dd);
if (ret < 0) {
pr_err("Mapping DMA buffers\n");
+ dd->tx_mode = SPI_MODE_NONE;
+ dd->rx_mode = SPI_MODE_NONE;
return ret;
}
}
@@ -1368,11 +1417,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
the first. Restricting this to one write avoids contention
issues and race conditions between this thread and the int handler
*/
- if (dd->mode == SPI_FIFO_MODE) {
+ if (dd->tx_mode != SPI_BAM_MODE) {
if (msm_spi_prepare_for_write(dd))
goto transfer_end;
msm_spi_start_write(dd, read_count);
- } else if (dd->mode == SPI_BAM_MODE) {
+ } else {
if ((msm_spi_bam_begin_transfer(dd)) < 0) {
dev_err(dd->dev, "%s: BAM transfer setup failed\n",
__func__);
@@ -1388,11 +1437,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
* might fire before the first word is written resulting in a
* possible race condition.
*/
- if (dd->mode != SPI_BAM_MODE)
+ if (dd->tx_mode != SPI_BAM_MODE)
if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
dev_warn(dd->dev,
"%s: Failed to set QUP to run-state. Mode:%d",
- __func__, dd->mode);
+ __func__, dd->tx_mode);
goto transfer_end;
}
@@ -1422,10 +1471,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
msm_spi_udelay(dd->xfrs_delay_usec);
transfer_end:
- if ((dd->mode == SPI_BAM_MODE) && status)
+ if ((dd->tx_mode == SPI_BAM_MODE) && status)
msm_spi_bam_flush(dd);
msm_spi_dma_unmap_buffers(dd);
- dd->mode = SPI_MODE_NONE;
+ dd->tx_mode = SPI_MODE_NONE;
+ dd->rx_mode = SPI_MODE_NONE;
msm_spi_set_state(dd, SPI_OP_STATE_RESET);
if (!dd->cur_transfer->cs_change)
@@ -2350,7 +2400,8 @@ static int init_resources(struct platform_device *pdev)
pclk_enabled = 0;
dd->transfer_pending = 0;
- dd->mode = SPI_MODE_NONE;
+ dd->tx_mode = SPI_MODE_NONE;
+ dd->rx_mode = SPI_MODE_NONE;
rc = msm_spi_request_irq(dd, pdev, master);
if (rc)
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index cfc0e754f255..fb906939c03a 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -113,6 +113,8 @@
#define INPUT_MODE_SHIFT QSD_REG(10) QUP_REG(12)
/* SPI_OPERATIONAL fields */
+#define SPI_OP_IN_BLK_RD_REQ_FLAG 0x00002000
+#define SPI_OP_OUT_BLK_WR_REQ_FLAG 0x00001000
#define SPI_OP_MAX_INPUT_DONE_FLAG 0x00000800
#define SPI_OP_MAX_OUTPUT_DONE_FLAG 0x00000400
#define SPI_OP_INPUT_SERVICE_FLAG 0x00000200
@@ -318,7 +320,8 @@ struct msm_spi {
bool transfer_pending;
wait_queue_head_t continue_suspend;
/* DMA data */
- enum msm_spi_mode mode;
+ enum msm_spi_mode tx_mode;
+ enum msm_spi_mode rx_mode;
bool use_dma;
int tx_dma_chan;
int tx_dma_crci;
@@ -349,7 +352,8 @@ struct msm_spi {
#endif
struct msm_spi_platform_data *pdata; /* Platform data */
/* When set indicates multiple transfers in a single message */
- bool done;
+ bool rx_done;
+ bool tx_done;
u32 cur_msg_len;
/* Used in FIFO mode to keep track of the transfer being processed */
struct spi_transfer *cur_tx_transfer;
@@ -367,6 +371,7 @@ struct msm_spi {
struct pinctrl_state *pins_active;
struct pinctrl_state *pins_sleep;
bool is_init_complete;
+ bool pack_words;
};
/* Forward declaration */
@@ -523,7 +528,8 @@ static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
static inline void msm_spi_complete(struct msm_spi *dd)
{
- dd->done = 1;
+ dd->tx_done = true;
+ dd->rx_done = true;
}
static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index a76a7ff618b9..68a4559f9d26 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -98,6 +98,8 @@ static unsigned long lowmem_count(struct shrinker *s,
static atomic_t shift_adj = ATOMIC_INIT(0);
static short adj_max_shift = 353;
+module_param_named(adj_max_shift, adj_max_shift, short,
+ S_IRUGO | S_IWUSR);
/* User knob to enable/disable adaptive lmk feature */
static int enable_adaptive_lmk;
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 4ab186669f0c..ec5b9a23494d 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -56,11 +56,6 @@
#define N_CHANS 8
-enum waveform_state_bits {
- WAVEFORM_AI_RUNNING,
- WAVEFORM_AO_RUNNING
-};
-
/* Data unique to this driver */
struct waveform_private {
struct timer_list ai_timer; /* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
unsigned int wf_amplitude; /* waveform amplitude in microvolts */
unsigned int wf_period; /* waveform period in microseconds */
unsigned int wf_current; /* current time in waveform period */
- unsigned long state_bits;
unsigned int ai_scan_period; /* AI scan period in usec */
unsigned int ai_convert_period; /* AI conversion period in usec */
struct timer_list ao_timer; /* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
unsigned int nsamples;
unsigned int time_increment;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
- return;
-
now = ktime_to_us(ktime_get());
nsamples = comedi_nsamples_left(s, UINT_MAX);
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
*/
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ai_timer);
return 0;
}
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ai_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ai_timer);
+ } else {
+ del_timer_sync(&devpriv->ai_timer);
+ }
return 0;
}
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
u64 scans_since;
unsigned int scans_avail = 0;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
- return;
-
/* determine number of scan periods since last time */
now = ktime_to_us(ktime_get());
scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ao_timer);
return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ao_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ao_timer);
+ } else {
+ del_timer_sync(&devpriv->ao_timer);
+ }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 57ab6680e3ae..e5fee6e0fb47 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -636,7 +636,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
const struct daq200_boardtype *board;
int i;
- if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+ if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
return NULL;
for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 27fbf1a81097..35ab4a9ef95d 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2823,7 +2823,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
int i;
static const int timeout = 1000;
- if (trig_num != cmd->start_arg)
+ /*
+ * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+ * For backwards compatibility, also allow trig_num == 0 when
+ * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+ * in that case, the internal trigger is being used as a pre-trigger
+ * before the external trigger.
+ */
+ if (!(trig_num == cmd->start_arg ||
+ (trig_num == 0 && cmd->start_src != TRIG_INT)))
return -EINVAL;
/* Null trig at beginning prevent ao start trigger from executing more than
@@ -5346,7 +5354,7 @@ static int ni_E_init(struct comedi_device *dev,
s->maxdata = (devpriv->is_m_series) ? 0xffffffff
: 0x00ffffff;
s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
s->insn_config = ni_tio_insn_config;
#ifdef PCIDMA
if (dev->irq && devpriv->mite) {
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 9096d311e45d..c2d9b793759d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -631,8 +631,6 @@ struct ll_file_data {
struct lov_stripe_md;
-extern spinlock_t inode_lock;
-
extern struct dentry *llite_root;
extern struct kset *llite_kset;
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
index 13c3cd11ab92..05d30f433b19 100644
--- a/drivers/staging/rdma/ipath/ipath_file_ops.c
+++ b/drivers/staging/rdma/ipath/ipath_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/uio.h>
#include <asm/pgtable.h>
+#include <rdma/ib.h>
+
#include "ipath_kernel.h"
#include "ipath_common.h"
#include "ipath_user_sdma.h"
@@ -2243,6 +2245,9 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 72204fbf2bb1..bd810c109277 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -492,7 +492,8 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
spin_lock_bh(&conn->cmd_lock);
- if (!list_empty(&cmd->i_conn_node))
+ if (!list_empty(&cmd->i_conn_node) &&
+ !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
@@ -4194,6 +4195,7 @@ transport_err:
static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
+ LIST_HEAD(tmp_list);
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsi_session *sess = conn->sess;
/*
@@ -4202,18 +4204,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
* has been reset -> returned sleeping pre-handler state.
*/
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+ list_splice_init(&conn->conn_cmd_list, &tmp_list);
+ list_for_each_entry(cmd, &tmp_list, i_conn_node) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (se_cmd->se_tfo != NULL) {
+ spin_lock(&se_cmd->t_state_lock);
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ spin_unlock(&se_cmd->t_state_lock);
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
- spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
-
iscsit_free_cmd(cmd, true);
- spin_lock_bh(&conn->cmd_lock);
}
- spin_unlock_bh(&conn->cmd_lock);
}
static void iscsit_stop_timers_for_cmds(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 96e78c823d13..316f66172335 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1357,8 +1357,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
login->zero_tsih = zero_tsih;
- conn->sess->se_sess->sup_prot_ops =
- conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+ if (conn->sess)
+ conn->sess->se_sess->sup_prot_ops =
+ conn->conn_transport->iscsit_get_sup_prot_ops(conn);
tpg = conn->tpg;
if (!tpg) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 3436a83568ea..dcd5ed26eb18 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -832,13 +832,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
* in ATA and we need to set TPE=1
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q, int block_size)
+ struct request_queue *q)
{
+ int block_size = queue_logical_block_size(q);
+
if (!blk_queue_discard(q))
return false;
- attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
- block_size;
+ attrib->max_unmap_lba_count =
+ q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 75f0f08b2a34..79291869bce6 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
- fd_dev->fd_block_size))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
pr_debug("IFILE: BLOCK Discard support available,"
" disabled by default\n");
/*
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 2c53dcefff3e..4620c1dcdbc7 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
- dev->dev_attrib.hw_block_size))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c2032..253a91bff943 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -148,6 +148,7 @@ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
bool target_check_wce(struct se_device *dev);
bool target_check_fua(struct se_device *dev);
+void __target_execute_cmd(struct se_cmd *, bool);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 98698d875742..c220bb8dfa9d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -594,7 +594,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, false);
kfree(buf);
return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d151bc3d6971..7bc3778a1ac9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1270,23 +1270,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
trace_target_sequencer_start(cmd);
- /*
- * Check for an existing UNIT ATTENTION condition
- */
- ret = target_scsi3_ua_check(cmd);
- if (ret)
- return ret;
-
- ret = target_alua_state_check(cmd);
- if (ret)
- return ret;
-
- ret = target_check_reservation(cmd);
- if (ret) {
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- return ret;
- }
-
ret = dev->transport->parse_cdb(cmd);
if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
@@ -1749,20 +1732,45 @@ queue_full:
}
EXPORT_SYMBOL(transport_generic_request_failure);
-void __target_execute_cmd(struct se_cmd *cmd)
+void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
sense_reason_t ret;
- if (cmd->execute_cmd) {
- ret = cmd->execute_cmd(cmd);
- if (ret) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
- spin_unlock_irq(&cmd->t_state_lock);
+ if (!cmd->execute_cmd) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto err;
+ }
+ if (do_checks) {
+ /*
+ * Check for an existing UNIT ATTENTION condition after
+ * target_handle_task_attr() has done SAM task attr
+ * checking, and possibly have already defered execution
+ * out to target_restart_delayed_cmds() context.
+ */
+ ret = target_scsi3_ua_check(cmd);
+ if (ret)
+ goto err;
+
+ ret = target_alua_state_check(cmd);
+ if (ret)
+ goto err;
- transport_generic_request_failure(cmd, ret);
+ ret = target_check_reservation(cmd);
+ if (ret) {
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ goto err;
}
}
+
+ ret = cmd->execute_cmd(cmd);
+ if (!ret)
+ return;
+err:
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ transport_generic_request_failure(cmd, ret);
}
static int target_write_prot_action(struct se_cmd *cmd)
@@ -1807,6 +1815,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
+ cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
+
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
@@ -1887,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd)
return;
}
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);
@@ -1911,7 +1921,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
break;
@@ -1929,6 +1939,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
+ if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
+ goto restart;
+
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
atomic_dec_mb(&dev->simple_cmds);
dev->dev_cur_ordered_id++;
@@ -1945,7 +1958,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
-
+restart:
target_restart_delayed_cmds(dev);
}
@@ -2533,15 +2546,10 @@ static void target_release_cmd_kref(struct kref *kref)
bool fabric_stop;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (list_empty(&se_cmd->se_cmd_list)) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- se_cmd->se_tfo->release_cmd(se_cmd);
- return;
- }
spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
+ (se_cmd->transport_state & CMD_T_ABORTED);
spin_unlock(&se_cmd->t_state_lock);
if (se_cmd->cmd_wait_set || fabric_stop) {
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 7592bcb984ff..0a9e561268b6 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -832,8 +832,12 @@ struct tsens_tm_device {
bool calibration_less_mode;
bool tsens_local_init;
bool gain_offset_programmed;
+ bool cycle_compltn_monitor;
+ bool wd_bark;
int tsens_factor;
uint32_t tsens_num_sensor;
+ uint32_t cycle_compltn_monitor_val;
+ uint32_t wd_bark_val;
int tsens_irq;
int tsens_critical_irq;
void *tsens_addr;
@@ -5331,6 +5335,7 @@ static int get_device_tree_data(struct platform_device *pdev,
u32 *tsens_slope_data, *sensor_id, *client_id;
u32 *temp1_calib_offset_factor, *temp2_calib_offset_factor;
u32 rc = 0, i, tsens_num_sensors = 0;
+ u32 cycle_monitor = 0, wd_bark = 0;
const struct of_device_id *id;
rc = of_property_read_u32(of_node,
@@ -5428,6 +5433,28 @@ static int get_device_tree_data(struct platform_device *pdev,
}
}
+ rc = of_property_read_u32(of_node,
+ "qcom,cycle-monitor", &cycle_monitor);
+ if (rc) {
+ pr_debug("Default cycle completion monitor\n");
+ tmdev->cycle_compltn_monitor = false;
+ } else {
+ pr_debug("Use specified cycle completion monitor\n");
+ tmdev->cycle_compltn_monitor = true;
+ tmdev->cycle_compltn_monitor_val = cycle_monitor;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,wd-bark", &wd_bark);
+ if (rc) {
+ pr_debug("Default Watchdog bark\n");
+ tmdev->wd_bark = false;
+ } else {
+ pr_debug("Use specified Watchdog bark\n");
+ tmdev->wd_bark = true;
+ tmdev->wd_bark_val = wd_bark;
+ }
+
if (!strcmp(id->compatible, "qcom,mdm9630-tsens") ||
(!strcmp(id->compatible, "qcom,msmzirc-tsens")) ||
(!strcmp(id->compatible, "qcom,msm8994-tsens")) ||
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 7865228f664f..807d80145686 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -679,14 +679,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
/* this is called once with whichever end is closed last */
static void pty_unix98_shutdown(struct tty_struct *tty)
{
- struct inode *ptmx_inode;
+ struct pts_fs_info *fsi;
if (tty->driver->subtype == PTY_TYPE_MASTER)
- ptmx_inode = tty->driver_data;
+ fsi = tty->driver_data;
else
- ptmx_inode = tty->link->driver_data;
- devpts_kill_index(ptmx_inode, tty->index);
- devpts_del_ref(ptmx_inode);
+ fsi = tty->link->driver_data;
+ devpts_kill_index(fsi, tty->index);
+ devpts_put_ref(fsi);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -738,6 +738,7 @@ static const struct tty_operations pty_unix98_ops = {
static int ptmx_open(struct inode *inode, struct file *filp)
{
+ struct pts_fs_info *fsi;
struct tty_struct *tty;
struct inode *slave_inode;
int retval;
@@ -752,47 +753,41 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
+ fsi = devpts_get_ref(inode, filp);
+ retval = -ENODEV;
+ if (!fsi)
+ goto out_free_file;
+
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
- index = devpts_new_index(inode);
- if (index < 0) {
- retval = index;
- mutex_unlock(&devpts_mutex);
- goto err_file;
- }
-
+ index = devpts_new_index(fsi);
mutex_unlock(&devpts_mutex);
- mutex_lock(&tty_mutex);
- tty = tty_init_dev(ptm_driver, index);
+ retval = index;
+ if (index < 0)
+ goto out_put_ref;
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto out;
- }
+ mutex_lock(&tty_mutex);
+ tty = tty_init_dev(ptm_driver, index);
/* The tty returned here is locked so we can safely
drop the mutex */
mutex_unlock(&tty_mutex);
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- tty->driver_data = inode;
+ retval = PTR_ERR(tty);
+ if (IS_ERR(tty))
+ goto out;
/*
- * In the case where all references to ptmx inode are dropped and we
- * still have /dev/tty opened pointing to the master/slave pair (ptmx
- * is closed/released before /dev/tty), we must make sure that the inode
- * is still valid when we call the final pty_unix98_shutdown, thus we
- * hold an additional reference to the ptmx inode. For the same /dev/tty
- * last close case, we also need to make sure the super_block isn't
- * destroyed (devpts instance unmounted), before /dev/tty is closed and
- * on its release devpts_kill_index is called.
+ * From here on out, the tty is "live", and the index and
+ * fsi will be killed/put by the tty_release()
*/
- devpts_add_ref(inode);
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = fsi;
tty_add_file(tty, filp);
- slave_inode = devpts_pty_new(inode,
+ slave_inode = devpts_pty_new(fsi,
MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
tty->link);
if (IS_ERR(slave_inode)) {
@@ -811,12 +806,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return 0;
err_release:
tty_unlock(tty);
+ // This will also put-ref the fsi
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
- devpts_kill_index(inode, index);
-err_file:
+ devpts_kill_index(fsi, index);
+out_put_ref:
+ devpts_put_ref(fsi);
+out_free_file:
tty_free_file(filp);
return retval;
}
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 7bbadd176c74..7b5462eb8388 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -485,19 +485,21 @@ static void atmel_start_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_pdc_tx(port)) {
- if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
- /* The transmitter is already running. Yes, we
- really need this.*/
- return;
+ if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
+ & ATMEL_PDC_TXTEN))
+ /* The transmitter is already running. Yes, we
+ really need this.*/
+ return;
+ if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX))
atmel_stop_rx(port);
+ if (atmel_use_pdc_tx(port))
/* re-enable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
- }
+
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
}
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 9fdb06e08d4b..8f68acd1d95d 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -35,7 +35,6 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -404,12 +403,8 @@ static void msm_stop_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
msm_port->imr &= ~UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_start_tx(struct uart_port *port)
@@ -421,12 +416,8 @@ static void msm_start_tx(struct uart_port *port)
if (dma->count)
return;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
msm_port->imr |= UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_reset_dm_count(struct uart_port *port, int count)
@@ -448,8 +439,6 @@ static void msm_complete_tx_dma(void *args)
unsigned int count;
u32 val;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
spin_lock_irqsave(&port->lock, flags);
/* Already stopped */
@@ -486,8 +475,6 @@ static void msm_complete_tx_dma(void *args)
msm_handle_tx(port);
done:
spin_unlock_irqrestore(&port->lock, flags);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
@@ -560,8 +547,6 @@ static void msm_complete_rx_dma(void *args)
unsigned long flags;
u32 val;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
spin_lock_irqsave(&port->lock, flags);
/* Already stopped */
@@ -613,8 +598,6 @@ done:
if (count)
tty_flip_buffer_push(tport);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_start_rx_dma(struct msm_port *msm_port)
@@ -689,28 +672,19 @@ static void msm_stop_rx(struct uart_port *port)
struct msm_port *msm_port = UART_TO_MSM(port);
struct msm_dma *dma = &msm_port->rx_dma;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
msm_write(port, msm_port->imr, UART_IMR);
if (dma->chan)
msm_stop_dma(port, dma);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_enable_ms(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
-
msm_port->imr |= UART_IMR_DELTA_CTS;
msm_write(port, msm_port->imr, UART_IMR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
@@ -957,8 +931,6 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
unsigned int misr;
u32 val;
- if (pm_runtime_get_sync(port->dev) < 0)
- return IRQ_NONE;
spin_lock_irqsave(&port->lock, flags);
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
@@ -992,25 +964,13 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
spin_unlock_irqrestore(&port->lock, flags);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
return IRQ_HANDLED;
}
static unsigned int msm_tx_empty(struct uart_port *port)
{
- int ret;
-
- ret = pm_runtime_get_sync(port->dev);
- if (ret < 0)
- return ret;
-
- ret = msm_read(port, UART_SR) & UART_SR_TX_EMPTY ? TIOCSER_TEMT : 0;
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
-
- return ret;
+ return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
}
static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -1039,8 +999,6 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
mr = msm_read(port, UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
@@ -1051,20 +1009,14 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
mr |= UART_MR1_RX_RDY_CTL;
msm_write(port, mr, UART_MR1);
}
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
if (break_ctl)
msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
else
msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
struct msm_baud_map {
@@ -1224,15 +1176,16 @@ static int msm_startup(struct uart_port *port)
* avoid losing received character
*/
ret = clk_prepare_enable(msm_port->clk);
- if (ret)
- return ret;
- ret = clk_prepare(msm_port->pclk);
- if (ret)
+ if (ret) {
+ goto err_clk;
return ret;
+ }
- ret = pm_runtime_get_sync(port->dev);
- if (ret < 0)
- goto err;
+ ret = clk_prepare_enable(msm_port->pclk);
+ if (ret) {
+ goto err_pclk;
+ return ret;
+ }
msm_serial_set_mnd_regs(port);
@@ -1260,15 +1213,13 @@ static int msm_startup(struct uart_port *port)
msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
}
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
-
return 0;
-err:
- clk_unprepare(msm_port->pclk);
+err_pclk:
clk_disable_unprepare(msm_port->clk);
+err_clk:
free_irq(port->irq, port);
+
return ret;
}
@@ -1276,18 +1227,13 @@ static void msm_shutdown(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
-
msm_port->imr = 0;
msm_write(port, 0, UART_IMR); /* disable interrupts */
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
- clk_unprepare(msm_port->pclk);
+ clk_disable_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
free_irq(port->irq, port);
@@ -1301,8 +1247,6 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int baud, mr;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
spin_lock_irqsave(&port->lock, flags);
if (dma->chan) /* Terminate if any */
@@ -1376,8 +1320,6 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
msm_start_rx_dma(msm_port);
spin_unlock_irqrestore(&port->lock, flags);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static const char *msm_type(struct uart_port *port)
@@ -1464,20 +1406,14 @@ static void msm_power(struct uart_port *port, unsigned int state,
*/
if (clk_prepare_enable(msm_port->clk))
return;
- if (clk_prepare(msm_port->pclk)) {
- clk_disable_unprepare(msm_port->clk);
- return;
- }
- if (pm_runtime_get_sync(port->dev) < 0) {
- clk_unprepare(msm_port->pclk);
+ if (clk_prepare_enable(msm_port->pclk)) {
clk_disable_unprepare(msm_port->clk);
return;
}
break;
case 3:
- pm_runtime_put(port->dev);
- clk_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
+ clk_disable_unprepare(msm_port->pclk);
break;
default:
pr_err("msm_serial: Unknown PM state %d\n", state);
@@ -1719,11 +1655,7 @@ static void msm_console_write(struct console *co, const char *s,
port = msm_get_port_from_line(co->index);
msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
__msm_console_write(port, s, count, msm_port->is_uartdm);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static int __init msm_console_setup(struct console *co, char *options)
@@ -1885,12 +1817,6 @@ static int msm_serial_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
- pm_runtime_irq_safe(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
-
return uart_add_one_port(&msm_uart_driver, port);
}
@@ -1899,7 +1825,6 @@ static int msm_serial_remove(struct platform_device *pdev)
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&msm_uart_driver, port);
- pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -1910,34 +1835,6 @@ static const struct of_device_id msm_match_table[] = {
{}
};
-#ifdef CONFIG_PM
-static int msm_serial_runtime_suspend(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct msm_port *msm_port = UART_TO_MSM(port);
-
- if (msm_port->is_uartdm)
- clk_disable(msm_port->pclk);
-
- return 0;
-}
-
-static int msm_serial_runtime_resume(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct msm_port *msm_port = UART_TO_MSM(port);
- int ret;
-
- if (msm_port->is_uartdm) {
- ret = clk_enable(msm_port->pclk);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-#endif
-
#ifdef CONFIG_PM_SLEEP
static int msm_serial_suspend(struct device *dev)
{
@@ -1960,8 +1857,6 @@ static int msm_serial_resume(struct device *dev)
static const struct dev_pm_ops msm_serial_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume)
- SET_RUNTIME_PM_OPS(msm_serial_runtime_suspend,
- msm_serial_runtime_resume, NULL)
};
static struct platform_driver msm_platform_driver = {
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index c8ab5370670d..ab8308ff7e69 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1676,7 +1676,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
return -ENODEV;
if (port->mapbase != 0)
- return 0;
+ return -EINVAL;
/* setup info for port */
port->dev = &platdev->dev;
@@ -1730,22 +1730,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->dma = devm_kzalloc(port->dev,
sizeof(*ourport->dma),
GFP_KERNEL);
- if (!ourport->dma)
- return -ENOMEM;
+ if (!ourport->dma) {
+ ret = -ENOMEM;
+ goto err;
+ }
}
ourport->clk = clk_get(&platdev->dev, "uart");
if (IS_ERR(ourport->clk)) {
pr_err("%s: Controller clock not found\n",
dev_name(&platdev->dev));
- return PTR_ERR(ourport->clk);
+ ret = PTR_ERR(ourport->clk);
+ goto err;
}
ret = clk_prepare_enable(ourport->clk);
if (ret) {
pr_err("uart: clock failed to prepare+enable: %d\n", ret);
clk_put(ourport->clk);
- return ret;
+ goto err;
}
/* Keep all interrupts masked and cleared */
@@ -1761,7 +1764,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
+
return 0;
+
+err:
+ port->mapbase = 0;
+ return ret;
}
/* Device driver serial port probe */
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 391a1225b0ba..ca367b05e440 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1585,8 +1585,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
- /* Data+ pullup controlled by OTG state machine in OTG fsm mode */
- if (ci_otg_is_fsm_mode(ci))
+ /*
+ * Data+ pullup controlled by OTG state machine in OTG fsm mode;
+ * and don't touch Data+ in host mode for dual role config.
+ */
+ if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
return 0;
pm_runtime_get_sync(&ci->gadget.dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d37fdcc3143c..7f374369e539 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1336,7 +1336,6 @@ made_compressed_probe:
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
acm->is_int_ep = usb_endpoint_xfer_int(epread);
if (acm->is_int_ep)
acm->bInterval = epread->bInterval;
@@ -1376,14 +1375,14 @@ made_compressed_probe:
urb->transfer_dma = rb->dma;
if (acm->is_int_ep) {
usb_fill_int_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb,
acm->bInterval);
} else {
usb_fill_bulk_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ccfaba9ab4e4..b30ac5fcde68 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -95,7 +95,6 @@ struct acm {
struct urb *read_urbs[ACM_NR];
struct acm_rb read_buffers[ACM_NR];
int rx_buflimit;
- int rx_endpoint;
spinlock_t read_lock;
int write_used; /* number of non-empty write buffers */
int transmitting;
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 666155f1a185..ddc6bfb02164 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -51,6 +51,7 @@ static const char *const speed_names[] = {
[USB_SPEED_HIGH] = "high-speed",
[USB_SPEED_WIRELESS] = "wireless",
[USB_SPEED_SUPER] = "super-speed",
+ [USB_SPEED_SUPER_PLUS] = "super-speed-plus",
};
const char *usb_speed_string(enum usb_device_speed speed)
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5050760f5e17..80c8d90d8b75 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -142,6 +142,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}
}
+static const unsigned short low_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 8,
+ [USB_ENDPOINT_XFER_ISOC] = 0,
+ [USB_ENDPOINT_XFER_BULK] = 0,
+ [USB_ENDPOINT_XFER_INT] = 8,
+};
+static const unsigned short full_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1023,
+ [USB_ENDPOINT_XFER_BULK] = 64,
+ [USB_ENDPOINT_XFER_INT] = 64,
+};
+static const unsigned short high_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 512,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+static const unsigned short super_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 512,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 1024,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
@@ -150,6 +175,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
+ unsigned int maxp;
+ const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
@@ -191,6 +218,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
/* Many device manufacturers are using full-speed
@@ -256,6 +284,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
+ /* Validate the wMaxPacketSize field */
+ maxp = usb_endpoint_maxp(&endpoint->desc);
+
+ /* Find the highest legal maxpacket size for this endpoint */
+ i = 0; /* additional transactions per microframe */
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_LOW:
+ maxpacket_maxes = low_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_FULL:
+ maxpacket_maxes = full_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_HIGH:
+ /* Bits 12..11 are allowed only for HS periodic endpoints */
+ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+ i = maxp & (BIT(12) | BIT(11));
+ maxp &= ~i;
+ }
+ /* fallthrough */
+ default:
+ maxpacket_maxes = high_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ maxpacket_maxes = super_speed_maxpacket_maxes;
+ break;
+ }
+ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+ if (maxp > j) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+ cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+ maxp = j;
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+ }
+
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
@@ -263,9 +327,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
*/
if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
&& usb_endpoint_xfer_bulk(d)) {
- unsigned maxp;
-
- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
@@ -274,7 +335,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
}
/* Parse a possible SuperSpeed endpoint companion descriptor */
- if (to_usb_device(ddev)->speed == USB_SPEED_SUPER)
+ if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
inum, asnum, endpoint, buffer, size);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 2a3bbdf7eb94..332ed277a06c 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
- if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
+ if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
interval = 1 << (desc->bInterval - 1);
else
interval = desc->bInterval;
@@ -230,7 +230,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
return start;
}
interval *= (speed == USB_SPEED_HIGH ||
- speed == USB_SPEED_SUPER) ? 125 : 1000;
+ speed >= USB_SPEED_SUPER) ? 125 : 1000;
if (interval % 1000)
unit = 'u';
else {
@@ -322,7 +322,7 @@ static char *usb_dump_config_descriptor(char *start, char *end,
if (start > end)
return start;
- if (speed == USB_SPEED_SUPER)
+ if (speed >= USB_SPEED_SUPER)
mul = 8;
else
mul = 2;
@@ -534,6 +534,8 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
speed = "480"; break;
case USB_SPEED_SUPER:
speed = "5000"; break;
+ case USB_SPEED_SUPER_PLUS:
+ speed = "10000"; break;
default:
speed = "??";
}
@@ -553,7 +555,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
/* super/high speed reserves 80%, full/low reserves 90% */
if (usbdev->speed == USB_SPEED_HIGH ||
- usbdev->speed == USB_SPEED_SUPER)
+ usbdev->speed >= USB_SPEED_SUPER)
max = 800;
else
max = FRAME_TIME_MAX_USECS_ALLOC;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3ffb01ff6549..f5c92d904ded 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1530,11 +1530,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = number_of_packets;
as->urb->stream_id = stream_id;
- if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
- ps->dev->speed == USB_SPEED_HIGH)
- as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
- else
- as->urb->interval = ep->desc.bInterval;
+
+ if (ep->desc.bInterval) {
+ if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+ ps->dev->speed == USB_SPEED_HIGH ||
+ ps->dev->speed >= USB_SPEED_SUPER)
+ as->urb->interval = 1 <<
+ min(15, ep->desc.bInterval - 1);
+ else
+ as->urb->interval = ep->desc.bInterval;
+ }
+
as->urb->context = as;
as->urb->complete = async_completed;
for (totlen = u = 0; u < number_of_packets; u++) {
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b8b580e5ae6e..40378487e023 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -206,7 +206,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
* The xHCI driver has its own irq management
* make sure irq setup is not touched for xhci in generic hcd code
*/
- if ((driver->flags & HCD_MASK) != HCD_USB3) {
+ if ((driver->flags & HCD_MASK) < HCD_USB3) {
if (!dev->irq) {
dev_err(&dev->dev,
"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ac0eb0939ecf..a24a8ea9df7c 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1078,7 +1078,7 @@ static int register_root_hub(struct usb_hcd *hcd)
retval = usb_get_bos_descriptor(usb_dev);
if (!retval) {
usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
- } else if (usb_dev->speed == USB_SPEED_SUPER) {
+ } else if (usb_dev->speed >= USB_SPEED_SUPER) {
mutex_unlock(&usb_bus_list_lock);
dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
dev_name(&usb_dev->dev), retval);
@@ -2112,7 +2112,7 @@ int usb_alloc_streams(struct usb_interface *interface,
hcd = bus_to_hcd(dev->bus);
if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
return -EINVAL;
- if (dev->speed != USB_SPEED_SUPER)
+ if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
if (dev->state < USB_STATE_CONFIGURED)
return -ENODEV;
@@ -2160,7 +2160,7 @@ int usb_free_streams(struct usb_interface *interface,
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
- if (dev->speed != USB_SPEED_SUPER)
+ if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
/* Double-free is not allowed */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5839111ab4e0..29242ffe8dca 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -303,7 +303,7 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
unsigned int hub_u1_del;
unsigned int hub_u2_del;
- if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
+ if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
return;
hub = usb_hub_to_struct_hub(udev->parent);
@@ -1047,14 +1047,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Continue a partial initialization */
if (type == HUB_INIT2 || type == HUB_INIT3) {
- device_lock(hub->intfdev);
+ device_lock(&hdev->dev);
/* Was the hub disconnected while we were waiting? */
- if (hub->disconnected) {
- device_unlock(hub->intfdev);
- kref_put(&hub->kref, hub_release);
- return;
- }
+ if (hub->disconnected)
+ goto disconnected;
if (type == HUB_INIT2)
goto init2;
goto init3;
@@ -1257,7 +1254,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
return; /* Continues at init3: below */
} else {
msleep(delay);
@@ -1276,12 +1273,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Scan all ports that need attention */
kick_hub_wq(hub);
- /* Allow autosuspend if it was suppressed */
- if (type <= HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ /* Allow autosuspend if it was suppressed */
+ disconnected:
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
-
- if (type == HUB_INIT2 || type == HUB_INIT3)
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
+ }
kref_put(&hub->kref, hub_release);
}
@@ -1310,8 +1307,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
struct usb_device *hdev = hub->hdev;
int i;
- cancel_delayed_work_sync(&hub->init_work);
-
/* hub_wq and related activity won't re-trigger */
hub->quiescing = 1;
@@ -2656,7 +2651,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
*/
static bool use_new_scheme(struct usb_device *udev, int retry)
{
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
return false;
return USE_NEW_SCHEME(retry);
@@ -3998,7 +3993,7 @@ int usb_disable_lpm(struct usb_device *udev)
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
- udev->speed != USB_SPEED_SUPER ||
+ udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return 0;
@@ -4055,7 +4050,7 @@ void usb_enable_lpm(struct usb_device *udev)
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
- udev->speed != USB_SPEED_SUPER ||
+ udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return;
@@ -4321,7 +4316,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
retval = -ENODEV;
- if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
+ /* Don't allow speed changes at reset, except usb 3.0 to faster */
+ if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed &&
+ !(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) {
dev_dbg(&udev->dev, "device reset changed speed!\n");
goto fail;
}
@@ -4333,6 +4330,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
*/
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_WIRELESS: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
@@ -4359,7 +4357,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
else
speed = usb_speed_string(udev->speed);
- if (udev->speed != USB_SPEED_SUPER)
+ if (udev->speed < USB_SPEED_SUPER)
dev_info(&udev->dev,
"%s %s USB device number %d using %s\n",
(udev->config) ? "reset" : "new", speed,
@@ -4489,11 +4487,12 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
devnum, retval);
goto fail;
}
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
devnum = udev->devnum;
dev_info(&udev->dev,
- "%s SuperSpeed USB device number %d using %s\n",
+ "%s SuperSpeed%s USB device number %d using %s\n",
(udev->config) ? "reset" : "new",
+ (udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus" : "",
devnum, udev->bus->controller->driver->name);
}
@@ -4532,7 +4531,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* got from those devices show they aren't superspeed devices. Warm
* reset the port attached by the devices can fix them.
*/
- if ((udev->speed == USB_SPEED_SUPER) &&
+ if ((udev->speed >= USB_SPEED_SUPER) &&
(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
dev_err(&udev->dev, "got a wrong device descriptor, "
"warm reset device\n");
@@ -4543,7 +4542,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
}
if (udev->descriptor.bMaxPacketSize0 == 0xff ||
- udev->speed == USB_SPEED_SUPER)
+ udev->speed >= USB_SPEED_SUPER)
i = 512;
else
i = udev->descriptor.bMaxPacketSize0;
@@ -4753,7 +4752,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
udev->level = hdev->level + 1;
udev->wusb = hub_is_wusb(hub);
- /* Only USB 3.0 devices are connected to SuperSpeed hubs. */
+ /* Devices connected to SuperSpeed hubs are USB 3.0 or later */
if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 944a6dca0fcb..d2e50a27140c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
+ { USB_DEVICE(0x04f3, 0x0381), .driver_info =
+ USB_QUIRK_NO_LPM },
+
{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3d274778caaf..c601e25b609f 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -401,7 +401,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
/* SuperSpeed isoc endpoints have up to 16 bursts of up to
* 3 packets each
*/
- if (dev->speed == USB_SPEED_SUPER) {
+ if (dev->speed >= USB_SPEED_SUPER) {
int burst = 1 + ep->ss_ep_comp.bMaxBurst;
int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
max *= burst;
@@ -499,6 +499,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
}
/* too big? */
switch (dev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER: /* units are 125us */
/* Handle up to 2^(16-1) microframes */
if (urb->interval > (1 << 15))
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index b43d542e3bd4..062677f8e91d 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -682,9 +682,6 @@ EXPORT_SYMBOL(usb_sec_event_ring_setup);
int usb_sec_event_ring_cleanup(struct usb_device *dev,
unsigned intr_num)
{
- if (dev->state == USB_STATE_NOTATTACHED)
- return 0;
-
return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
}
EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index ccb35af525e2..fbff25ff23a9 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -45,7 +45,7 @@ static inline unsigned usb_get_max_power(struct usb_device *udev,
struct usb_host_config *c)
{
/* SuperSpeed power is in 8 mA units; others are in 2 mA units */
- unsigned mul = (udev->speed == USB_SPEED_SUPER ? 8 : 2);
+ unsigned mul = (udev->speed >= USB_SPEED_SUPER ? 8 : 2);
return c->desc.bMaxPower * mul;
}
diff --git a/drivers/usb/dwc3/dbm.c b/drivers/usb/dwc3/dbm.c
index 0fbb1fb39f5c..cc7fb4026fb8 100644
--- a/drivers/usb/dwc3/dbm.c
+++ b/drivers/usb/dwc3/dbm.c
@@ -194,7 +194,7 @@ static int find_matching_dbm_ep(struct dbm *dbm, u8 usb_ep)
if (dbm->ep_num_mapping[i] == usb_ep)
return i;
- pr_err("%s: No DBM EP matches USB EP %d", __func__, usb_ep);
+ pr_debug("%s: No DBM EP matches USB EP %d", __func__, usb_ep);
return -ENODEV; /* Not found */
}
@@ -385,7 +385,7 @@ int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep)
dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
if (dbm_ep < 0) {
- pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+ pr_debug("usb ep index %d has no corespondng dbm ep\n", usb_ep);
return -ENODEV;
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 8a1b0d870e7a..76bf29e78dad 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -204,6 +204,8 @@ struct dwc3_msm {
unsigned int irq_to_affin;
struct notifier_block dwc3_cpu_notifier;
+ struct notifier_block usbdev_nb;
+ bool hc_died;
struct extcon_dev *extcon_vbus;
struct extcon_dev *extcon_id;
@@ -652,6 +654,14 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
return -EPERM;
}
+ if (!mdwc->original_ep_ops[dep->number]) {
+ dev_err(mdwc->dev,
+ "ep [%s,%d] was unconfigured as msm endpoint\n",
+ ep->name, dep->number);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EINVAL;
+ }
+
if (!request) {
dev_err(mdwc->dev, "%s: request is NULL\n", __func__);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -659,14 +669,11 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
}
if (!(request->udc_priv & MSM_SPS_MODE)) {
- /* Not SPS mode, call original queue */
- dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
+ dev_err(mdwc->dev, "%s: sps mode is not set\n",
__func__);
spin_unlock_irqrestore(&dwc->lock, flags);
- return (mdwc->original_ep_ops[dep->number])->queue(ep,
- request,
- gfp_flags);
+ return -EINVAL;
}
/* HW restriction regarding TRB size (8KB) */
@@ -1343,8 +1350,7 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
*
* @return int - 0 on success, negetive on error.
*/
-int msm_ep_config(struct usb_ep *ep, struct usb_request *request,
- gfp_t gfp_flags)
+int msm_ep_config(struct usb_ep *ep, struct usb_request *request)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
@@ -1356,23 +1362,27 @@ int msm_ep_config(struct usb_ep *ep, struct usb_request *request,
bool disable_wb;
bool internal_mem;
bool ioc;
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
/* Save original ep ops for future restore*/
if (mdwc->original_ep_ops[dep->number]) {
dev_err(mdwc->dev,
"ep [%s,%d] already configured as msm endpoint\n",
ep->name, dep->number);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -EPERM;
}
mdwc->original_ep_ops[dep->number] = ep->ops;
/* Set new usb ops as we like */
- new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), gfp_flags);
+ new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
if (!new_ep_ops) {
dev_err(mdwc->dev,
"%s: unable to allocate mem for new usb ep ops\n",
__func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -ENOMEM;
}
(*new_ep_ops) = (*ep->ops);
@@ -1380,8 +1390,10 @@ int msm_ep_config(struct usb_ep *ep, struct usb_request *request,
new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
ep->ops = new_ep_ops;
- if (!mdwc->dbm || !request || (dep->endpoint.ep_type == EP_TYPE_GSI))
+ if (!mdwc->dbm || !request || (dep->endpoint.ep_type == EP_TYPE_GSI)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
+ }
/*
* Configure the DBM endpoint if required.
@@ -1397,9 +1409,12 @@ int msm_ep_config(struct usb_ep *ep, struct usb_request *request,
if (ret < 0) {
dev_err(mdwc->dev,
"error %d after calling dbm_ep_config\n", ret);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
return 0;
}
EXPORT_SYMBOL(msm_ep_config);
@@ -1419,12 +1434,15 @@ int msm_ep_unconfig(struct usb_ep *ep)
struct dwc3 *dwc = dep->dwc;
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
struct usb_ep_ops *old_ep_ops;
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
/* Restore original ep ops */
if (!mdwc->original_ep_ops[dep->number]) {
dev_err(mdwc->dev,
"ep [%s,%d] was not configured as msm endpoint\n",
ep->name, dep->number);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -EINVAL;
}
old_ep_ops = (struct usb_ep_ops *)ep->ops;
@@ -1436,6 +1454,31 @@ int msm_ep_unconfig(struct usb_ep *ep)
* Do HERE more usb endpoint un-configurations
* which are specific to MSM.
*/
+ if (!mdwc->dbm || (dep->endpoint.ep_type == EP_TYPE_GSI)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+ }
+
+ if (dep->busy_slot == dep->free_slot && list_empty(&dep->request_list)
+ && list_empty(&dep->req_queued)) {
+ dev_dbg(mdwc->dev,
+ "%s: request is not queued, disable DBM ep for ep %s\n",
+ __func__, ep->name);
+ /* Unconfigure dbm ep */
+ dbm_ep_unconfig(mdwc->dbm, dep->number);
+
+ /*
+ * If this is the last endpoint we unconfigured, than reset also
+ * the event buffers; unless unconfiguring the ep due to lpm,
+ * in which case the event buffer only gets reset during the
+ * block reset.
+ */
+ if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
+ !dbm_reset_ep_after_lpm(mdwc->dbm))
+ dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
+ }
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
@@ -1494,6 +1537,33 @@ static void dwc3_restart_usb_work(struct work_struct *w)
flush_delayed_work(&mdwc->sm_work);
}
+static int msm_dwc3_usbdev_notify(struct notifier_block *self,
+ unsigned long action, void *priv)
+{
+ struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct usb_bus *bus = priv;
+
+ /* Interested only in recovery when HC dies */
+ if (action != USB_BUS_DIED)
+ return 0;
+
+ dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
+ /* Recovery already under process */
+ if (mdwc->hc_died)
+ return 0;
+
+ if (bus->controller != &dwc->xhci->dev) {
+ dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
+ return 0;
+ }
+
+ mdwc->hc_died = true;
+ schedule_delayed_work(&mdwc->sm_work, 0);
+ return 0;
+}
+
+
/*
* Check whether the DWC3 requires resetting the ep
* after going to Low Power Mode (lpm)
@@ -2037,15 +2107,6 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
enable_irq_wake(mdwc->ss_phy_irq);
enable_irq(mdwc->ss_phy_irq);
}
- /*
- * Enable power event irq during bus suspend in host mode for
- * mapping MPM pin for DP so that wakeup can happen in system
- * suspend.
- */
- if (mdwc->in_host_mode) {
- enable_irq(mdwc->pwr_event_irq);
- enable_irq_wake(mdwc->pwr_event_irq);
- }
mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
}
@@ -2151,6 +2212,9 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
atomic_set(&dwc->in_lpm, 0);
+ /* enable power evt irq for IN P3 detection */
+ enable_irq(mdwc->pwr_event_irq);
+
/* Disable HSPHY auto suspend */
dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
@@ -2165,18 +2229,11 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
disable_irq_wake(mdwc->ss_phy_irq);
disable_irq_nosync(mdwc->ss_phy_irq);
}
- if (mdwc->in_host_mode) {
- disable_irq_wake(mdwc->pwr_event_irq);
- disable_irq(mdwc->pwr_event_irq);
- }
mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
}
dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
- /* enable power evt irq for IN P3 detection */
- enable_irq(mdwc->pwr_event_irq);
-
/* Enable core irq */
if (dwc->irq)
enable_irq(dwc->irq);
@@ -3168,6 +3225,8 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
usb_register_notify(&mdwc->host_nb);
+ mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
+ usb_register_atomic_notify(&mdwc->usbdev_nb);
/*
* FIXME If micro A cable is disconnected during system suspend,
* xhci platform device will be removed before runtime pm is
@@ -3211,6 +3270,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
} else {
dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
+ usb_unregister_atomic_notify(&mdwc->usbdev_nb);
if (!IS_ERR(mdwc->vbus_reg))
ret = regulator_disable(mdwc->vbus_reg);
if (ret) {
@@ -3502,11 +3562,12 @@ static void dwc3_otg_sm_work(struct work_struct *w)
break;
case OTG_STATE_A_HOST:
- if (test_bit(ID, &mdwc->inputs)) {
- dev_dbg(mdwc->dev, "id\n");
+ if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
+ dev_dbg(mdwc->dev, "id || hc_died\n");
dwc3_otg_start_host(mdwc, 0);
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->vbus_retry_count = 0;
+ mdwc->hc_died = false;
work = 1;
} else {
dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d83048c8c..3d731d1b5c60 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -36,6 +36,7 @@
#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -214,6 +215,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 251ae0a7a5ee..6852df4f7d1f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1356,6 +1356,17 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
goto out;
}
+ /*
+ * Queuing endless request to USB endpoint through generic ep queue
+ * API should not be allowed.
+ */
+ if (dep->endpoint.endless) {
+ dev_dbg(dwc->dev, "trying to queue endless request %p to %s\n",
+ request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+
if (dwc3_gadget_is_suspended(dwc)) {
if (dwc->gadget.remote_wakeup)
dwc3_gadget_wakeup(&dwc->gadget);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 9360b0613154..04985ccbbe6d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -675,7 +675,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
if (gadget_is_superspeed(cdev->gadget)) {
/*
@@ -1631,6 +1631,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_DT_DEVICE:
cdev->desc.bNumConfigurations =
count_configs(cdev, USB_DT_DEVICE);
+ if (cdev->desc.bNumConfigurations == 0) {
+ pr_err("%s:config is not active. send stall\n",
+ __func__);
+ break;
+ }
+
cdev->desc.bMaxPacketSize0 =
cdev->gadget->ep0->maxpacket;
cdev->desc.bcdUSB = cpu_to_le16(0x0200);
@@ -1655,7 +1661,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (!gadget_is_dualspeed(gadget) ||
gadget->speed >= USB_SPEED_SUPER)
break;
+ spin_lock(&cdev->lock);
device_qual(cdev);
+ spin_unlock(&cdev->lock);
value = min_t(int, w_length,
sizeof(struct usb_qualifier_descriptor));
break;
@@ -1665,7 +1673,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
/* FALLTHROUGH */
case USB_DT_CONFIG:
+ spin_lock(&cdev->lock);
value = config_desc(cdev, w_value);
+ spin_unlock(&cdev->lock);
if (value >= 0)
value = min(w_length, (u16) value);
break;
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index bc1c34f05fa3..013a9d6702db 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -575,15 +575,6 @@ static int create_bulk_endpoints(struct acc_dev *dev,
ep->driver_data = dev; /* claim the endpoint */
dev->ep_out = ep;
- ep = usb_ep_autoconfig(cdev->gadget, out_desc);
- if (!ep) {
- DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
- return -ENODEV;
- }
- DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
- ep->driver_data = dev; /* claim the endpoint */
- dev->ep_out = ep;
-
/* now allocate requests for our endpoints */
for (i = 0; i < TX_REQ_MAX; i++) {
req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 19d6a997ee6c..052b6dbc4471 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3124,6 +3124,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
const int super = func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
+ struct ffs_ep *eps_ptr;
/* Make it a single chunk, less management later on */
vla_group(d);
@@ -3175,12 +3176,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
ffs->raw_descs_length);
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
- for (ret = ffs->eps_count; ret; --ret) {
- struct ffs_ep *ptr;
-
- ptr = vla_ptr(vlabuf, d, eps);
- ptr[ret].num = -1;
- }
+ eps_ptr = vla_ptr(vlabuf, d, eps);
+ for (i = 0; i < ffs->eps_count; i++)
+ eps_ptr[i].num = -1;
/* Save pointers
* d_eps == vlabuf, func->eps used to kfree vlabuf later
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 8088593fad1a..47e077d180ec 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -2253,7 +2253,7 @@ skip_string_id_alloc:
if (!ep)
goto fail;
gsi->d_port.in_ep = ep;
- msm_ep_config(gsi->d_port.in_ep, NULL, GFP_KERNEL);
+ msm_ep_config(gsi->d_port.in_ep, NULL);
ep->driver_data = cdev; /* claim */
}
@@ -2263,7 +2263,7 @@ skip_string_id_alloc:
if (!ep)
goto fail;
gsi->d_port.out_ep = ep;
- msm_ep_config(gsi->d_port.out_ep, NULL, GFP_KERNEL);
+ msm_ep_config(gsi->d_port.out_ep, NULL);
ep->driver_data = cdev; /* claim */
}
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index eb306529981f..ede1c8dd51a6 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -472,16 +472,26 @@ static void rndis_qc_response_available(void *_rndis)
static void rndis_qc_response_complete(struct usb_ep *ep,
struct usb_request *req)
{
- struct f_rndis_qc *rndis = req->context;
+ struct f_rndis_qc *rndis;
int status = req->status;
struct usb_composite_dev *cdev;
+ struct usb_ep *notify_ep;
+
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
if (!rndis->func.config || !rndis->func.config->cdev) {
pr_err("%s(): cdev or config is NULL.\n", __func__);
+ spin_unlock(&rndis_lock);
return;
}
cdev = rndis->func.config->cdev;
+
/* after TX:
* - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
* - RNDIS_RESPONSE_AVAILABLE (status/irq)
@@ -491,7 +501,7 @@ static void rndis_qc_response_complete(struct usb_ep *ep,
case -ESHUTDOWN:
/* connection gone */
atomic_set(&rndis->notify_count, 0);
- break;
+ goto out;
default:
pr_info("RNDIS %s response error %d, %d/%d\n",
ep->name, status,
@@ -499,30 +509,47 @@ static void rndis_qc_response_complete(struct usb_ep *ep,
/* FALLTHROUGH */
case 0:
if (ep != rndis->notify)
- break;
+ goto out;
/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
* notifications by resending until we're done
*/
if (atomic_dec_and_test(&rndis->notify_count))
- break;
- status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+ goto out;
+ notify_ep = rndis->notify;
+ spin_unlock(&rndis_lock);
+ status = usb_ep_queue(notify_ep, req, GFP_ATOMIC);
if (status) {
- atomic_dec(&rndis->notify_count);
+ spin_lock(&rndis_lock);
+ if (!_rndis_qc)
+ goto out;
+ atomic_dec(&_rndis_qc->notify_count);
DBG(cdev, "notify/1 --> %d\n", status);
+ spin_unlock(&rndis_lock);
}
- break;
}
+
+ return;
+
+out:
+ spin_unlock(&rndis_lock);
}
static void rndis_qc_command_complete(struct usb_ep *ep,
struct usb_request *req)
{
- struct f_rndis_qc *rndis = req->context;
+ struct f_rndis_qc *rndis;
int status;
rndis_init_msg_type *buf;
u32 ul_max_xfer_size, dl_max_xfer_size;
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
if (status < 0)
@@ -551,6 +578,7 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
rndis_get_dl_max_xfer_size(rndis->params);
ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
}
+ spin_unlock(&rndis_lock);
}
static int
@@ -749,13 +777,16 @@ static void rndis_qc_disable(struct usb_function *f)
{
struct f_rndis_qc *rndis = func_to_rndis_qc(f);
struct usb_composite_dev *cdev = f->config->cdev;
+ unsigned long flags;
if (!rndis->notify->driver_data)
return;
DBG(cdev, "rndis deactivated\n");
+ spin_lock_irqsave(&rndis_lock, flags);
rndis_uninit(rndis->params);
+ spin_unlock_irqrestore(&rndis_lock, flags);
ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
msm_ep_unconfig(rndis->bam_port.out);
@@ -1092,18 +1123,14 @@ rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
void rndis_ipa_reset_trigger(void)
{
struct f_rndis_qc *rndis;
- unsigned long flags;
- spin_lock_irqsave(&rndis_lock, flags);
rndis = _rndis_qc;
if (!rndis) {
pr_err("%s: No RNDIS instance", __func__);
- spin_unlock_irqrestore(&rndis_lock, flags);
return;
}
rndis->net_ready_trigger = false;
- spin_unlock_irqrestore(&rndis_lock, flags);
}
/*
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 8e04d9451b67..16bd7d890d3e 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -844,32 +844,37 @@ EXPORT_SYMBOL(usb_qdss_open);
void usb_qdss_close(struct usb_qdss_ch *ch)
{
struct f_qdss *qdss = ch->priv_usb;
- struct usb_gadget *gadget = qdss->gadget;
+ struct usb_gadget *gadget;
unsigned long flags;
int status;
pr_debug("usb_qdss_close\n");
spin_lock_irqsave(&qdss_lock, flags);
+ if (!qdss || !qdss->usb_connected) {
+ ch->app_conn = 0;
+ spin_unlock_irqrestore(&qdss_lock, flags);
+ return;
+ }
+
usb_ep_dequeue(qdss->port.data, qdss->endless_req);
usb_ep_free_request(qdss->port.data, qdss->endless_req);
qdss->endless_req = NULL;
+ gadget = qdss->gadget;
ch->app_conn = 0;
spin_unlock_irqrestore(&qdss_lock, flags);
- if (qdss->usb_connected) {
- status = uninit_data(qdss->port.data);
- if (status)
- pr_err("%s: uninit_data error\n", __func__);
+ status = uninit_data(qdss->port.data);
+ if (status)
+ pr_err("%s: uninit_data error\n", __func__);
- status = set_qdss_data_connection(
+ status = set_qdss_data_connection(
gadget,
qdss->port.data,
qdss->port.data->address,
0);
- if (status)
- pr_err("%s:qdss_disconnect error\n", __func__);
- }
+ if (status)
+ pr_err("%s:qdss_disconnect error\n", __func__);
usb_gadget_restart(gadget);
}
EXPORT_SYMBOL(usb_qdss_close);
diff --git a/drivers/usb/gadget/function/f_rmnet.c b/drivers/usb/gadget/function/f_rmnet.c
index 0fd7e213ef99..3458f42ee06e 100644
--- a/drivers/usb/gadget/function/f_rmnet.c
+++ b/drivers/usb/gadget/function/f_rmnet.c
@@ -26,20 +26,18 @@
#define RMNET_NOTIFY_INTERVAL 5
#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
-
#define ACM_CTRL_DTR (1 << 0)
-/* TODO: use separate structures for data and
- * control paths
- */
struct f_rmnet {
struct usb_function func;
+ enum qti_port_type qti_port_type;
+ enum ipa_func_type func_type;
struct grmnet port;
int ifc_id;
atomic_t online;
atomic_t ctrl_online;
struct usb_composite_dev *cdev;
-
+ struct gadget_ipa_port ipa_port;
spinlock_t lock;
/* usb eps*/
@@ -47,11 +45,9 @@ struct f_rmnet {
struct usb_request *notify_req;
/* control info */
- struct gadget_ipa_port ipa_port;
struct list_head cpkt_resp_q;
unsigned long notify_count;
- unsigned long cpkts_len;
-} *rmnet_port;
+};
static struct usb_interface_descriptor rmnet_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
@@ -213,6 +209,70 @@ static struct usb_gadget_strings *rmnet_strings[] = {
NULL,
};
+static struct usb_interface_descriptor dpl_data_intf_desc = {
+ .bLength = sizeof(dpl_data_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor dpl_hs_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dpl_ss_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dpl_data_ep_comp_desc = {
+ .bLength = sizeof(dpl_data_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *dpl_hs_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_hs_data_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *dpl_ss_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_ss_data_desc,
+ (struct usb_descriptor_header *) &dpl_data_ep_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string dpl_string_defs[] = {
+ [0].s = "QDSS DATA",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings dpl_string_table = {
+ .language = 0x0409,
+ .strings = dpl_string_defs,
+};
+
+static struct usb_gadget_strings *dpl_strings[] = {
+ &dpl_string_table,
+ NULL,
+};
+
static void frmnet_ctrl_response_available(struct f_rmnet *dev);
/* ------- misc functions --------------------*/
@@ -227,6 +287,24 @@ static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
return container_of(r, struct f_rmnet, port);
}
+int name_to_prot(struct f_rmnet *dev, const char *name)
+{
+ if (!name)
+ goto error;
+
+ if (!strncasecmp("rmnet", name, MAX_INST_NAME_LEN)) {
+ dev->qti_port_type = QTI_PORT_RMNET;
+ dev->func_type = USB_IPA_FUNC_RMNET;
+ } else if (!strncasecmp("dpl", name, MAX_INST_NAME_LEN)) {
+ dev->qti_port_type = QTI_PORT_DPL;
+ dev->func_type = USB_IPA_FUNC_DPL;
+ }
+ return 0;
+
+error:
+ return -EINVAL;
+}
+
static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
@@ -279,51 +357,57 @@ static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
/* -------------------------------------------*/
-static int gport_rmnet_connect(struct f_rmnet *dev, unsigned intf)
+static int gport_rmnet_connect(struct f_rmnet *dev)
{
int ret;
int src_connection_idx = 0, dst_connection_idx = 0;
struct usb_gadget *gadget = dev->cdev->gadget;
enum usb_ctrl usb_bam_type;
+ int bam_pipe_num = (dev->qti_port_type == QTI_PORT_DPL) ? 1 : 0;
- ret = gqti_ctrl_connect(&dev->port, QTI_PORT_RMNET, dev->ifc_id);
+ ret = gqti_ctrl_connect(&dev->port, dev->qti_port_type, dev->ifc_id);
if (ret) {
pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
__func__, ret);
return ret;
}
-
+ if (dev->qti_port_type == QTI_PORT_DPL)
+ dev->port.send_encap_cmd(QTI_PORT_DPL, NULL, 0);
dev->ipa_port.cdev = dev->cdev;
- ipa_data_port_select(USB_IPA_FUNC_RMNET);
+ ipa_data_port_select(dev->func_type);
usb_bam_type = usb_bam_get_bam_type(gadget->name);
- src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
- QTI_PORT_RMNET);
- dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
- QTI_PORT_RMNET);
+
+ if (dev->ipa_port.in) {
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (dev->ipa_port.out) {
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
if (dst_connection_idx < 0 || src_connection_idx < 0) {
pr_err("%s: usb_bam_get_connection_idx failed\n",
__func__);
- gqti_ctrl_disconnect(&dev->port, QTI_PORT_RMNET);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
return -EINVAL;
}
- ret = ipa_data_connect(&dev->ipa_port, USB_IPA_FUNC_RMNET,
+ ret = ipa_data_connect(&dev->ipa_port, dev->func_type,
src_connection_idx, dst_connection_idx);
if (ret) {
pr_err("%s: ipa_data_connect failed: err:%d\n",
__func__, ret);
- gqti_ctrl_disconnect(&dev->port, QTI_PORT_RMNET);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
return ret;
}
-
return 0;
}
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
- gqti_ctrl_disconnect(&dev->port, QTI_PORT_RMNET);
- ipa_data_disconnect(&dev->ipa_port, USB_IPA_FUNC_RMNET);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+ ipa_data_disconnect(&dev->ipa_port, dev->func_type);
return 0;
}
@@ -333,24 +417,25 @@ static void frmnet_free(struct usb_function *f)
opts = container_of(f->fi, struct f_rmnet_opts, func_inst);
opts->refcnt--;
- kfree(rmnet_port);
- rmnet_port = NULL;
}
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_gadget *gadget = c->cdev->gadget;
- pr_debug("%s: start unbinding\n", __func__);
- if (gadget_is_superspeed(c->cdev->gadget))
+ pr_debug("%s: start unbinding\nclear_desc\n", __func__);
+ if (gadget_is_superspeed(gadget) && f->ss_descriptors)
usb_free_descriptors(f->ss_descriptors);
- if (gadget_is_dualspeed(c->cdev->gadget))
+
+ if (gadget_is_dualspeed(gadget) && f->hs_descriptors)
usb_free_descriptors(f->hs_descriptors);
- usb_free_descriptors(f->fs_descriptors);
- frmnet_free_req(dev->notify, dev->notify_req);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
- kfree(f->name);
+ if (dev->notify_req)
+ frmnet_free_req(dev->notify, dev->notify_req);
}
static void frmnet_purge_responses(struct f_rmnet *dev)
@@ -384,11 +469,11 @@ static void frmnet_suspend(struct usb_function *f)
pr_debug("%s: dev: %p remote_wakeup: %d\n",
__func__, dev, remote_wakeup_allowed);
- usb_ep_fifo_flush(dev->notify);
- frmnet_purge_responses(dev);
-
- ipa_data_suspend(&dev->ipa_port, USB_IPA_FUNC_RMNET,
- remote_wakeup_allowed);
+ if (dev->notify) {
+ usb_ep_fifo_flush(dev->notify);
+ frmnet_purge_responses(dev);
+ }
+ ipa_data_suspend(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
}
static void frmnet_resume(struct usb_function *f)
@@ -404,8 +489,7 @@ static void frmnet_resume(struct usb_function *f)
pr_debug("%s: dev: %p remote_wakeup: %d\n",
__func__, dev, remote_wakeup_allowed);
- ipa_data_resume(&dev->ipa_port, USB_IPA_FUNC_RMNET,
- remote_wakeup_allowed);
+ ipa_data_resume(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
}
static void frmnet_disable(struct usb_function *f)
@@ -413,15 +497,13 @@ static void frmnet_disable(struct usb_function *f)
struct f_rmnet *dev = func_to_rmnet(f);
pr_debug("%s: Disabling\n", __func__);
- usb_ep_disable(dev->notify);
- dev->notify->driver_data = NULL;
-
atomic_set(&dev->online, 0);
+ if (dev->notify) {
+ usb_ep_disable(dev->notify);
+ dev->notify->driver_data = NULL;
+ frmnet_purge_responses(dev);
+ }
- frmnet_purge_responses(dev);
-
- msm_ep_unconfig(dev->ipa_port.out);
- msm_ep_unconfig(dev->ipa_port.in);
gport_rmnet_disconnect(dev);
}
@@ -430,64 +512,78 @@ frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_rmnet *dev = func_to_rmnet(f);
struct usb_composite_dev *cdev = f->config->cdev;
- int ret;
- struct list_head *cpkt;
+ int ret = 0;
pr_debug("%s: dev: %p\n", __func__, dev);
dev->cdev = cdev;
- if (dev->notify->driver_data) {
- pr_debug("%s: reset port\n", __func__);
- usb_ep_disable(dev->notify);
- }
+ if (dev->notify) {
+ if (dev->notify->driver_data) {
+ pr_debug("%s: reset port\n", __func__);
+ usb_ep_disable(dev->notify);
+ }
- ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
- if (ret) {
- dev->notify->desc = NULL;
- ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
+ ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
+ if (ret) {
+ dev->notify->desc = NULL;
+ ERROR(cdev,
+ "config_ep_by_speed failed for ep %s, result %d\n",
dev->notify->name, ret);
- return ret;
- }
- ret = usb_ep_enable(dev->notify);
+ return ret;
+ }
- if (ret) {
- pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ ret = usb_ep_enable(dev->notify);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
__func__, dev->notify->name, ret);
- dev->notify->desc = NULL;
- return ret;
- }
- dev->notify->driver_data = dev;
-
- if (!dev->ipa_port.in->desc || !dev->ipa_port.out->desc) {
- if (config_ep_by_speed(cdev->gadget, f, dev->ipa_port.in) ||
- config_ep_by_speed(cdev->gadget, f, dev->ipa_port.out)) {
- pr_err("%s(): config_ep_by_speed failed.\n", __func__);
- ret = -EINVAL;
- goto err_disable_ep;
+ dev->notify->desc = NULL;
+ return ret;
}
- dev->ipa_port.cdev = dev->cdev;
+
+ dev->notify->driver_data = dev;
+ }
+
+ if (dev->ipa_port.in && !dev->ipa_port.in->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.in)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.in->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
+ }
+
+ if (dev->ipa_port.out && !dev->ipa_port.out->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.out)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.out->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
}
- ret = gport_rmnet_connect(dev, intf);
+ ret = gport_rmnet_connect(dev);
if (ret) {
pr_err("%s(): gport_rmnet_connect fail with err:%d\n",
- __func__, ret);
+ __func__, ret);
goto err_disable_ep;
}
atomic_set(&dev->online, 1);
-
/*
- * In case notifications were aborted, but there are pending control
- * packets in the response queue, re-add the notifications.
- */
- list_for_each(cpkt, &dev->cpkt_resp_q)
- frmnet_ctrl_response_available(dev);
+ * In case notifications were aborted, but there are
+ * pending control packets in the response queue,
+ * re-add the notifications.
+ */
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ struct list_head *cpkt;
+
+ list_for_each(cpkt, &dev->cpkt_resp_q)
+ frmnet_ctrl_response_available(dev);
+ }
return ret;
err_disable_ep:
- dev->ipa_port.in->desc = NULL;
- dev->ipa_port.out->desc = NULL;
- usb_ep_disable(dev->notify);
+ if (dev->notify && dev->notify->driver_data)
+ usb_ep_disable(dev->notify);
return ret;
}
@@ -813,108 +909,119 @@ invalid:
return ret;
}
-static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+static int ipa_update_function_bind_params(struct f_rmnet *dev,
+ struct usb_composite_dev *cdev, struct ipa_function_bind_info *info)
{
- struct f_rmnet *dev = func_to_rmnet(f);
- struct usb_ep *ep;
- struct usb_composite_dev *cdev = c->cdev;
- int ret = -ENODEV;
-
- if (rmnet_string_defs[0].id == 0) {
- ret = usb_string_id(c->cdev);
- if (ret < 0) {
- pr_err("%s: failed to get string id, err:%d\n",
- __func__, ret);
- return ret;
- }
- rmnet_string_defs[0].id = ret;
+ struct usb_ep *ep;
+ struct usb_function *f = &dev->func;
+ int status;
+
+ /* maybe allocate device-global string IDs */
+ if (info->string_defs[0].id != 0)
+ goto skip_string_id_alloc;
+
+ if (info->data_str_idx >= 0 && info->data_desc) {
+ /* data interface label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->data_str_idx].id = status;
+ info->data_desc->iInterface = status;
}
- pr_debug("%s: start binding\n", __func__);
- dev->ifc_id = usb_interface_id(c, f);
- if (dev->ifc_id < 0) {
- pr_err("%s: unable to allocate ifc id, err:%d\n",
- __func__, dev->ifc_id);
- return dev->ifc_id;
- }
- rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+skip_string_id_alloc:
+ if (info->data_desc)
+ info->data_desc->bInterfaceNumber = dev->ifc_id;
- ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
- if (!ep) {
- pr_err("%s: usb epin autoconfig failed\n", __func__);
- return -ENODEV;
- }
- dev->ipa_port.in = ep;
- ep->driver_data = cdev;
-
- ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
- if (!ep) {
- pr_err("%s: usb epout autoconfig failed\n", __func__);
- ret = -ENODEV;
- goto ep_auto_out_fail;
+ if (info->fs_in_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+ if (!ep) {
+ pr_err("%s: usb epin autoconfig failed\n",
+ __func__);
+ return -ENODEV;
+ }
+ dev->ipa_port.in = ep;
+ ep->driver_data = cdev;
}
- dev->ipa_port.out = ep;
- ep->driver_data = cdev;
-
- ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
- if (!ep) {
- pr_err("%s: usb epnotify autoconfig failed\n", __func__);
- ret = -ENODEV;
- goto ep_auto_notify_fail;
+
+ if (info->fs_out_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+ if (!ep) {
+ pr_err("%s: usb epout autoconfig failed\n",
+ __func__);
+ status = -ENODEV;
+ goto ep_auto_out_fail;
+ }
+ dev->ipa_port.out = ep;
+ ep->driver_data = cdev;
}
- dev->notify = ep;
- ep->driver_data = cdev;
- dev->notify_req = frmnet_alloc_req(ep,
+ if (info->fs_notify_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+ if (!ep) {
+ pr_err("%s: usb epnotify autoconfig failed\n",
+ __func__);
+ status = -ENODEV;
+ goto ep_auto_notify_fail;
+ }
+ dev->notify = ep;
+ ep->driver_data = cdev;
+ dev->notify_req = frmnet_alloc_req(ep,
sizeof(struct usb_cdc_notification),
GFP_KERNEL);
- if (IS_ERR(dev->notify_req)) {
- pr_err("%s: unable to allocate memory for notify req\n",
+ if (IS_ERR(dev->notify_req)) {
+ pr_err("%s: unable to allocate memory for notify req\n",
__func__);
- ret = -ENOMEM;
- goto ep_notify_alloc_fail;
- }
-
- dev->notify_req->complete = frmnet_notify_complete;
- dev->notify_req->context = dev;
+ status = -ENOMEM;
+ goto ep_notify_alloc_fail;
+ }
- ret = -ENOMEM;
- f->fs_descriptors = usb_copy_descriptors(rmnet_fs_function);
+ dev->notify_req->complete = frmnet_notify_complete;
+ dev->notify_req->context = dev;
+ }
+ status = -ENOMEM;
+ f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
if (!f->fs_descriptors) {
- pr_err("%s: no descriptors,usb_copy descriptors(fs)failed\n",
+ pr_err("%s: no descriptors, usb_copy descriptors(fs)failed\n",
__func__);
goto fail;
}
+
if (gadget_is_dualspeed(cdev->gadget)) {
- rmnet_hs_in_desc.bEndpointAddress =
- rmnet_fs_in_desc.bEndpointAddress;
- rmnet_hs_out_desc.bEndpointAddress =
- rmnet_fs_out_desc.bEndpointAddress;
- rmnet_hs_notify_desc.bEndpointAddress =
- rmnet_fs_notify_desc.bEndpointAddress;
+ if (info->fs_in_desc && info->hs_in_desc)
+ info->hs_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+ if (info->fs_out_desc && info->hs_out_desc)
+ info->hs_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc && info->hs_notify_desc)
+ info->hs_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
/* copy descriptors, and track endpoint copies */
- f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
-
+ f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
if (!f->hs_descriptors) {
- pr_err("%s: no hs_descriptors,usb_copy descriptors(hs)failed\n",
- __func__);
+ pr_err("%s: no hs_descriptors, usb_copy descriptors(hs)failed\n",
+ __func__);
goto fail;
}
}
if (gadget_is_superspeed(cdev->gadget)) {
- rmnet_ss_in_desc.bEndpointAddress =
- rmnet_fs_in_desc.bEndpointAddress;
- rmnet_ss_out_desc.bEndpointAddress =
- rmnet_fs_out_desc.bEndpointAddress;
- rmnet_ss_notify_desc.bEndpointAddress =
- rmnet_fs_notify_desc.bEndpointAddress;
+ if (info->fs_in_desc && info->ss_in_desc)
+ info->ss_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
- /* copy descriptors, and track endpoint copies */
- f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function);
+ if (info->fs_out_desc && info->ss_out_desc)
+ info->ss_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc && info->ss_notify_desc)
+ info->ss_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
if (!f->ss_descriptors) {
pr_err("%s: no ss_descriptors,usb_copy descriptors(ss)failed\n",
__func__);
@@ -922,57 +1029,95 @@ static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
}
}
- pr_debug("%s: RmNet %s Speed, IN:%s OUT:%s\n",
- __func__, gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
- dev->ipa_port.in->name, dev->ipa_port.out->name);
-
return 0;
fail:
- if (f->ss_descriptors)
+ if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
usb_free_descriptors(f->ss_descriptors);
- if (f->hs_descriptors)
+ if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
usb_free_descriptors(f->hs_descriptors);
if (f->fs_descriptors)
usb_free_descriptors(f->fs_descriptors);
if (dev->notify_req)
frmnet_free_req(dev->notify, dev->notify_req);
ep_notify_alloc_fail:
- dev->notify->driver_data = NULL;
- dev->notify = NULL;
+ dev->notify->driver_data = NULL;
+ dev->notify = NULL;
ep_auto_notify_fail:
- dev->ipa_port.out->driver_data = NULL;
- dev->ipa_port.out = NULL;
+ dev->ipa_port.out->driver_data = NULL;
+ dev->ipa_port.out = NULL;
ep_auto_out_fail:
- dev->ipa_port.in->driver_data = NULL;
- dev->ipa_port.in = NULL;
+ dev->ipa_port.in->driver_data = NULL;
+ dev->ipa_port.in = NULL;
+
+ return status;
+}
+
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret = -ENODEV;
+ struct ipa_function_bind_info info = {0};
+
+ pr_debug("%s: start binding\n", __func__);
+ dev->ifc_id = usb_interface_id(c, f);
+ if (dev->ifc_id < 0) {
+ pr_err("%s: unable to allocate ifc id, err:%d\n",
+ __func__, dev->ifc_id);
+ return dev->ifc_id;
+ }
+
+ info.data_str_idx = 0;
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ info.string_defs = rmnet_string_defs;
+ info.data_desc = &rmnet_interface_desc;
+ info.fs_in_desc = &rmnet_fs_in_desc;
+ info.fs_out_desc = &rmnet_fs_out_desc;
+ info.fs_notify_desc = &rmnet_fs_notify_desc;
+ info.hs_in_desc = &rmnet_hs_in_desc;
+ info.hs_out_desc = &rmnet_hs_out_desc;
+ info.hs_notify_desc = &rmnet_hs_notify_desc;
+ info.ss_in_desc = &rmnet_ss_in_desc;
+ info.ss_out_desc = &rmnet_ss_out_desc;
+ info.ss_notify_desc = &rmnet_ss_notify_desc;
+ info.fs_desc_hdr = rmnet_fs_function;
+ info.hs_desc_hdr = rmnet_hs_function;
+ info.ss_desc_hdr = rmnet_ss_function;
+ } else {
+ info.string_defs = dpl_string_defs;
+ info.data_desc = &dpl_data_intf_desc;
+ info.fs_in_desc = &dpl_hs_data_desc;
+ info.hs_in_desc = &dpl_hs_data_desc;
+ info.ss_in_desc = &dpl_ss_data_desc;
+ info.fs_desc_hdr = dpl_hs_data_only_desc;
+ info.hs_desc_hdr = dpl_hs_data_only_desc;
+ info.ss_desc_hdr = dpl_ss_data_only_desc;
+ }
+
+ ret = ipa_update_function_bind_params(dev, cdev, &info);
return ret;
}
static struct usb_function *frmnet_bind_config(struct usb_function_instance *fi)
{
- struct f_rmnet_opts *opts;
- int status;
+ struct f_rmnet_opts *opts;
struct f_rmnet *dev;
struct usb_function *f;
- unsigned long flags;
- /* allocate and initialize one new instance */
- status = -ENOMEM;
opts = container_of(fi, struct f_rmnet_opts, func_inst);
opts->refcnt++;
dev = opts->dev;
- spin_lock_irqsave(&dev->lock, flags);
f = &dev->func;
- f->name = kasprintf(GFP_ATOMIC, "rmnet%d", 0);
- spin_unlock_irqrestore(&dev->lock, flags);
- if (!f->name) {
- pr_err("%s: cannot allocate memory for name\n", __func__);
- return ERR_PTR(-ENOMEM);
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ f->name = "rmnet";
+ f->strings = rmnet_strings;
+ } else {
+ f->name = "dpl";
+ f->strings = dpl_strings;
}
- f->strings = rmnet_strings;
f->bind = frmnet_bind;
f->unbind = frmnet_unbind;
f->disable = frmnet_disable;
@@ -1004,21 +1149,53 @@ static void rmnet_free_inst(struct usb_function_instance *f)
{
struct f_rmnet_opts *opts = container_of(f, struct f_rmnet_opts,
func_inst);
- ipa_data_free(USB_IPA_FUNC_RMNET);
+ ipa_data_free(opts->dev->func_type);
+ kfree(opts->dev);
kfree(opts);
}
static int rmnet_set_inst_name(struct usb_function_instance *fi,
const char *name)
{
- int name_len;
- int ret;
+ int name_len, ret = 0;
+ struct f_rmnet *dev;
+ struct f_rmnet_opts *opts = container_of(fi,
+ struct f_rmnet_opts, func_inst);
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
return -ENAMETOOLONG;
- ret = ipa_data_setup(USB_IPA_FUNC_RMNET);
+ dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ /* Update qti->qti_port_type */
+ ret = name_to_prot(dev, name);
+ if (ret < 0) {
+ pr_err("%s: failed to find prot for %s instance\n",
+ __func__, name);
+ goto fail;
+ }
+
+ if (dev->qti_port_type >= QTI_NUM_PORTS ||
+ dev->func_type >= USB_IPA_NUM_FUNCS) {
+ pr_err("%s: invalid prot\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&dev->cpkt_resp_q);
+ ret = ipa_data_setup(dev->func_type);
+ if (ret)
+ goto fail;
+
+ opts->dev = dev;
+ return 0;
+
+fail:
+ kfree(dev);
return ret;
}
@@ -1062,14 +1239,6 @@ static struct usb_function_instance *rmnet_alloc_inst(void)
static struct usb_function *rmnet_alloc(struct usb_function_instance *fi)
{
- struct f_rmnet_opts *opts = container_of(fi,
- struct f_rmnet_opts, func_inst);
- rmnet_port = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
- if (!rmnet_port)
- return ERR_PTR(-ENOMEM);
- opts->dev = rmnet_port;
- spin_lock_init(&rmnet_port->lock);
- INIT_LIST_HEAD(&rmnet_port->cpkt_resp_q);
return frmnet_bind_config(fi);
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 044ca79d3cb5..12628dd36e55 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1291,6 +1291,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
struct cntrl_cur_lay3 c;
+ memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
c.dCUR = p_srate;
diff --git a/drivers/usb/gadget/function/u_bam.c b/drivers/usb/gadget/function/u_bam.c
index bc3dd69dcb00..bbb744b33c3a 100644
--- a/drivers/usb/gadget/function/u_bam.c
+++ b/drivers/usb/gadget/function/u_bam.c
@@ -1395,7 +1395,7 @@ static void gbam2bam_connect_work(struct work_struct *w)
d->src_pipe_idx;
d->rx_req->length = 32*1024;
d->rx_req->udc_priv = sps_params;
- msm_ep_config(port->port_usb->out, d->rx_req, GFP_ATOMIC);
+ msm_ep_config(port->port_usb->out, d->rx_req);
/* Configure for TX */
configure_data_fifo(d->usb_bam_type, d->dst_connection_idx,
@@ -1403,7 +1403,7 @@ static void gbam2bam_connect_work(struct work_struct *w)
sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | d->dst_pipe_idx;
d->tx_req->length = 32*1024;
d->tx_req->udc_priv = sps_params;
- msm_ep_config(port->port_usb->in, d->tx_req, GFP_ATOMIC);
+ msm_ep_config(port->port_usb->in, d->tx_req);
} else {
/* Configure for RX */
diff --git a/drivers/usb/gadget/function/u_bam_data.c b/drivers/usb/gadget/function/u_bam_data.c
index 226af4bf1595..e479907e93ef 100644
--- a/drivers/usb/gadget/function/u_bam_data.c
+++ b/drivers/usb/gadget/function/u_bam_data.c
@@ -935,7 +935,7 @@ static void bam2bam_data_connect_work(struct work_struct *w)
| MSM_PRODUCER | d->src_pipe_idx;
d->rx_req->length = 32*1024;
d->rx_req->udc_priv = sps_params;
- msm_ep_config(port->port_usb->out, d->rx_req, GFP_ATOMIC);
+ msm_ep_config(port->port_usb->out, d->rx_req);
/* Configure TX */
configure_usb_data_fifo(d->usb_bam_type,
@@ -945,7 +945,7 @@ static void bam2bam_data_connect_work(struct work_struct *w)
| d->dst_pipe_idx;
d->tx_req->length = 32*1024;
d->tx_req->udc_priv = sps_params;
- msm_ep_config(port->port_usb->in, d->tx_req, GFP_ATOMIC);
+ msm_ep_config(port->port_usb->in, d->tx_req);
} else {
/* Configure RX */
diff --git a/drivers/usb/gadget/function/u_ctrl_qti.c b/drivers/usb/gadget/function/u_ctrl_qti.c
index c0650b0abf8c..287bfeb21ea3 100644
--- a/drivers/usb/gadget/function/u_ctrl_qti.c
+++ b/drivers/usb/gadget/function/u_ctrl_qti.c
@@ -204,7 +204,6 @@ int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf)
{
struct qti_ctrl_port *port;
struct grmnet *g_rmnet = NULL;
- struct gqdss *g_dpl = NULL;
unsigned long flags;
pr_debug("%s: port type:%d gadget:%p\n", __func__, qport, gr);
@@ -224,17 +223,13 @@ int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf)
port->ep_type = DATA_EP_TYPE_HSUSB;
port->intf = intf;
- if (gr && port->port_type == QTI_PORT_RMNET) {
+ if (gr) {
port->port_usb = gr;
g_rmnet = (struct grmnet *)gr;
g_rmnet->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
g_rmnet->notify_modem = gqti_ctrl_notify_modem;
- } else if (gr && port->port_type == QTI_PORT_DPL) {
- port->port_usb = gr;
- g_dpl = (struct gqdss *)gr;
- g_dpl->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
- g_dpl->notify_modem = gqti_ctrl_notify_modem;
- atomic_set(&port->line_state, 1);
+ if (port->port_type == QTI_PORT_DPL)
+ atomic_set(&port->line_state, 1);
} else {
spin_unlock_irqrestore(&port->lock, flags);
pr_err("%s(): Port is used without port type.\n", __func__);
@@ -263,7 +258,6 @@ void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport)
unsigned long flags;
struct rmnet_ctrl_pkt *cpkt;
struct grmnet *g_rmnet = NULL;
- struct gqdss *g_dpl = NULL;
pr_debug("%s: gadget:%p\n", __func__, gr);
@@ -287,14 +281,10 @@ void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport)
port->ipa_cons_idx = -1;
port->port_usb = NULL;
- if (gr && port->port_type == QTI_PORT_RMNET) {
+ if (gr) {
g_rmnet = (struct grmnet *)gr;
g_rmnet->send_encap_cmd = NULL;
g_rmnet->notify_modem = NULL;
- } else if (gr && port->port_type == QTI_PORT_DPL) {
- g_dpl = (struct gqdss *)gr;
- g_dpl->send_encap_cmd = NULL;
- g_dpl->notify_modem = NULL;
} else {
pr_err("%s(): unrecognized gadget type(%d).\n",
__func__, port->port_type);
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
index 1850aa6ea19d..bbdeb2f7910d 100644
--- a/drivers/usb/gadget/function/u_data_ipa.c
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -457,7 +457,7 @@ static void ipa_data_connect_work(struct work_struct *w)
configure_fifo(port->usb_bam_type,
port->src_connection_idx,
port->port_usb->out);
- ret = msm_ep_config(gport->out, port->rx_req, GFP_ATOMIC);
+ ret = msm_ep_config(gport->out, port->rx_req);
if (ret) {
pr_err("msm_ep_config() failed for OUT EP\n");
usb_bam_free_fifos(port->usb_bam_type,
@@ -475,7 +475,7 @@ static void ipa_data_connect_work(struct work_struct *w)
port->tx_req->udc_priv = sps_params;
configure_fifo(port->usb_bam_type,
port->dst_connection_idx, gport->in);
- ret = msm_ep_config(gport->in, port->tx_req, GFP_ATOMIC);
+ ret = msm_ep_config(gport->in, port->tx_req);
if (ret) {
pr_err("msm_ep_config() failed for IN EP\n");
goto unconfig_msm_ep_out;
@@ -837,13 +837,16 @@ void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
* the BAM disconnect API. This lets us restore this info when
* the USB bus is resumed.
*/
- gp->in_ep_desc_backup = gp->in->desc;
- gp->out_ep_desc_backup = gp->out->desc;
-
- pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
- gp->in_ep_desc_backup,
- gp->out_ep_desc_backup);
-
+ if (gp->in) {
+ gp->in_ep_desc_backup = gp->in->desc;
+ pr_debug("in_ep_desc_backup = %p\n",
+ gp->in_ep_desc_backup);
+ }
+ if (gp->out) {
+ gp->out_ep_desc_backup = gp->out->desc;
+ pr_debug("out_ep_desc_backup = %p\n",
+ gp->out_ep_desc_backup);
+ }
ipa_data_disconnect(gp, func);
return;
}
@@ -919,8 +922,8 @@ void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
struct ipa_data_ch_info *port;
unsigned long flags;
struct usb_gadget *gadget = NULL;
- u8 src_connection_idx;
- u8 dst_connection_idx;
+ u8 src_connection_idx = 0;
+ u8 dst_connection_idx = 0;
enum usb_ctrl usb_bam_type;
pr_debug("dev:%p port number:%d\n", gp, func);
@@ -944,20 +947,25 @@ void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
gadget = gp->cdev->gadget;
/* resume with remote wakeup disabled */
if (!remote_wakeup_enabled) {
- /* Restore endpoint descriptors info. */
- gp->in->desc = gp->in_ep_desc_backup;
- gp->out->desc = gp->out_ep_desc_backup;
-
- pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
- gp->in_ep_desc_backup,
- gp->out_ep_desc_backup);
+ int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;
usb_bam_type = usb_bam_get_bam_type(gadget->name);
- src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
- 0);
- dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
- 0);
+ /* Restore endpoint descriptors info. */
+ if (gp->in) {
+ gp->in->desc = gp->in_ep_desc_backup;
+ pr_debug("in_ep_desc_backup = %p\n",
+ gp->in_ep_desc_backup);
+ dst_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (gp->out) {
+ gp->out->desc = gp->out_ep_desc_backup;
+ pr_debug("out_ep_desc_backup = %p\n",
+ gp->out_ep_desc_backup);
+ src_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
ipa_data_connect(gp, func,
src_connection_idx, dst_connection_idx);
return;
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
index 14411575af22..17dccbc4cf16 100644
--- a/drivers/usb/gadget/function/u_data_ipa.h
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -47,6 +47,25 @@ struct gadget_ipa_port {
};
+struct ipa_function_bind_info {
+ struct usb_string *string_defs;
+ int data_str_idx;
+ struct usb_interface_descriptor *data_desc;
+ struct usb_endpoint_descriptor *fs_in_desc;
+ struct usb_endpoint_descriptor *fs_out_desc;
+ struct usb_endpoint_descriptor *fs_notify_desc;
+ struct usb_endpoint_descriptor *hs_in_desc;
+ struct usb_endpoint_descriptor *hs_out_desc;
+ struct usb_endpoint_descriptor *hs_notify_desc;
+ struct usb_endpoint_descriptor *ss_in_desc;
+ struct usb_endpoint_descriptor *ss_out_desc;
+ struct usb_endpoint_descriptor *ss_notify_desc;
+
+ struct usb_descriptor_header **fs_desc_hdr;
+ struct usb_descriptor_header **hs_desc_hdr;
+ struct usb_descriptor_header **ss_desc_hdr;
+};
+
/* for configfs support */
#define MAX_INST_NAME_LEN 40
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
index e26f0977b9b7..42a9cda68659 100644
--- a/drivers/usb/gadget/function/u_qdss.c
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -99,7 +99,7 @@ static int init_data(struct usb_ep *ep)
pr_debug("init_data\n");
- res = msm_ep_config(ep, qdss->endless_req, GFP_ATOMIC);
+ res = msm_ep_config(ep, qdss->endless_req);
if (res)
pr_err("msm_ep_config failed\n");
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 55386619a0f1..e57f48f9528f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -541,7 +541,7 @@ static ssize_t ep_aio(struct kiocb *iocb,
*/
spin_lock_irq(&epdata->dev->lock);
value = -ENODEV;
- if (unlikely(epdata->ep))
+ if (unlikely(epdata->ep == NULL))
goto fail;
req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 5fb6f8b4f0b4..c73689b72f95 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc,
struct qe_ep *ep;
if (wValue != 0 || wLength != 0
- || pipe > USB_MAX_ENDPOINTS)
+ || pipe >= USB_MAX_ENDPOINTS)
break;
ep = &udc->eps[pipe];
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 48c92bf78bd0..f7661d9750fd 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
int port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- ehci_writel(ehci, PORT_RWC_BITS,
- &ehci->regs->port_status[port]);
spin_unlock_irq(&ehci->lock);
ehci_port_power(ehci, port, false);
spin_lock_irq(&ehci->lock);
+ ehci_writel(ehci, PORT_RWC_BITS,
+ &ehci->regs->port_status[port]);
}
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index d029bbe9eb36..641fed609911 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
int branch;
- ed->state = ED_OPER;
ed->ed_prev = NULL;
ed->ed_next = NULL;
ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
+
+ ed->state = ED_OPER;
return 0;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a7b055bc279a..5ff3f39af38d 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -377,6 +377,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
ret = 0;
virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return -ENODEV;
+
cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
if (!cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 29dc6ab252b1..373e9d039457 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1072,7 +1072,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_device *top_dev;
struct usb_hcd *hcd;
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
hcd = xhci->shared_hcd;
else
hcd = xhci->main_hcd;
@@ -1107,6 +1107,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
max_packets = MAX_PACKET(512);
@@ -1294,6 +1295,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
}
/* Fall through - SS and HS isoc/int have same decoding */
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
@@ -1334,7 +1336,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
- if (udev->speed != USB_SPEED_SUPER ||
+ if (udev->speed < USB_SPEED_SUPER ||
!usb_endpoint_xfer_isoc(&ep->desc))
return 0;
return ep->ss_ep_comp.bmAttributes;
@@ -1384,7 +1386,7 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev,
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
@@ -1455,6 +1457,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
max_burst = 0;
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
/* dig out max burst from ep companion desc */
max_burst = ep->ss_ep_comp.bMaxBurst;
@@ -1786,11 +1789,84 @@ void xhci_free_command(struct xhci_hcd *xhci,
kfree(command);
}
+void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
+{
+ union xhci_trb *erdp_trb, *current_trb;
+ struct xhci_segment *seg;
+ u64 erdp_reg;
+ u32 iman_reg;
+ dma_addr_t deq;
+ unsigned long segment_offset;
+
+ /* disable irq, ack pending interrupt and ack all pending events */
+
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg &= ~IMAN_IE;
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ if (iman_reg & IMAN_IP)
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+
+ /* last acked event trb is in erdp reg */
+ erdp_reg =
+ xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
+ deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
+ if (!deq) {
+ pr_debug("%s: event ring handling not required\n", __func__);
+ return;
+ }
+
+ seg = xhci->sec_event_ring[intr_num]->first_seg;
+ segment_offset = deq - seg->dma;
+
+ /* find out virtual address of the last acked event trb */
+ erdp_trb = current_trb = &seg->trbs[0] +
+ (segment_offset/sizeof(*current_trb));
+
+ /* read cycle state of the last acked trb to find out CCS */
+ xhci->sec_event_ring[intr_num]->cycle_state =
+ (current_trb->event_cmd.flags & TRB_CYCLE);
+
+ while (1) {
+ /* last trb of the event ring: toggle cycle state */
+ if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
+ xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
+ current_trb = &seg->trbs[0];
+ } else {
+ current_trb++;
+ }
+
+ /* cycle state transition */
+ if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
+ xhci->sec_event_ring[intr_num]->cycle_state)
+ break;
+ }
+
+ if (erdp_trb != current_trb) {
+ deq =
+ xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
+ current_trb);
+ if (deq == 0)
+ xhci_warn(xhci,
+ "WARN ivalid SW event ring dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ erdp_reg &= ERST_PTR_MASK;
+ erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C); event ring is empty. */
+ erdp_reg |= ERST_EHB;
+ xhci_write_64(xhci, erdp_reg,
+ &xhci->sec_ir_set[intr_num]->erst_dequeue);
+}
+
int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
{
int size;
- u32 iman_reg;
- u64 erdp_reg;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -1803,28 +1879,7 @@ int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
size =
sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
if (xhci->sec_erst[intr_num].entries) {
- /*
- * disable irq, ack pending interrupt and clear EHB for xHC to
- * generate interrupt again when new event ring is setup
- */
- iman_reg =
- readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
- iman_reg &= ~IMAN_IE;
- writel_relaxed(iman_reg,
- &xhci->sec_ir_set[intr_num]->irq_pending);
- iman_reg =
- readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
- if (iman_reg & IMAN_IP)
- writel_relaxed(iman_reg,
- &xhci->sec_ir_set[intr_num]->irq_pending);
- /* make sure IP gets cleared before clearing EHB */
- mb();
-
- erdp_reg = xhci_read_64(xhci,
- &xhci->sec_ir_set[intr_num]->erst_dequeue);
- xhci_write_64(xhci, erdp_reg | ERST_EHB,
- &xhci->sec_ir_set[intr_num]->erst_dequeue);
-
+ xhci_handle_sec_intr_events(xhci, intr_num);
dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
xhci->sec_erst[intr_num].erst_dma_addr);
xhci->sec_erst[intr_num].entries = NULL;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index de644e56aa3b..963867c2c1d5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -311,11 +311,12 @@ static void xhci_pci_remove(struct pci_dev *dev)
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
}
- usb_hcd_pci_remove(dev);
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(dev, PCI_D3hot);
+
+ usb_hcd_pci_remove(dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index fcb9b4b822aa..1221a80e0bdc 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -336,7 +336,7 @@ static int xhci_plat_runtime_suspend(struct device *dev)
dev_dbg(dev, "xhci-plat runtime suspend\n");
- return 0;
+ return xhci_suspend(xhci, true);
}
static int xhci_plat_runtime_resume(struct device *dev)
@@ -350,7 +350,7 @@ static int xhci_plat_runtime_resume(struct device *dev)
dev_dbg(dev, "xhci-plat runtime resume\n");
- ret = 0;
+ ret = xhci_resume(xhci, false);
pm_runtime_mark_last_busy(dev);
return ret;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 34cd23724bed..1f37b89e7267 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1331,12 +1331,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- if (cmd->command_trb != xhci->cmd_ring->dequeue) {
- xhci_err(xhci,
- "Command completion event does not match command\n");
- return;
- }
-
del_timer(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1348,6 +1342,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_stopped_cmd_ring(xhci, cmd);
return;
}
+
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
+ }
+
/*
* Host aborted the command ring, check if the current command was
* supposed to be aborted, otherwise continue normally.
@@ -3575,7 +3576,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
{
unsigned int max_burst;
- if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
+ if (xhci->hci_version < 0x100 || udev->speed < USB_SPEED_SUPER)
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
@@ -3601,6 +3602,7 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
return 0;
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
/* bMaxBurst is zero based: 0 means 1 packet per burst */
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index a37b219a8dc5..8cfc4ca4c050 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2085,6 +2085,7 @@ static unsigned int xhci_get_block_size(struct usb_device *udev)
case USB_SPEED_HIGH:
return HS_BLOCK;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
return SS_BLOCK;
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
@@ -2210,7 +2211,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
unsigned int packets_remaining = 0;
unsigned int i;
- if (virt_dev->udev->speed == USB_SPEED_SUPER)
+ if (virt_dev->udev->speed >= USB_SPEED_SUPER)
return xhci_check_ss_bw(xhci, virt_dev);
if (virt_dev->udev->speed == USB_SPEED_HIGH) {
@@ -2411,7 +2412,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
if (xhci_is_async_ep(ep_bw->type))
return;
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
if (xhci_is_sync_in_ep(ep_bw->type))
xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
xhci_get_ss_bw_consumed(ep_bw);
@@ -2449,6 +2450,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
/* Should never happen because only LS/FS/HS endpoints will get
@@ -2508,6 +2510,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
/* Should never happen because only LS/FS/HS endpoints will get
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 1a812eafe670..1624b09d9748 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -558,7 +558,6 @@ static void sg_timeout(unsigned long _req)
{
struct usb_sg_request *req = (struct usb_sg_request *) _req;
- req->status = -ETIMEDOUT;
usb_sg_cancel(req);
}
@@ -589,8 +588,10 @@ static int perform_sglist(
mod_timer(&sg_timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
- del_timer_sync(&sg_timer);
- retval = req->status;
+ if (!del_timer_sync(&sg_timer))
+ retval = -ETIMEDOUT;
+ else
+ retval = req->status;
/* FIXME check resulting data pattern */
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 2fbe4c8faa79..dfc63b47ae81 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -2974,11 +2974,6 @@ struct usbpd *usbpd_create(struct device *parent)
goto destroy_wq;
}
- pd->psy_nb.notifier_call = psy_changed;
- ret = power_supply_reg_notifier(&pd->psy_nb);
- if (ret)
- goto put_psy;
-
/*
* associate extcon with the parent dev as it could have a DT
* node which will be useful for extcon_get_edev_by_phandle()
@@ -2987,26 +2982,26 @@ struct usbpd *usbpd_create(struct device *parent)
if (IS_ERR(pd->extcon)) {
usbpd_err(&pd->dev, "failed to allocate extcon device\n");
ret = PTR_ERR(pd->extcon);
- goto unreg_psy;
+ goto put_psy;
}
pd->extcon->mutually_exclusive = usbpd_extcon_exclusive;
ret = devm_extcon_dev_register(parent, pd->extcon);
if (ret) {
usbpd_err(&pd->dev, "failed to register extcon device\n");
- goto unreg_psy;
+ goto put_psy;
}
pd->vbus = devm_regulator_get(parent, "vbus");
if (IS_ERR(pd->vbus)) {
ret = PTR_ERR(pd->vbus);
- goto unreg_psy;
+ goto put_psy;
}
pd->vconn = devm_regulator_get(parent, "vconn");
if (IS_ERR(pd->vconn)) {
ret = PTR_ERR(pd->vconn);
- goto unreg_psy;
+ goto put_psy;
}
pd->vconn_is_external = device_property_present(parent,
@@ -3031,7 +3026,7 @@ struct usbpd *usbpd_create(struct device *parent)
&pd->dr_desc);
if (IS_ERR(pd->dual_role)) {
usbpd_err(&pd->dev, "could not register dual_role instance\n");
- goto unreg_psy;
+ goto put_psy;
} else {
pd->dual_role->drv_data = pd;
}
@@ -3045,13 +3040,18 @@ struct usbpd *usbpd_create(struct device *parent)
INIT_LIST_HEAD(&pd->svid_handlers);
init_completion(&pd->swap_complete);
+ pd->psy_nb.notifier_call = psy_changed;
+ ret = power_supply_reg_notifier(&pd->psy_nb);
+ if (ret)
+ goto del_inst;
+
/* force read initial power_supply values */
psy_changed(&pd->psy_nb, PSY_EVENT_PROP_CHANGED, pd->usb_psy);
return pd;
-unreg_psy:
- power_supply_unreg_notifier(&pd->psy_nb);
+del_inst:
+ list_del(&pd->instance);
put_psy:
power_supply_put(pd->usb_psy);
destroy_wq:
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 0f9447c986a4..8b35ac933028 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -50,6 +50,7 @@
#define DPSE_INTERRUPT BIT(0)
#define QUSB2PHY_PORT_TUNE1 0x23c
+#define QUSB2PHY_TEST1 0x24C
#define QUSB2PHY_1P2_VOL_MIN 1200000 /* uV */
#define QUSB2PHY_1P2_VOL_MAX 1200000 /* uV */
@@ -630,6 +631,14 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
writel_relaxed(intr_mask,
qphy->base + QUSB2PHY_INTR_CTRL);
+ /* enable phy auto-resume */
+ writel_relaxed(0x91,
+ qphy->base + QUSB2PHY_TEST1);
+ /* flush the previous write before next write */
+ wmb();
+ writel_relaxed(0x90,
+ qphy->base + QUSB2PHY_TEST1);
+
dev_dbg(phy->dev, "%s: intr_mask = %x\n",
__func__, intr_mask);
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 839d701ec9b5..09a61acceb81 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -701,7 +701,12 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
if (qphy->tcsr_clamp_dig_n)
writel_relaxed(0x0,
qphy->tcsr_clamp_dig_n);
- qusb_phy_enable_power(qphy, false);
+ /* Do not disable power rails if there is vote for it */
+ if (!qphy->rm_pulldown)
+ qusb_phy_enable_power(qphy, false);
+ else
+ dev_dbg(phy->dev, "race with rm_pulldown. Keep ldo ON\n");
+
/*
* Set put_into_high_z_state to true so next USB
* cable connect, DPF_DMF request performs PHY
@@ -1088,6 +1093,10 @@ static int qusb_phy_probe(struct platform_device *pdev)
if (ret)
usb_remove_phy(&qphy->phy);
+ /* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
return ret;
}
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index f1893e08e51a..36e5b5c530bd 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -808,20 +808,27 @@ static void xfer_work(struct work_struct *work)
{
struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
struct usbhs_pipe *pipe = pkt->pipe;
- struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
+ struct usbhs_fifo *fifo;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct dma_async_tx_descriptor *desc;
- struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
+ struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
+ unsigned long flags;
+ usbhs_lock(priv, flags);
+ fifo = usbhs_pipe_to_fifo(pipe);
+ if (!fifo)
+ goto xfer_work_end;
+
+ chan = usbhsf_dma_chan_get(fifo, pkt);
dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
pkt->trans, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
- return;
+ goto xfer_work_end;
desc->callback = usbhsf_dma_complete;
desc->callback_param = pipe;
@@ -829,7 +836,7 @@ static void xfer_work(struct work_struct *work)
pkt->cookie = dmaengine_submit(desc);
if (pkt->cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
- return;
+ goto xfer_work_end;
}
dev_dbg(dev, " %s %d (%d/ %d)\n",
@@ -840,6 +847,9 @@ static void xfer_work(struct work_struct *work)
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
usbhs_pipe_enable(pipe);
+
+xfer_work_end:
+ usbhs_unlock(priv, flags);
}
/*
@@ -859,7 +869,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_push;
/* check data length if this driver don't use USB-DMAC */
@@ -964,7 +974,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_pop;
fifo = usbhsf_get_dma_fifo(priv, pkt);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index fa14198daf77..efc4fae123a4 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -586,6 +586,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct usbhs_pipe *pipe;
int ret = -EIO;
+ unsigned long flags;
+
+ usbhs_lock(priv, flags);
/*
* if it already have pipe,
@@ -594,7 +597,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
if (uep->pipe) {
usbhs_pipe_clear(uep->pipe);
usbhs_pipe_sequence_data0(uep->pipe);
- return 0;
+ ret = 0;
+ goto usbhsg_ep_enable_end;
}
pipe = usbhs_pipe_malloc(priv,
@@ -614,14 +618,20 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
* use dmaengine if possible.
* It will use pio handler if impossible.
*/
- if (usb_endpoint_dir_in(desc))
+ if (usb_endpoint_dir_in(desc)) {
pipe->handler = &usbhs_fifo_dma_push_handler;
- else
+ } else {
pipe->handler = &usbhs_fifo_dma_pop_handler;
+ usbhs_xxxsts_clear(priv, BRDYSTS,
+ usbhs_pipe_number(pipe));
+ }
ret = 0;
}
+usbhsg_ep_enable_end:
+ usbhs_unlock(priv, flags);
+
return ret;
}
@@ -1065,7 +1075,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
- gpriv->transceiver ? "" : "no ");
+ !IS_ERR(gpriv->transceiver) ? "" : "no ");
/*
* CAUTION
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b61f12160d37..8c48c9d83d48 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c5d6c1e73e8e..f87a938cf005 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -406,6 +406,12 @@
#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
/*
+ * Ivium Technologies product IDs
+ */
+#define FTDI_PALMSENS_PID 0xf440
+#define FTDI_IVIUM_XSTAT_PID 0xf441
+
+/*
* Linx Technologies product ids
*/
#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
@@ -673,6 +679,12 @@
#define INTREPID_NEOVI_PID 0x0701
/*
+ * WICED USB UART
+ */
+#define WICED_VID 0x0A5C
+#define WICED_USB20706V2_PID 0x6422
+
+/*
* Definitions for ID TECH (www.idt-net.com) devices
*/
#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 06c7dbc1c802..63db004af21f 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 8ac9b55f05af..7f3ddd7ba2ce 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
}
if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d96d423d00e6..9894e341c6ac 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -273,6 +273,13 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
+#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
+#define TELIT_PRODUCT_LE920A4_1207 0x1207
+#define TELIT_PRODUCT_LE920A4_1208 0x1208
+#define TELIT_PRODUCT_LE920A4_1211 0x1211
+#define TELIT_PRODUCT_LE920A4_1212 0x1212
+#define TELIT_PRODUCT_LE920A4_1213 0x1213
+#define TELIT_PRODUCT_LE920A4_1214 0x1214
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -518,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
#define VIATELECOM_VENDOR_ID 0x15eb
#define VIATELECOM_PRODUCT_CDS7 0x0001
+/* WeTelecom products */
+#define WETELECOM_VENDOR_ID 0x22de
+#define WETELECOM_PRODUCT_WMD200 0x6801
+#define WETELECOM_PRODUCT_6802 0x6802
+#define WETELECOM_PRODUCT_WMD300 0x6803
+
struct option_blacklist_info {
/* bitmask of interface numbers blacklisted for send_setup */
const unsigned long sendsetup;
@@ -627,6 +640,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1),
+};
+
static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
.sendsetup = BIT(2),
.reserved = BIT(0) | BIT(1) | BIT(3),
@@ -1198,8 +1216,20 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1963,9 +1993,13 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 46f1f13b41f1..a0ca291bc07f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1432,7 +1432,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
rc = usb_register(udriver);
if (rc)
- return rc;
+ goto failed_usb_register;
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
@@ -1450,6 +1450,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
while (sd-- > serial_drivers)
usb_serial_deregister(*sd);
usb_deregister(udriver);
+failed_usb_register:
+ kfree(udriver);
return rc;
}
EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 3b3ba15558b7..20e9a86d2dcf 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -563,67 +563,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
}
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
- uint32_t flags, void *data)
+ unsigned int count, uint32_t flags,
+ void *data)
{
- int32_t fd = *(int32_t *)data;
-
- if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
- return -EINVAL;
-
/* DATA_NONE/DATA_BOOL enables loopback testing */
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- if (*ctx)
- eventfd_signal(*ctx, 1);
- return 0;
+ if (*ctx) {
+ if (count) {
+ eventfd_signal(*ctx, 1);
+ } else {
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ }
+ return 0;
+ }
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
- uint8_t trigger = *(uint8_t *)data;
+ uint8_t trigger;
+
+ if (!count)
+ return -EINVAL;
+
+ trigger = *(uint8_t *)data;
if (trigger && *ctx)
eventfd_signal(*ctx, 1);
- return 0;
- }
- /* Handle SET_DATA_EVENTFD */
- if (fd == -1) {
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = NULL;
return 0;
- } else if (fd >= 0) {
- struct eventfd_ctx *efdctx;
- efdctx = eventfd_ctx_fdget(fd);
- if (IS_ERR(efdctx))
- return PTR_ERR(efdctx);
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = efdctx;
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd;
+
+ if (!count)
+ return -EINVAL;
+
+ fd = *(int32_t *)data;
+ if (fd == -1) {
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+
+ *ctx = efdctx;
+ }
return 0;
- } else
- return -EINVAL;
+ }
+
+ return -EINVAL;
}
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_ERR_IRQ_INDEX)
+ if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- /*
- * We should sanitize start & count, but that wasn't caught
- * originally, so this IRQ index must forever ignore them :-(
- */
-
- return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
+ count, flags, data);
}
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+ if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
+ count, flags, data);
}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 29cfc57d496e..e4110d6de0b5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec *tvc_resp_iov;
+ struct iovec tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -557,7 +557,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
- iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
@@ -1054,7 +1054,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = &vq->iov[out];
+ cmd->tvc_resp_iov = vq->iov[out];
cmd->tvc_in_iovs = in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index d445f95924ef..635ef68b4e94 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -2142,7 +2142,7 @@ static int mdss_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
u32 *lp, *temp, data;
int i, j = 0, off, cnt;
bool ack_error = false;
- char reg[16];
+ char reg[16] = {0x0};
int repeated_bytes = 0;
lp = (u32 *)rp->data;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index f925fd5296d4..79e74df12988 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -1226,6 +1226,58 @@ void mdss_dsi_panel_dsc_pps_send(struct mdss_dsi_ctrl_pdata *ctrl,
mdss_dsi_panel_cmds_send(ctrl, &pcmds, CMD_REQ_COMMIT);
}
+static int mdss_dsi_parse_hdr_settings(struct device_node *np,
+ struct mdss_panel_info *pinfo)
+{
+ int rc = 0;
+ struct mdss_panel_hdr_properties *hdr_prop;
+
+ if (!np) {
+ pr_err("%s: device node pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!pinfo) {
+ pr_err("%s: panel info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ hdr_prop = &pinfo->hdr_properties;
+ hdr_prop->hdr_enabled = of_property_read_bool(np,
+ "qcom,mdss-dsi-panel-hdr-enabled");
+
+ if (hdr_prop->hdr_enabled) {
+ rc = of_property_read_u32_array(np,
+ "qcom,mdss-dsi-panel-hdr-color-primaries",
+ hdr_prop->display_primaries,
+ DISPLAY_PRIMARIES_COUNT);
+ if (rc) {
+ pr_info("%s:%d, Unable to read color primaries,rc:%u",
+ __func__, __LINE__,
+ hdr_prop->hdr_enabled = false);
+ }
+
+ rc = of_property_read_u32(np,
+ "qcom,mdss-dsi-panel-peak-brightness",
+ &(hdr_prop->peak_brightness));
+ if (rc) {
+ pr_info("%s:%d, Unable to read hdr brightness, rc:%u",
+ __func__, __LINE__, rc);
+ hdr_prop->hdr_enabled = false;
+ }
+
+ rc = of_property_read_u32(np,
+ "qcom,mdss-dsi-panel-blackness-level",
+ &(hdr_prop->blackness_level));
+ if (rc) {
+ pr_info("%s:%d, Unable to read hdr brightness, rc:%u",
+ __func__, __LINE__, rc);
+ hdr_prop->hdr_enabled = false;
+ }
+ }
+ return 0;
+}
+
static int mdss_dsi_parse_dsc_version(struct device_node *np,
struct mdss_panel_timing *timing)
{
@@ -2606,6 +2658,9 @@ static int mdss_panel_parse_dt(struct device_node *np,
rc = mdss_panel_parse_display_timings(np, &ctrl_pdata->panel_data);
if (rc)
return rc;
+ rc = mdss_dsi_parse_hdr_settings(np, pinfo);
+ if (rc)
+ return rc;
pinfo->mipi.rx_eot_ignore = of_property_read_bool(np,
"qcom,mdss-dsi-rx-eot-ignore");
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 98ca6c3da20b..e37f0a602c0f 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -566,7 +566,13 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
"primary_panel=%d\nis_pluggable=%d\ndisplay_id=%s\n"
"is_cec_supported=%d\nis_pingpong_split=%d\n"
- "dfps_porch_mode=%d\npu_roi_cnt=%d\ndual_dsi=%d",
+ "dfps_porch_mode=%d\npu_roi_cnt=%d\ndual_dsi=%d\n"
+ "is_hdr_enabled=%d\n"
+ "peak_brightness=%d\nblackness_level=%d\n"
+ "white_chromaticity_x=%d\nwhite_chromaticity_y=%d\n"
+ "red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
+ "green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
+ "blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n",
pinfo->partial_update_enabled,
pinfo->roi_alignment.xstart_pix_align,
pinfo->roi_alignment.width_pix_align,
@@ -580,7 +586,17 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
pinfo->is_pluggable, pinfo->display_id,
pinfo->is_cec_supported, is_pingpong_split(mfd),
dfps_porch_mode, pinfo->partial_update_enabled,
- is_panel_split(mfd));
+ is_panel_split(mfd), pinfo->hdr_properties.hdr_enabled,
+ pinfo->hdr_properties.peak_brightness,
+ pinfo->hdr_properties.blackness_level,
+ pinfo->hdr_properties.display_primaries[0],
+ pinfo->hdr_properties.display_primaries[1],
+ pinfo->hdr_properties.display_primaries[2],
+ pinfo->hdr_properties.display_primaries[3],
+ pinfo->hdr_properties.display_primaries[4],
+ pinfo->hdr_properties.display_primaries[5],
+ pinfo->hdr_properties.display_primaries[6],
+ pinfo->hdr_properties.display_primaries[7]);
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index 7934e4cf3bc4..ce5c8c412c99 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -666,7 +666,7 @@ static void hdmi_hdcp2p2_link_cb(void *data)
static void hdmi_hdcp2p2_recv_msg(struct hdmi_hdcp2p2_ctrl *ctrl)
{
- int rc = 0, timeout_hsync;
+ int timeout_hsync = 0, rc = 0;
char *recvd_msg_buf = NULL;
struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
struct hdmi_tx_ddc_ctrl *ddc_ctrl;
@@ -1079,7 +1079,7 @@ error:
static bool hdmi_hdcp2p2_supported(struct hdmi_hdcp2p2_ctrl *ctrl)
{
- u8 hdcp2version;
+ u8 hdcp2version = 0;
int rc = hdmi_hdcp2p2_read_version(ctrl, &hdcp2version);
if (rc)
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index e9ae30bb6914..92d2ffdd41cd 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -64,8 +64,10 @@
#define HDMI_TX_3_MAX_PCLK_RATE 297000
#define HDMI_TX_4_MAX_PCLK_RATE 600000
-#define hdmi_tx_get_fd(x) (x ? hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
-#define hdmi_tx_set_fd(x, y) {if (x) hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
+#define hdmi_tx_get_fd(x) ((x && (ffs(x) > 0)) ? \
+ hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
+#define hdmi_tx_set_fd(x, y) {if (x && (ffs(x) > 0)) \
+ hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
#define MAX_EDID_READ_RETRY 5
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 58e0d9676736..a53743f8def8 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -823,7 +823,7 @@ void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
{
- int irq_idx, idx;
+ int irq_idx = 0;
unsigned long irq_flags;
int ret = 0;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -842,7 +842,7 @@ int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
spin_lock_irqsave(&mdp_lock, irq_flags);
if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
- irq.irq_mask, mdata->mdp_irq_mask[idx]);
+ irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
ret = -EBUSY;
} else {
pr_debug("MDP IRQ mask old=%x new=%x\n",
@@ -2433,6 +2433,8 @@ static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
size_t len = PAGE_SIZE;
int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
+ if (!pipe)
+ return;
#define SPRINT(fmt, ...) \
(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 3fc8e3883250..38e439f9c649 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -855,12 +855,18 @@ static int __validate_layer_reconfig(struct mdp_input_layer *layer,
*/
if (pipe->csc_coeff_set != layer->color_space) {
src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
- if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
- status = -EPERM;
- pr_err("csc change is not permitted on used pipe\n");
+ if (!src_fmt) {
+ pr_err("Invalid layer format %d\n",
+ layer->buffer.format);
+ status = -EINVAL;
+ } else {
+ if (pipe->src_fmt->is_yuv && src_fmt &&
+ src_fmt->is_yuv) {
+ status = -EPERM;
+ pr_err("csc change is not permitted on used pipe\n");
+ }
}
}
-
return status;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 4d42e42035c3..6311352cb0cf 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -933,17 +933,18 @@ int mdss_mdp_smp_handoff(struct mdss_data_type *mdata)
data = readl_relaxed(mdata->mdp_base +
MDSS_MDP_REG_SMP_ALLOC_W0 + off);
client_id = (data >> s) & 0xFF;
- if (test_bit(i, mdata->mmb_alloc_map)) {
- /*
- * Certain pipes may have a dedicated set of
- * SMP MMBs statically allocated to them. In
- * such cases, we do not need to do anything
- * here.
- */
- pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
- , i, pipe ? pipe->num : -1, client_id);
- continue;
- }
+ if (i < ARRAY_SIZE(mdata->mmb_alloc_map))
+ if (test_bit(i, mdata->mmb_alloc_map)) {
+ /*
+ * Certain pipes may have a dedicated set of
+ * SMP MMBs statically allocated to them. In
+ * such cases, we do not need to do anything
+ * here.
+ */
+ pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
+ , i, pipe ? pipe->num : -1, client_id);
+ continue;
+ }
if (client_id) {
if (client_id != prev_id) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 5a8438caba4b..264024289691 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -2461,7 +2461,7 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
}
if (flags & PP_FLAGS_DIRTY_DITHER) {
- if (!pp_ops[DITHER].pp_set_config) {
+ if (!pp_ops[DITHER].pp_set_config && addr) {
pp_dither_config(addr, pp_sts,
&mdss_pp_res->dither_disp_cfg[disp_num]);
} else {
@@ -5308,7 +5308,8 @@ static int pp_hist_collect(struct mdp_histogram_data *hist,
else if (block == SSPP_VIG)
v_base = ctl_base +
MDSS_MDP_REG_VIG_HIST_CTL_BASE;
- sum = pp_hist_read(v_base, hist_info);
+ if (v_base)
+ sum = pp_hist_read(v_base, hist_info);
}
writel_relaxed(0, hist_info->base);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
index 1e4adc984802..71cab148e1c3 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
@@ -833,6 +833,8 @@ static int pp_gamut_set_config(char __iomem *base_addr,
struct mdp_gamut_cfg_data *gamut_cfg_data = NULL;
struct mdp_gamut_data_v1_7 *gamut_data = NULL;
char __iomem *base_addr_scale = base_addr;
+ uint64_t gamut_val;
+
if (!base_addr || !cfg_data || !pp_sts) {
pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
@@ -900,12 +902,18 @@ static int pp_gamut_set_config(char __iomem *base_addr,
val = index_start;
val |= GAMUT_TABLE_SELECT(i);
writel_relaxed(val, (base_addr + GAMUT_TABLE_INDEX));
- for (j = 0; j < gamut_data->tbl_size[i]; j++) {
- writel_relaxed(gamut_data->c1_c2_data[i][j],
- base_addr + GAMUT_TABLE_LOWER_GB);
- writel_relaxed(gamut_data->c0_data[i][j],
- base_addr + GAMUT_TABLE_UPPER_R);
+
+ writel_relaxed(gamut_data->c1_c2_data[i][0],
+ base_addr + GAMUT_TABLE_LOWER_GB);
+ for (j = 0; j < gamut_data->tbl_size[i] - 1 ; j++) {
+ gamut_val = gamut_data->c1_c2_data[i][j + 1];
+ gamut_val = (gamut_val << 32) |
+ gamut_data->c0_data[i][j];
+ writeq_relaxed(gamut_val,
+ base_addr + GAMUT_TABLE_UPPER_R);
}
+ writel_relaxed(gamut_data->c0_data[i][j],
+ base_addr + GAMUT_TABLE_UPPER_R);
if ((i >= MDP_GAMUT_SCALE_OFF_TABLE_NUM) ||
(!gamut_data->map_en))
continue;
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index 16bb48e22bee..4698d441f365 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -56,6 +56,9 @@ struct panel_id {
#define DSC_PPS_LEN 128
#define INTF_EVENT_STR(x) #x
+/* HDR properties count */
+#define DISPLAY_PRIMARIES_COUNT 8 /* WRGB x and y values */
+
static inline const char *mdss_panel2str(u32 panel)
{
static const char const *names[] = {
@@ -734,6 +737,19 @@ struct mdss_dsi_dual_pu_roi {
bool enabled;
};
+struct mdss_panel_hdr_properties {
+ bool hdr_enabled;
+
+ /* WRGB X and y values arrayed in format */
+ /* [WX, WY, RX, RY, GX, GY, BX, BY] */
+ u32 display_primaries[DISPLAY_PRIMARIES_COUNT];
+
+ /* peak brightness supported by panel */
+ u32 peak_brightness;
+ /* Blackness level supported by panel */
+ u32 blackness_level;
+};
+
struct mdss_panel_info {
u32 xres;
u32 yres;
@@ -878,6 +894,9 @@ struct mdss_panel_info {
/* stores initial adaptive variable refresh vtotal value */
u32 saved_avr_vtotal;
+
+ /* HDR properties of display panel */
+ struct mdss_panel_hdr_properties hdr_properties;
};
struct mdss_panel_timing {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8ab6238c9299..56f7e2521202 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -196,6 +196,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
num = min(num, ARRAY_SIZE(vb->pfns));
mutex_lock(&vb->balloon_lock);
+ /* We can't release more pages than taken */
+ num = min(num, (size_t)vb->num_pages);
for (vb->num_pfns = 0; vb->num_pfns < num;
vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
page = balloon_page_dequeue(vb_dev_info);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ee663c458b20..dc2b94142f53 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -202,6 +202,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
* host should service the ring ASAP. */
if (out_sgs)
vq->notify(&vq->vq);
+ if (indirect)
+ kfree(desc);
END_USE(vq);
return -ENOSPC;
}
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 0e2f43bccf1f..0c427d6a12d1 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
goto out;
}
- hdq_data->hdq_irqstatus = 0;
-
if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 9c234209d8b5..47a4177b16d2 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
err = conf_space_read(dev, cfg_entry, field_start,
&tmp_val);
if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
tmp_val = 0;
err = xen_pcibk_config_read(dev, field_start,
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..531e76474983 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
+ } else {
+ list_for_each_entry(trans, &u->transactions, list)
+ if (trans->handle.id == u->u.msg.tx_id)
+ break;
+ if (&trans->list == &u->transactions)
+ return -ESRCH;
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
if (IS_ERR(reply)) {
- kfree(trans);
+ if (msg_type == XS_TRANSACTION_START)
+ kfree(trans);
rc = PTR_ERR(reply);
goto out;
}
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
list_add(&trans->list, &u->transactions);
}
} else if (u->u.msg.type == XS_TRANSACTION_END) {
- list_for_each_entry(trans, &u->transactions, list)
- if (trans->handle.id == u->u.msg.tx_id)
- break;
- BUG_ON(&trans->list == &u->transactions);
list_del(&trans->list);
-
kfree(trans);
}
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index ba804f3d8278..ce65591b4168 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -250,9 +250,6 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
mutex_unlock(&xs_state.request_mutex);
- if (IS_ERR(ret))
- return ret;
-
if ((msg->type == XS_TRANSACTION_END) ||
((req_msg.type == XS_TRANSACTION_START) &&
(msg->type == XS_ERROR)))
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 7bf835f85bc8..12ceaf52dae6 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
v9fs_proto_dotu(v9ses));
fid = file->private_data;
if (!fid) {
- fid = v9fs_fid_clone(file->f_path.dentry);
+ fid = v9fs_fid_clone(file_dentry(file));
if (IS_ERR(fid))
return PTR_ERR(fid);
@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
* because we want write after unlink usecase
* to work.
*/
- fid = v9fs_writeback_fid(file->f_path.dentry);
+ fid = v9fs_writeback_fid(file_dentry(file));
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
* because we want write after unlink usecase
* to work.
*/
- fid = v9fs_writeback_fid(filp->f_path.dentry);
+ fid = v9fs_writeback_fid(file_dentry(filp));
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 385b449fd7ed..1391f72c28c3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1770,6 +1770,7 @@ struct btrfs_fs_info {
struct btrfs_workqueue *qgroup_rescan_workers;
struct completion qgroup_rescan_completion;
struct btrfs_work qgroup_rescan_work;
+ bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */
/* filesystem state */
unsigned long fs_state;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 41fb43183406..85b207d19aa5 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2276,6 +2276,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
fs_info->qgroup_ulist = NULL;
+ fs_info->qgroup_rescan_running = false;
mutex_init(&fs_info->qgroup_rescan_lock);
}
@@ -3811,7 +3812,7 @@ void close_ctree(struct btrfs_root *root)
smp_mb();
/* wait for the qgroup rescan worker to stop */
- btrfs_qgroup_wait_for_completion(fs_info);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
/* wait for the uuid_scan task to finish */
down(&fs_info->uuid_tree_rescan_sem);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9abe18763a7f..257bbdcb5df6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2786,12 +2786,6 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
btrfs_bio->csum = NULL;
btrfs_bio->csum_allocated = NULL;
btrfs_bio->end_io = NULL;
-
-#ifdef CONFIG_BLK_CGROUP
- /* FIXME, put this into bio_clone_bioset */
- if (bio->bi_css)
- bio_associate_blkcg(new, bio->bi_css);
-#endif
}
return new;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5e5db3687e34..353f4bae658c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1526,27 +1526,24 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
- if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) {
- ret = check_can_nocow(inode, pos, &write_bytes);
- if (ret < 0)
- break;
- if (ret > 0) {
- /*
- * For nodata cow case, no need to reserve
- * data space.
- */
- only_release_metadata = true;
- /*
- * our prealloc extent may be smaller than
- * write_bytes, so scale down.
- */
- num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_CACHE_SIZE);
- reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
- goto reserve_metadata;
- }
+ if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_PREALLOC)) &&
+ check_can_nocow(inode, pos, &write_bytes) > 0) {
+ /*
+ * For nodata cow case, no need to reserve
+ * data space.
+ */
+ only_release_metadata = true;
+ /*
+ * our prealloc extent may be smaller than
+ * write_bytes, so scale down.
+ */
+ num_pages = DIV_ROUND_UP(write_bytes + offset,
+ PAGE_CACHE_SIZE);
+ reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+ goto reserve_metadata;
}
+
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
if (ret < 0)
break;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index bfcd87ee8ff5..65f30b3b04f9 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5121,7 +5121,7 @@ static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return btrfs_qgroup_wait_for_completion(root->fs_info);
+ return btrfs_qgroup_wait_for_completion(root->fs_info, true);
}
static long _btrfs_ioctl_set_received_subvol(struct file *file,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5279fdae7142..bcc965ed5fa1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -995,7 +995,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
goto out;
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
- btrfs_qgroup_wait_for_completion(fs_info);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
@@ -2283,6 +2283,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
int err = -ENOMEM;
int ret = 0;
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = true;
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
path = btrfs_alloc_path();
if (!path)
goto out;
@@ -2349,6 +2353,9 @@ out:
}
done:
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = false;
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
complete_all(&fs_info->qgroup_rescan_completion);
}
@@ -2467,20 +2474,26 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
return 0;
}
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+ bool interruptible)
{
int running;
int ret = 0;
mutex_lock(&fs_info->qgroup_rescan_lock);
spin_lock(&fs_info->qgroup_lock);
- running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+ running = fs_info->qgroup_rescan_running;
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
- if (running)
+ if (!running)
+ return 0;
+
+ if (interruptible)
ret = wait_for_completion_interruptible(
&fs_info->qgroup_rescan_completion);
+ else
+ wait_for_completion(&fs_info->qgroup_rescan_completion);
return ret;
}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index ecb2c143ef75..3d73e4c9c7df 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -46,7 +46,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+ bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 3182273a3407..1418daa03d95 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -46,6 +46,9 @@
#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
#define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
+#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible
+ * root mountable
+ */
struct cifs_sb_info {
struct rb_root tlink_tree;
@@ -67,5 +70,6 @@ struct cifs_sb_info {
struct backing_dev_info bdi;
struct delayed_work prune_tlinks;
struct rcu_head rcu;
+ char *prepath;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index e682b36a210f..4acbc390a7d6 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -731,24 +731,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
memcpy(ses->auth_key.response + baselen, tiblob, tilen);
+ mutex_lock(&ses->server->srv_mutex);
+
rc = crypto_hmacmd5_alloc(ses->server);
if (rc) {
cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
/* calculate ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
if (rc) {
cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
/* calculate first part of the client response (CR1) */
rc = CalcNTLMv2_response(ses, ntlmv2_hash);
if (rc) {
cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
/* now calculate the session key for NTLMv2 */
@@ -757,13 +759,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (rc) {
cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
__func__);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
@@ -771,7 +773,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
@@ -779,6 +781,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (rc)
cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+unlock:
+ mutex_unlock(&ses->server->srv_mutex);
setup_ntlmv2_rsp_ret:
kfree(tiblob);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index cbc0f4bca0c0..450578097fb7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -686,6 +686,14 @@ cifs_do_mount(struct file_system_type *fs_type,
goto out_cifs_sb;
}
+ if (volume_info->prepath) {
+ cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
+ if (cifs_sb->prepath == NULL) {
+ root = ERR_PTR(-ENOMEM);
+ goto out_cifs_sb;
+ }
+ }
+
cifs_setup_cifs_sb(volume_info, cifs_sb);
rc = cifs_mount(cifs_sb, volume_info);
@@ -724,7 +732,11 @@ cifs_do_mount(struct file_system_type *fs_type,
sb->s_flags |= MS_ACTIVE;
}
- root = cifs_get_root(volume_info, sb);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ root = dget(sb->s_root);
+ else
+ root = cifs_get_root(volume_info, sb);
+
if (IS_ERR(root))
goto out_super;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5481a6eb9a95..61c3a5ab8637 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3517,6 +3517,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
return volume_info;
}
+static int
+cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
+ unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ char *full_path)
+{
+ int rc;
+ char *s;
+ char sep, tmp;
+
+ sep = CIFS_DIR_SEP(cifs_sb);
+ s = full_path;
+
+ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
+ while (rc == 0) {
+ /* skip separators */
+ while (*s == sep)
+ s++;
+ if (!*s)
+ break;
+ /* next separator */
+ while (*s && *s != sep)
+ s++;
+
+ /*
+ * temporarily null-terminate the path at the end of
+ * the current component
+ */
+ tmp = *s;
+ *s = 0;
+ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
+ full_path);
+ *s = tmp;
+ }
+ return rc;
+}
+
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
@@ -3654,6 +3692,16 @@ remote_path_check:
kfree(full_path);
goto mount_fail_check;
}
+
+ rc = cifs_are_all_path_components_accessible(server,
+ xid, tcon, cifs_sb,
+ full_path);
+ if (rc != 0) {
+ cifs_dbg(VFS, "cannot query dirs between root and final path, "
+ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = 0;
+ }
kfree(full_path);
}
@@ -3923,6 +3971,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb->mountdata);
+ kfree(cifs_sb->prepath);
call_rcu(&cifs_sb->rcu, delayed_free);
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index c3eb998a99bd..26a3b389a265 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry)
struct dentry *temp;
int namelen;
int dfsplen;
+ int pplen = 0;
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry)
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
+
cifs_bp_rename_retry:
- namelen = dfsplen;
+ namelen = dfsplen + pplen;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
@@ -137,7 +142,7 @@ cifs_bp_rename_retry:
}
}
rcu_read_unlock();
- if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
+ if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n",
namelen, dfsplen);
/* presumably this is only possible if racing with a rename
@@ -153,6 +158,17 @@ cifs_bp_rename_retry:
those safely to '/' if any are found in the middle of the prepath */
/* BB test paths to Windows with '/' in the midst of prepath */
+ if (pplen) {
+ int i;
+
+ cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
+ memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
+ full_path[dfsplen] = '\\';
+ for (i = 0; i < pplen-1; i++)
+ if (full_path[dfsplen+1+i] == '/')
+ full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
+ }
+
if (dfsplen) {
strncpy(full_path, tcon->treeName, dfsplen);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
@@ -229,6 +245,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
goto cifs_create_get_file_info;
}
+ if (S_ISDIR(newinode->i_mode)) {
+ CIFSSMBClose(xid, tcon, fid->netfid);
+ iput(newinode);
+ rc = -EISDIR;
+ goto out;
+ }
+
if (!S_ISREG(newinode->i_mode)) {
/*
* The server may allow us to open things like
@@ -399,10 +422,14 @@ cifs_create_set_dentry:
if (rc != 0) {
cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
rc);
- if (server->ops->close)
- server->ops->close(xid, tcon, fid);
- goto out;
+ goto out_err;
}
+
+ if (S_ISDIR(newinode->i_mode)) {
+ rc = -EISDIR;
+ goto out_err;
+ }
+
d_drop(direntry);
d_add(direntry, newinode);
@@ -410,6 +437,13 @@ out:
kfree(buf);
kfree(full_path);
return rc;
+
+out_err:
+ if (server->ops->close)
+ server->ops->close(xid, tcon, fid);
+ if (newinode)
+ iput(newinode);
+ goto out;
}
int
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a329f5ba35aa..9cdeb0293267 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -982,10 +982,26 @@ struct inode *cifs_root_iget(struct super_block *sb)
struct inode *inode = NULL;
long rc;
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ char *path = NULL;
+ int len;
+
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ && cifs_sb->prepath) {
+ len = strlen(cifs_sb->prepath);
+ path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
+ if (path == NULL)
+ return ERR_PTR(-ENOMEM);
+ path[0] = '/';
+ memcpy(path+1, cifs_sb->prepath, len);
+ } else {
+ path = kstrdup("", GFP_KERNEL);
+ if (path == NULL)
+ return ERR_PTR(-ENOMEM);
+ }
xid = get_xid();
if (tcon->unix_ext) {
- rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
+ rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
/* some servers mistakenly claim POSIX support */
if (rc != -EOPNOTSUPP)
goto iget_no_retry;
@@ -993,7 +1009,8 @@ struct inode *cifs_root_iget(struct super_block *sb)
tcon->unix_ext = false;
}
- rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
+ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
+ rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
iget_no_retry:
if (!inode) {
@@ -1022,6 +1039,7 @@ iget_no_retry:
}
out:
+ kfree(path);
/* can not call macro free_xid here since in a void func
* TODO: This is no longer true
*/
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 53ccdde6ff18..dd8543caa56e 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1039,6 +1039,9 @@ smb2_new_lease_key(struct cifs_fid *fid)
get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
}
+#define SMB2_SYMLINK_STRUCT_SIZE \
+ (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
+
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
const char *full_path, char **target_path,
@@ -1051,7 +1054,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
struct smb2_err_rsp *err_buf = NULL;
struct smb2_symlink_err_rsp *symlink;
- unsigned int sub_len, sub_offset;
+ unsigned int sub_len;
+ unsigned int sub_offset;
+ unsigned int print_len;
+ unsigned int print_offset;
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
@@ -1072,11 +1078,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
kfree(utf16_path);
return -ENOENT;
}
+
+ if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
+ get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) {
+ kfree(utf16_path);
+ return -ENOENT;
+ }
+
/* open must fail on symlink - reset rc */
rc = 0;
symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
sub_len = le16_to_cpu(symlink->SubstituteNameLength);
sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
+ print_len = le16_to_cpu(symlink->PrintNameLength);
+ print_offset = le16_to_cpu(symlink->PrintNameOffset);
+
+ if (get_rfc1002_length(err_buf) + 4 <
+ SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
+ kfree(utf16_path);
+ return -ENOENT;
+ }
+
+ if (get_rfc1002_length(err_buf) + 4 <
+ SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
+ kfree(utf16_path);
+ return -ENOENT;
+ }
+
*target_path = cifs_strndup_from_utf16(
(char *)symlink->PathBuffer + sub_offset,
sub_len, true, cifs_sb->local_nls);
diff --git a/fs/dcache.c b/fs/dcache.c
index d04920036269..9e5099997fcd 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -578,7 +578,6 @@ static struct dentry *dentry_kill(struct dentry *dentry)
failed:
spin_unlock(&dentry->d_lock);
- cpu_relax();
return dentry; /* try again with same dentry */
}
@@ -752,6 +751,8 @@ void dput(struct dentry *dentry)
return;
repeat:
+ might_sleep();
+
rcu_read_lock();
if (likely(fast_dput(dentry))) {
rcu_read_unlock();
@@ -783,8 +784,10 @@ repeat:
kill_it:
dentry = dentry_kill(dentry);
- if (dentry)
+ if (dentry) {
+ cond_resched();
goto repeat;
+ }
}
EXPORT_SYMBOL(dput);
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 706de324f2a6..c82edb049117 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -128,6 +128,7 @@ static const match_table_t tokens = {
struct pts_fs_info {
struct ida allocated_ptys;
struct pts_mount_opts mount_opts;
+ struct super_block *sb;
struct dentry *ptmx_dentry;
};
@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = {
.show_options = devpts_show_options,
};
-static void *new_pts_fs_info(void)
+static void *new_pts_fs_info(struct super_block *sb)
{
struct pts_fs_info *fsi;
@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void)
ida_init(&fsi->allocated_ptys);
fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+ fsi->sb = sb;
return fsi;
}
@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_op = &devpts_sops;
s->s_time_gran = 1;
- s->s_fs_info = new_pts_fs_info();
+ s->s_fs_info = new_pts_fs_info(s);
if (!s->s_fs_info)
goto fail;
@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = {
* to the System V naming convention
*/
-int devpts_new_index(struct inode *ptmx_inode)
+int devpts_new_index(struct pts_fs_info *fsi)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
- struct pts_fs_info *fsi;
int index;
int ida_ret;
- if (!sb)
+ if (!fsi)
return -ENODEV;
- fsi = DEVPTS_SB(sb);
retry:
if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
return -ENOMEM;
@@ -564,11 +563,8 @@ retry:
return index;
}
-void devpts_kill_index(struct inode *ptmx_inode, int idx)
+void devpts_kill_index(struct pts_fs_info *fsi, int idx)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
- struct pts_fs_info *fsi = DEVPTS_SB(sb);
-
mutex_lock(&allocated_ptys_lock);
ida_remove(&fsi->allocated_ptys, idx);
pty_count--;
@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
/*
* pty code needs to hold extra references in case of last /dev/tty close
*/
-
-void devpts_add_ref(struct inode *ptmx_inode)
+struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+ struct super_block *sb;
+ struct pts_fs_info *fsi;
+
+ sb = pts_sb_from_inode(ptmx_inode);
+ if (!sb)
+ return NULL;
+ fsi = DEVPTS_SB(sb);
+ if (!fsi)
+ return NULL;
atomic_inc(&sb->s_active);
- ihold(ptmx_inode);
+ return fsi;
}
-void devpts_del_ref(struct inode *ptmx_inode)
+void devpts_put_ref(struct pts_fs_info *fsi)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-
- iput(ptmx_inode);
- deactivate_super(sb);
+ deactivate_super(fsi->sb);
}
/**
@@ -604,22 +604,21 @@ void devpts_del_ref(struct inode *ptmx_inode)
*
* The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
*/
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
+struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index,
void *priv)
{
struct dentry *dentry;
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+ struct super_block *sb;
struct inode *inode;
struct dentry *root;
- struct pts_fs_info *fsi;
struct pts_mount_opts *opts;
char s[12];
- if (!sb)
+ if (!fsi)
return ERR_PTR(-ENODEV);
+ sb = fsi->sb;
root = sb->s_root;
- fsi = DEVPTS_SB(sb);
opts = &fsi->mount_opts;
inode = new_inode(sb);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 8c536c02e295..c93fe5fce41e 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -113,7 +113,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
.sb = inode->i_sb,
};
lower_file = ecryptfs_file_to_lower(file);
- lower_file->f_pos = ctx->pos;
rc = iterate_dir(lower_file, &buf.ctx);
ctx->pos = buf.ctx.pos;
if (rc < 0)
@@ -171,6 +170,19 @@ out:
return rc;
}
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+ /*
+ * Don't allow mmap on top of file systems that don't support it
+ * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+ * allows recursive mounting, this will need to be extended.
+ */
+ if (!lower_file->f_op->mmap)
+ return -ENODEV;
+ return generic_file_mmap(file, vma);
+}
+
/**
* ecryptfs_open
* @inode: inode speciying file to open
@@ -270,6 +282,45 @@ out:
return rc;
}
+/**
+ * ecryptfs_dir_open
+ * @inode: inode specifying file to open
+ * @file: Structure to return filled in
+ *
+ * Opens the file specified by inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_dir_open(struct inode *inode, struct file *file)
+{
+ struct dentry *ecryptfs_dentry = file->f_path.dentry;
+ /* Private value of ecryptfs_dentry allocated in
+ * ecryptfs_lookup() */
+ struct ecryptfs_file_info *file_info;
+ struct file *lower_file;
+
+ /* Released in ecryptfs_release or end of function if failure */
+ file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
+ ecryptfs_set_file_private(file, file_info);
+ if (unlikely(!file_info)) {
+ ecryptfs_printk(KERN_ERR,
+ "Error attempting to allocate memory\n");
+ return -ENOMEM;
+ }
+ lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
+ file->f_flags, current_cred());
+ if (IS_ERR(lower_file)) {
+ printk(KERN_ERR "%s: Error attempting to initialize "
+ "the lower file for the dentry with name "
+ "[%pd]; rc = [%ld]\n", __func__,
+ ecryptfs_dentry, PTR_ERR(lower_file));
+ kmem_cache_free(ecryptfs_file_info_cache, file_info);
+ return PTR_ERR(lower_file);
+ }
+ ecryptfs_set_file_lower(file, lower_file);
+ return 0;
+}
+
static int ecryptfs_flush(struct file *file, fl_owner_t td)
{
struct file *lower_file = ecryptfs_file_to_lower(file);
@@ -291,6 +342,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
return 0;
}
+static int ecryptfs_dir_release(struct inode *inode, struct file *file)
+{
+ fput(ecryptfs_file_to_lower(file));
+ kmem_cache_free(ecryptfs_file_info_cache,
+ ecryptfs_file_to_private(file));
+ return 0;
+}
+
+static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
+}
+
static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
@@ -370,25 +434,21 @@ const struct file_operations ecryptfs_dir_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .open = ecryptfs_open,
- .flush = ecryptfs_flush,
- .release = ecryptfs_release,
+ .open = ecryptfs_dir_open,
+ .release = ecryptfs_dir_release,
.fsync = ecryptfs_fsync,
- .fasync = ecryptfs_fasync,
- .splice_read = generic_file_splice_read,
- .llseek = default_llseek,
+ .llseek = ecryptfs_dir_llseek,
};
const struct file_operations ecryptfs_main_fops = {
.llseek = generic_file_llseek,
.read_iter = ecryptfs_read_update_atime,
.write_iter = generic_file_write_iter,
- .iterate = ecryptfs_readdir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ecryptfs_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index e818f5ac7a26..866bb18efefe 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mount.h>
-#include <linux/file.h>
#include "ecryptfs_kernel.h"
struct ecryptfs_open_req {
@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
(*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
- goto have_file;
+ goto out;
if ((flags & O_ACCMODE) == O_RDONLY) {
rc = PTR_ERR((*lower_file));
goto out;
@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
mutex_unlock(&ecryptfs_kthread_ctl.mux);
wake_up(&ecryptfs_kthread_ctl.wait);
wait_for_completion(&req.done);
- if (IS_ERR(*lower_file)) {
+ if (IS_ERR(*lower_file))
rc = PTR_ERR(*lower_file);
- goto out;
- }
-have_file:
- if ((*lower_file)->f_op->mmap == NULL) {
- fput(*lower_file);
- *lower_file = NULL;
- rc = -EMEDIUMTYPE;
- }
out:
return rc;
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index fe1f50fe764f..f97110461c19 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -208,6 +208,9 @@ static int ext4_init_block_bitmap(struct super_block *sb,
memset(bh->b_data, 0, sb->s_blocksize);
bit_max = ext4_num_base_meta_clusters(sb, block_group);
+ if ((bit_max >> 3) >= bh->b_size)
+ return -EFSCORRUPTED;
+
for (bit = 0; bit < bit_max; bit++)
ext4_set_bit(bit, bh->b_data);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 3578b25fccfd..8eac7d586997 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -376,9 +376,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
- ext4_lblk_t last = lblock + len - 1;
- if (len == 0 || lblock > last)
+ /*
+ * We allow neither:
+ * - zero length
+ * - overflow/wrap-around
+ */
+ if (lblock + len <= lblock)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
@@ -469,6 +473,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid extent entries";
goto corrupted;
}
+ if (unlikely(depth > 32)) {
+ error_msg = "too large eh_depth";
+ goto corrupted;
+ }
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 27d284042d1a..55a1b5c0072a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -52,25 +52,31 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- __u16 csum_lo;
- __u16 csum_hi = 0;
__u32 csum;
+ __u16 dummy_csum = 0;
+ int offset = offsetof(struct ext4_inode, i_checksum_lo);
+ unsigned int csum_size = sizeof(dummy_csum);
- csum_lo = le16_to_cpu(raw->i_checksum_lo);
- raw->i_checksum_lo = 0;
- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
- csum_hi = le16_to_cpu(raw->i_checksum_hi);
- raw->i_checksum_hi = 0;
- }
-
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
- EXT4_INODE_SIZE(inode->i_sb));
+ csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
+ offset += csum_size;
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_GOOD_OLD_INODE_SIZE - offset);
- raw->i_checksum_lo = cpu_to_le16(csum_lo);
- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
- raw->i_checksum_hi = cpu_to_le16(csum_hi);
+ if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+ offset = offsetof(struct ext4_inode, i_checksum_hi);
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw +
+ EXT4_GOOD_OLD_INODE_SIZE,
+ offset - EXT4_GOOD_OLD_INODE_SIZE);
+ if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
+ csum_size);
+ offset += csum_size;
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) -
+ offset);
+ }
+ }
return csum;
}
@@ -206,9 +212,9 @@ void ext4_evict_inode(struct inode *inode)
* Note that directories do not have this problem because they
* don't use page cache.
*/
- if (ext4_should_journal_data(inode) &&
- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
- inode->i_ino != EXT4_JOURNAL_INO) {
+ if (inode->i_ino != EXT4_JOURNAL_INO &&
+ ext4_should_journal_data(inode) &&
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
@@ -2591,13 +2597,36 @@ retry:
done = true;
}
}
- ext4_journal_stop(handle);
+ /*
+ * Caution: If the handle is synchronous,
+ * ext4_journal_stop() can wait for transaction commit
+ * to finish which may depend on writeback of pages to
+ * complete or on page lock to be released. In that
+ * case, we have to wait until after we have
+ * submitted all the IO, released page locks we hold,
+ * and dropped io_end reference (for extent conversion
+ * to be able to complete) before stopping the handle.
+ */
+ if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
+ ext4_journal_stop(handle);
+ handle = NULL;
+ }
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
/* Unlock pages we didn't use */
mpage_release_unused_pages(&mpd, give_up_on_write);
- /* Drop our io_end reference we got from init */
- ext4_put_io_end(mpd.io_submit.io_end);
+ /*
+ * Drop our io_end reference we got from init. We have
+ * to be careful and use deferred io_end finishing if
+ * we are still holding the transaction as we can
+ * release the last reference to io_end which may end
+ * up doing unwritten extent conversion.
+ */
+ if (handle) {
+ ext4_put_io_end_defer(mpd.io_submit.io_end);
+ ext4_journal_stop(handle);
+ } else
+ ext4_put_io_end(mpd.io_submit.io_end);
if (ret == -ENOSPC && sbi->s_journal) {
/*
@@ -3224,7 +3253,9 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
+
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
if (IS_DAX(inode))
@@ -3291,7 +3322,9 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
ssize_t ret;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
+
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
@@ -5173,8 +5206,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
sbi->s_want_extra_isize,
iloc, handle);
if (ret) {
- ext4_set_inode_state(inode,
- EXT4_STATE_NO_EXPAND);
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb,
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 2f53c3822daa..a0daca4b127b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2933,7 +2933,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata", block, block+len);
/* File system mounted not to panic on error
- * Fix the bitmap and repeat the block allocation
+ * Fix the bitmap and return EFSCORRUPTED
* We leak some of the blocks here.
*/
ext4_lock_group(sb, ac->ac_b_ex.fe_group);
@@ -2942,7 +2942,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
- err = -EAGAIN;
+ err = -EFSCORRUPTED;
goto out_err;
}
@@ -4507,18 +4507,7 @@ repeat:
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
- if (*errp == -EAGAIN) {
- /*
- * drop the reference that we took
- * in ext4_mb_use_best_found
- */
- ext4_mb_release_context(ac);
- ac->ac_b_ex.fe_group = 0;
- ac->ac_b_ex.fe_start = 0;
- ac->ac_b_ex.fe_len = 0;
- ac->ac_status = AC_STATUS_CONTINUE;
- goto repeat;
- } else if (*errp) {
+ if (*errp) {
ext4_discard_allocated_blocks(ac);
goto errout;
} else {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index d5a634ed6c93..20892f27f7be 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -420,15 +420,14 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
- __le32 save_csum;
int size;
+ __u32 dummy_csum = 0;
+ int offset = offsetof(struct dx_tail, dt_checksum);
size = count_offset + (count * sizeof(struct dx_entry));
- save_csum = t->dt_checksum;
- t->dt_checksum = 0;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
- csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
- t->dt_checksum = save_csum;
+ csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
return cpu_to_le32(csum);
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 852c26806af2..5bab28caa9d4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2030,23 +2030,25 @@ failed:
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
struct ext4_group_desc *gdp)
{
- int offset;
+ int offset = offsetof(struct ext4_group_desc, bg_checksum);
__u16 crc = 0;
__le32 le_group = cpu_to_le32(block_group);
struct ext4_sb_info *sbi = EXT4_SB(sb);
if (ext4_has_metadata_csum(sbi->s_sb)) {
/* Use new metadata_csum algorithm */
- __le16 save_csum;
__u32 csum32;
+ __u16 dummy_csum = 0;
- save_csum = gdp->bg_checksum;
- gdp->bg_checksum = 0;
csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
sizeof(le_group));
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
- sbi->s_desc_size);
- gdp->bg_checksum = save_csum;
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
+ sizeof(dummy_csum));
+ offset += sizeof(dummy_csum);
+ if (offset < sbi->s_desc_size)
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
+ sbi->s_desc_size - offset);
crc = csum32 & 0xFFFF;
goto out;
@@ -2056,8 +2058,6 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
if (!ext4_has_feature_gdt_csum(sb))
return 0;
- offset = offsetof(struct ext4_group_desc, bg_checksum);
-
crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
crc = crc16(crc, (__u8 *)gdp, offset);
@@ -2093,6 +2093,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
+ ext4_fsblk_t sb_block,
ext4_group_t *first_not_zeroed)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2123,6 +2124,11 @@ static int ext4_check_descriptors(struct super_block *sb,
grp = i;
block_bitmap = ext4_block_bitmap(sb, gdp);
+ if (block_bitmap == sb_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Block bitmap for group %u overlaps "
+ "superblock", i);
+ }
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u not in group "
@@ -2130,6 +2136,11 @@ static int ext4_check_descriptors(struct super_block *sb,
return 0;
}
inode_bitmap = ext4_inode_bitmap(sb, gdp);
+ if (inode_bitmap == sb_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode bitmap for group %u overlaps "
+ "superblock", i);
+ }
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u not in group "
@@ -2137,6 +2148,11 @@ static int ext4_check_descriptors(struct super_block *sb,
return 0;
}
inode_table = ext4_inode_table(sb, gdp);
+ if (inode_table == sb_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode table for group %u overlaps "
+ "superblock", i);
+ }
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2240,6 +2256,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
while (es->s_last_orphan) {
struct inode *inode;
+ /*
+ * We may have encountered an error during cleanup; if
+ * so, skip the rest.
+ */
+ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+ jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ es->s_last_orphan = 0;
+ break;
+ }
+
inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
if (IS_ERR(inode)) {
es->s_last_orphan = 0;
@@ -3372,6 +3398,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
+ ext4_msg(sb, KERN_ERR,
+ "Number of reserved GDT blocks insanely large: %d",
+ le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
+ goto failed_mount;
+ }
+
if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
if (blocksize != PAGE_SIZE) {
ext4_msg(sb, KERN_ERR,
@@ -3623,7 +3656,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount2;
}
}
- if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
+ if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
ret = -EFSCORRUPTED;
goto failed_mount2;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 06fd5f7f993d..7c23363ecf19 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -123,17 +123,18 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
- __le32 save_csum;
__le64 dsk_block_nr = cpu_to_le64(block_nr);
+ __u32 dummy_csum = 0;
+ int offset = offsetof(struct ext4_xattr_header, h_checksum);
- save_csum = hdr->h_checksum;
- hdr->h_checksum = 0;
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
sizeof(dsk_block_nr));
- csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
- EXT4_BLOCK_SIZE(inode->i_sb));
+ csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ offset += sizeof(dummy_csum);
+ csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
+ EXT4_BLOCK_SIZE(inode->i_sb) - offset);
- hdr->h_checksum = save_csum;
return cpu_to_le32(csum);
}
@@ -1284,15 +1285,19 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
size_t min_offs, free;
int total_ino;
void *base, *start, *end;
- int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+ int error = 0, tried_min_extra_isize = 0;
int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+ int isize_diff; /* How much do we need to grow i_extra_isize */
down_write(&EXT4_I(inode)->xattr_sem);
+ /*
+ * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
+ */
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
retry:
- if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
- up_write(&EXT4_I(inode)->xattr_sem);
- return 0;
- }
+ isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
+ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+ goto out;
header = IHDR(inode, raw_inode);
entry = IFIRST(header);
@@ -1313,7 +1318,7 @@ retry:
goto cleanup;
free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
- if (free >= new_extra_isize) {
+ if (free >= isize_diff) {
entry = IFIRST(header);
ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
- new_extra_isize, (void *)raw_inode +
@@ -1321,8 +1326,7 @@ retry:
(void *)header, total_ino,
inode->i_sb->s_blocksize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
- error = 0;
- goto cleanup;
+ goto out;
}
/*
@@ -1345,7 +1349,7 @@ retry:
end = bh->b_data + bh->b_size;
min_offs = end - base;
free = ext4_xattr_free_space(first, &min_offs, base, NULL);
- if (free < new_extra_isize) {
+ if (free < isize_diff) {
if (!tried_min_extra_isize && s_min_extra_isize) {
tried_min_extra_isize++;
new_extra_isize = s_min_extra_isize;
@@ -1359,7 +1363,7 @@ retry:
free = inode->i_sb->s_blocksize;
}
- while (new_extra_isize > 0) {
+ while (isize_diff > 0) {
size_t offs, size, entry_size;
struct ext4_xattr_entry *small_entry = NULL;
struct ext4_xattr_info i = {
@@ -1390,7 +1394,7 @@ retry:
EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
EXT4_XATTR_LEN(last->e_name_len);
if (total_size <= free && total_size < min_total_size) {
- if (total_size < new_extra_isize) {
+ if (total_size < isize_diff) {
small_entry = last;
} else {
entry = last;
@@ -1445,22 +1449,22 @@ retry:
error = ext4_xattr_ibody_set(handle, inode, &i, is);
if (error)
goto cleanup;
+ total_ino -= entry_size;
entry = IFIRST(header);
- if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
- shift_bytes = new_extra_isize;
+ if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
+ shift_bytes = isize_diff;
else
- shift_bytes = entry_size + size;
+ shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
/* Adjust the offsets and shift the remaining entries ahead */
- ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
- shift_bytes, (void *)raw_inode +
- EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
- (void *)header, total_ino - entry_size,
- inode->i_sb->s_blocksize);
+ ext4_xattr_shift_entries(entry, -shift_bytes,
+ (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize + shift_bytes,
+ (void *)header, total_ino, inode->i_sb->s_blocksize);
- extra_isize += shift_bytes;
- new_extra_isize -= shift_bytes;
- EXT4_I(inode)->i_extra_isize = extra_isize;
+ isize_diff -= shift_bytes;
+ EXT4_I(inode)->i_extra_isize += shift_bytes;
+ header = IHDR(inode, raw_inode);
i.name = b_entry_name;
i.value = buffer;
@@ -1482,6 +1486,8 @@ retry:
kfree(bs);
}
brelse(bh);
+out:
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem);
return 0;
@@ -1493,6 +1499,10 @@ cleanup:
kfree(is);
kfree(bs);
brelse(bh);
+ /*
+ * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
+ * size expansion failed.
+ */
up_write(&EXT4_I(inode)->xattr_sem);
return error;
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 461dcf5e4526..338ae65a160f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -435,6 +435,15 @@ static int fuse_flush(struct file *file, fl_owner_t id)
fuse_sync_writes(inode);
mutex_unlock(&inode->i_mutex);
+ if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+ err = -ENOSPC;
+ if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+ err = -EIO;
+ if (err)
+ return err;
+
req = fuse_get_req_nofail_nopages(fc, file);
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
@@ -480,6 +489,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
goto out;
fuse_sync_writes(inode);
+
+ /*
+ * Due to implementation of fuse writeback
+ * filemap_write_and_wait_range() does not catch errors.
+ * We have to do this directly after fuse_sync_writes()
+ */
+ if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+ err = -ENOSPC;
+ if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+ err = -EIO;
+ if (err)
+ goto out;
+
err = sync_inode_metadata(inode, 1);
if (err)
goto out;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 33ec874302cb..43bb5eb17ad2 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -934,7 +934,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
- FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+ FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
req->in.h.opcode = FUSE_INIT;
diff --git a/fs/inode.c b/fs/inode.c
index 1be5f9003eb3..b0edef500590 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1733,8 +1733,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
*/
int file_remove_privs(struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = d_inode(dentry);
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = file_inode(file);
int kill;
int error = 0;
@@ -1742,7 +1742,7 @@ int file_remove_privs(struct file *file)
if (IS_NOSEC(inode))
return 0;
- kill = file_needs_remove_privs(file);
+ kill = dentry_needs_remove_privs(dentry);
if (kill < 0)
return kill;
if (kill)
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 36345fefa3ff..2d964ce45606 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -124,7 +124,7 @@ static int journal_submit_commit_record(journal_t *journal,
struct commit_header *tmp;
struct buffer_head *bh;
int ret;
- struct timespec now = current_kernel_time();
+ struct timespec64 now = current_kernel_time64();
*cbh = NULL;
diff --git a/fs/locks.c b/fs/locks.c
index 6333263b7bc8..8eddae23e10b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1602,7 +1602,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
{
struct file_lock *fl, *my_fl = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct file_lock_context *ctx;
bool is_deleg = (*flp)->fl_flags & FL_DELEG;
int error;
diff --git a/fs/namei.c b/fs/namei.c
index 441033da002b..1b4585e9a463 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -887,6 +887,7 @@ static inline int may_follow_link(struct nameidata *nd)
{
const struct inode *inode;
const struct inode *parent;
+ kuid_t puid;
if (!sysctl_protected_symlinks)
return 0;
@@ -902,7 +903,8 @@ static inline int may_follow_link(struct nameidata *nd)
return 0;
/* Allowed if parent directory and link owner match. */
- if (uid_eq(parent->i_uid, inode->i_uid))
+ puid = parent->i_uid;
+ if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
return 0;
if (nd->flags & LOOKUP_RCU)
diff --git a/fs/namespace.c b/fs/namespace.c
index 33064fcbfff9..5be02a0635be 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry)
goto out_unlock;
lock_mount_hash();
+ event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7b9316406930..7a9b6e347249 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1261,6 +1261,9 @@ int nfs_updatepage(struct file *file, struct page *page,
dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
file, count, (long long)(page_file_offset(page) + offset));
+ if (!count)
+ goto out;
+
if (nfs_can_extend_write(file, page, inode)) {
count = max(count + offset, nfs_page_length(page));
offset = 0;
@@ -1271,7 +1274,7 @@ int nfs_updatepage(struct file *file, struct page *page,
nfs_set_pageerror(page);
else
__set_page_dirty_nobuffers(page);
-
+out:
dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
return status;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index ed2f64ca49de..f7ea624780a7 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4882,6 +4882,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfs_ok;
}
+static __be32
+nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
+{
+ struct nfs4_ol_stateid *stp = openlockstateid(s);
+ __be32 ret;
+
+ mutex_lock(&stp->st_mutex);
+
+ ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+ if (ret)
+ goto out;
+
+ ret = nfserr_locks_held;
+ if (check_for_locks(stp->st_stid.sc_file,
+ lockowner(stp->st_stateowner)))
+ goto out;
+
+ release_lock_stateid(stp);
+ ret = nfs_ok;
+
+out:
+ mutex_unlock(&stp->st_mutex);
+ nfs4_put_stid(s);
+ return ret;
+}
+
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_free_stateid *free_stateid)
@@ -4889,7 +4915,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
- struct nfs4_ol_stateid *stp;
struct nfs4_client *cl = cstate->session->se_client;
__be32 ret = nfserr_bad_stateid;
@@ -4908,18 +4933,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
ret = nfserr_locks_held;
break;
case NFS4_LOCK_STID:
- ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
- if (ret)
- break;
- stp = openlockstateid(s);
- ret = nfserr_locks_held;
- if (check_for_locks(stp->st_stid.sc_file,
- lockowner(stp->st_stateowner)))
- break;
- WARN_ON(!unhash_lock_stateid(stp));
+ atomic_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
- nfs4_put_stid(s);
- ret = nfs_ok;
+ ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
case NFS4_REVOKED_DELEG_STID:
dp = delegstateid(s);
@@ -5486,7 +5502,7 @@ static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *ost,
struct nfsd4_lock *lock,
- struct nfs4_ol_stateid **lst, bool *new)
+ struct nfs4_ol_stateid **plst, bool *new)
{
__be32 status;
struct nfs4_file *fi = ost->st_stid.sc_file;
@@ -5494,7 +5510,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_client *cl = oo->oo_owner.so_client;
struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
struct nfs4_lockowner *lo;
+ struct nfs4_ol_stateid *lst;
unsigned int strhashval;
+ bool hashed;
lo = find_lockowner_str(cl, &lock->lk_new_owner);
if (!lo) {
@@ -5510,12 +5528,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
goto out;
}
- *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
- if (*lst == NULL) {
+retry:
+ lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
+ if (lst == NULL) {
status = nfserr_jukebox;
goto out;
}
+
+ mutex_lock(&lst->st_mutex);
+
+ /* See if it's still hashed to avoid race with FREE_STATEID */
+ spin_lock(&cl->cl_lock);
+ hashed = !list_empty(&lst->st_perfile);
+ spin_unlock(&cl->cl_lock);
+
+ if (!hashed) {
+ mutex_unlock(&lst->st_mutex);
+ nfs4_put_stid(&lst->st_stid);
+ goto retry;
+ }
status = nfs_ok;
+ *plst = lst;
out:
nfs4_put_stateowner(&lo->lo_owner);
return status;
@@ -5582,8 +5615,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new);
- if (status == nfs_ok)
- mutex_lock(&lock_stp->st_mutex);
} else {
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 69bd801afb53..37e49cb2ac4c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -443,7 +443,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
return 0;
bytes = le16_to_cpu(sbp->s_bytes);
- if (bytes > BLOCK_SIZE)
+ if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
return 0;
crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
sumoff);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index eff6319d5037..9e52609cd683 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -48,6 +48,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
}
for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+ if (ovl_is_private_xattr(name))
+ continue;
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 0597820f5d9d..220b04f04523 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -63,6 +63,9 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
if (!err) {
upperdentry = ovl_dentry_upper(dentry);
+ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+ attr->ia_valid &= ~ATTR_MODE;
+
mutex_lock(&upperdentry->d_inode->i_mutex);
err = notify_change(upperdentry, attr, NULL);
if (!err)
@@ -216,7 +219,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
}
-static bool ovl_is_private_xattr(const char *name)
+bool ovl_is_private_xattr(const char *name)
{
return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
}
@@ -274,7 +277,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
struct path realpath;
enum ovl_path_type type = ovl_path_real(dentry, &realpath);
ssize_t res;
- int off;
+ size_t len;
+ char *s;
res = vfs_listxattr(realpath.dentry, list, size);
if (res <= 0 || size == 0)
@@ -284,17 +288,19 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
return res;
/* filter out private xattrs */
- for (off = 0; off < res;) {
- char *s = list + off;
- size_t slen = strlen(s) + 1;
+ for (s = list, len = res; len;) {
+ size_t slen = strnlen(s, len) + 1;
- BUG_ON(off + slen > res);
+ /* underlying fs providing us with an broken xattr list? */
+ if (WARN_ON(slen > len))
+ return -EIO;
+ len -= slen;
if (ovl_is_private_xattr(s)) {
res -= slen;
- memmove(s, s + slen, res - off);
+ memmove(s, s + slen, len);
} else {
- off += slen;
+ s += slen;
}
}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 735e1d49b301..c319d5eaabcf 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -174,6 +174,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
int ovl_removexattr(struct dentry *dentry, const char *name);
struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
+bool ovl_is_private_xattr(const char *name);
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
struct ovl_entry *oe);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index a1acc6004a91..d70208c0de84 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -376,7 +376,8 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
static bool ovl_dentry_remote(struct dentry *dentry)
{
return dentry->d_flags &
- (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
+ (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE |
+ DCACHE_OP_REAL);
}
static bool ovl_dentry_weird(struct dentry *dentry)
@@ -762,6 +763,10 @@ retry:
struct kstat stat = {
.mode = S_IFDIR | 0,
};
+ struct iattr attr = {
+ .ia_valid = ATTR_MODE,
+ .ia_mode = stat.mode,
+ };
if (work->d_inode) {
err = -EEXIST;
@@ -777,6 +782,21 @@ retry:
err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
if (err)
goto out_dput;
+
+ err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+ if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ goto out_dput;
+
+ err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+ if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ goto out_dput;
+
+ /* Clear any inherited mode bits */
+ inode_lock(work->d_inode);
+ err = notify_change(work, &attr, NULL);
+ inode_unlock(work->d_inode);
+ if (err)
+ goto out_dput;
}
out_unlock:
mutex_unlock(&dir->i_mutex);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7d61792c053a..fed79c45b54c 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2459,16 +2459,27 @@ static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
if (!p)
return -ESRCH;
- if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
- task_lock(p);
- if (slack_ns == 0)
- p->timer_slack_ns = p->default_timer_slack_ns;
- else
- p->timer_slack_ns = slack_ns;
- task_unlock(p);
- } else
- count = -EPERM;
+ if (p != current) {
+ if (!capable(CAP_SYS_NICE)) {
+ count = -EPERM;
+ goto out;
+ }
+
+ err = security_task_setscheduler(p);
+ if (err) {
+ count = err;
+ goto out;
+ }
+ }
+
+ task_lock(p);
+ if (slack_ns == 0)
+ p->timer_slack_ns = p->default_timer_slack_ns;
+ else
+ p->timer_slack_ns = slack_ns;
+ task_unlock(p);
+out:
put_task_struct(p);
return count;
@@ -2478,19 +2489,28 @@ static int timerslack_ns_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
struct task_struct *p;
- int err = 0;
+ int err = 0;
p = get_proc_task(inode);
if (!p)
return -ESRCH;
- if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
- task_lock(p);
- seq_printf(m, "%llu\n", p->timer_slack_ns);
- task_unlock(p);
- } else
- err = -EPERM;
+ if (p != current) {
+
+ if (!capable(CAP_SYS_NICE)) {
+ err = -EPERM;
+ goto out;
+ }
+ err = security_task_getscheduler(p);
+ if (err)
+ goto out;
+ }
+ task_lock(p);
+ seq_printf(m, "%llu\n", p->timer_slack_ns);
+ task_unlock(p);
+
+out:
put_task_struct(p);
return err;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index be3003ef2b4e..6a44fb94228c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -300,23 +300,29 @@ static int do_maps_open(struct inode *inode, struct file *file,
sizeof(struct proc_maps_private));
}
-static pid_t pid_of_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, bool is_pid)
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static int is_stack(struct proc_maps_private *priv,
+ struct vm_area_struct *vma, int is_pid)
{
- struct inode *inode = priv->inode;
- struct task_struct *task;
- pid_t ret = 0;
+ int stack = 0;
+
+ if (is_pid) {
+ stack = vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
+ } else {
+ struct inode *inode = priv->inode;
+ struct task_struct *task;
- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task) {
- task = task_of_stack(task, vma, is_pid);
+ rcu_read_lock();
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task)
- ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+ stack = vma_is_stack_for_task(vma, task);
+ rcu_read_unlock();
}
- rcu_read_unlock();
-
- return ret;
+ return stack;
}
static void
@@ -376,8 +382,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
name = arch_vma_name(vma);
if (!name) {
- pid_t tid;
-
if (!mm) {
name = "[vdso]";
goto done;
@@ -389,23 +393,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
goto done;
}
- tid = pid_of_stack(priv, vma, is_pid);
- if (tid != 0) {
- /*
- * Thread stack in /proc/PID/task/TID/maps or
- * the main process stack.
- */
- if (!is_pid || (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack)) {
- name = "[stack]";
- } else {
- /* Thread stack in /proc/PID/maps */
- seq_pad(m, ' ');
- seq_printf(m, "[stack:%d]", tid);
- }
+ if (is_stack(priv, vma, is_pid)) {
+ name = "[stack]";
goto done;
}
-
if (vma_get_anon_name(vma)) {
seq_pad(m, ' ');
seq_print_vma_name(m, vma);
@@ -1862,19 +1853,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
seq_file_path(m, file, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_puts(m, " heap");
- } else {
- pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
- if (tid != 0) {
- /*
- * Thread stack in /proc/PID/task/TID/maps or
- * the main process stack.
- */
- if (!is_pid || (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack))
- seq_puts(m, " stack");
- else
- seq_printf(m, " stack:%d", tid);
- }
+ } else if (is_stack(proc_priv, vma, is_pid)) {
+ seq_puts(m, " stack");
}
if (is_vm_hugetlb_page(vma))
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index e0d64c92e4f6..faacb0c0d857 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struct *mm,
return size;
}
-static pid_t pid_of_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, bool is_pid)
+static int is_stack(struct proc_maps_private *priv,
+ struct vm_area_struct *vma, int is_pid)
{
- struct inode *inode = priv->inode;
- struct task_struct *task;
- pid_t ret = 0;
-
- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task) {
- task = task_of_stack(task, vma, is_pid);
+ struct mm_struct *mm = vma->vm_mm;
+ int stack = 0;
+
+ if (is_pid) {
+ stack = vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack;
+ } else {
+ struct inode *inode = priv->inode;
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task)
- ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+ stack = vma_is_stack_for_task(vma, task);
+ rcu_read_unlock();
}
- rcu_read_unlock();
-
- return ret;
+ return stack;
}
/*
@@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
if (file) {
seq_pad(m, ' ');
seq_file_path(m, file, "");
- } else if (mm) {
- pid_t tid = pid_of_stack(priv, vma, is_pid);
-
- if (tid != 0) {
- seq_pad(m, ' ');
- /*
- * Thread stack in /proc/PID/task/TID/maps or
- * the main process stack.
- */
- if (!is_pid || (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack))
- seq_printf(m, "[stack]");
- else
- seq_printf(m, "[stack:%d]", tid);
- }
+ } else if (mm && is_stack(priv, vma, is_pid)) {
+ seq_pad(m, ' ');
+ seq_printf(m, "[stack]");
}
seq_putc(m, '\n');
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 588461bb2dd4..40a0fe0a4e05 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -431,6 +431,40 @@ static int pstore_write_compat(enum pstore_type_id type,
size, psi);
}
+static int pstore_write_buf_user_compat(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
+ const char __user *buf,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ unsigned long flags = 0;
+ size_t i, bufsize = size;
+ long ret = 0;
+
+ if (unlikely(!access_ok(VERIFY_READ, buf, size)))
+ return -EFAULT;
+ if (bufsize > psinfo->bufsize)
+ bufsize = psinfo->bufsize;
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ for (i = 0; i < size; ) {
+ size_t c = min(size - i, bufsize);
+
+ ret = __copy_from_user(psinfo->buf, buf + i, c);
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = psi->write_buf(type, reason, id, part, psinfo->buf,
+ compressed, c, psi);
+ if (unlikely(ret < 0))
+ break;
+ i += c;
+ }
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ return unlikely(ret < 0) ? ret : size;
+}
+
/*
* platform specific persistent storage driver registers with
* us here. If pstore is already mounted, call the platform
@@ -453,6 +487,8 @@ int pstore_register(struct pstore_info *psi)
if (!psi->write)
psi->write = pstore_write_compat;
+ if (!psi->write_buf_user)
+ psi->write_buf_user = pstore_write_buf_user_compat;
psinfo = psi;
mutex_init(&psinfo->read_mutex);
spin_unlock(&pstore_lock);
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
index 7de20cd3797f..78f6176c020f 100644
--- a/fs/pstore/pmsg.c
+++ b/fs/pstore/pmsg.c
@@ -19,48 +19,25 @@
#include "internal.h"
static DEFINE_MUTEX(pmsg_lock);
-#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
static ssize_t write_pmsg(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- size_t i, buffer_size;
- char *buffer;
+ u64 id;
+ int ret;
if (!count)
return 0;
+ /* check outside lock, page in any data. write_buf_user also checks */
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
- buffer_size = count;
- if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
- buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
- buffer = vmalloc(buffer_size);
- if (!buffer)
- return -ENOMEM;
-
mutex_lock(&pmsg_lock);
- for (i = 0; i < count; ) {
- size_t c = min(count - i, buffer_size);
- u64 id;
- long ret;
-
- ret = __copy_from_user(buffer, buf + i, c);
- if (unlikely(ret != 0)) {
- mutex_unlock(&pmsg_lock);
- vfree(buffer);
- return -EFAULT;
- }
- psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c,
- psinfo);
-
- i += c;
- }
-
+ ret = psinfo->write_buf_user(PSTORE_TYPE_PMSG, 0, &id, 0, buf, 0, count,
+ psinfo);
mutex_unlock(&pmsg_lock);
- vfree(buffer);
- return count;
+ return ret ? ret : count;
}
static const struct file_operations pmsg_fops = {
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 414041342a99..5b10c2b4146c 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -331,6 +331,24 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
return 0;
}
+static int notrace ramoops_pstore_write_buf_user(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
+ const char __user *buf,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ if (type == PSTORE_TYPE_PMSG) {
+ struct ramoops_context *cxt = psi->data;
+
+ if (!cxt->mprz)
+ return -ENOMEM;
+ return persistent_ram_write_user(cxt->mprz, buf, size);
+ }
+
+ return -EINVAL;
+}
+
static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
struct timespec time, struct pstore_info *psi)
{
@@ -369,6 +387,7 @@ static struct ramoops_context oops_cxt = {
.open = ramoops_pstore_open,
.read = ramoops_pstore_read,
.write_buf = ramoops_pstore_write_buf,
+ .write_buf_user = ramoops_pstore_write_buf_user,
.erase = ramoops_pstore_erase,
},
};
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 76c3f80efdfa..aa9afe573155 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -17,15 +17,16 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
+#include <linux/pstore_ram.h>
#include <linux/rslib.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <linux/pstore_ram.h>
#include <asm/page.h>
struct persistent_ram_buffer {
@@ -303,6 +304,16 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
persistent_ram_update_ecc(prz, start, count);
}
+static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
+ const void __user *s, unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
+ -EFAULT : 0;
+ persistent_ram_update_ecc(prz, start, count);
+ return ret;
+}
+
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
struct persistent_ram_buffer *buffer = prz->buffer;
@@ -356,6 +367,38 @@ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
return count;
}
+int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
+ const void __user *s, unsigned int count)
+{
+ int rem, ret = 0, c = count;
+ size_t start;
+
+ if (unlikely(!access_ok(VERIFY_READ, s, count)))
+ return -EFAULT;
+ if (unlikely(c > prz->buffer_size)) {
+ s += c - prz->buffer_size;
+ c = prz->buffer_size;
+ }
+
+ buffer_size_add(prz, c);
+
+ start = buffer_start_add(prz, c);
+
+ rem = prz->buffer_size - start;
+ if (unlikely(rem < c)) {
+ ret = persistent_ram_update_user(prz, s, start, rem);
+ s += rem;
+ c -= rem;
+ start = 0;
+ }
+ if (likely(!ret))
+ ret = persistent_ram_update_user(prz, s, start, c);
+
+ persistent_ram_update_header_ecc(prz);
+
+ return unlikely(ret) ? ret : count;
+}
+
size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
return prz->old_log_size;
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 128b3e56851f..41e0e11b3c35 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -112,7 +112,7 @@ void get_derived_permission(struct dentry *parent, struct dentry *dentry)
void get_derive_permissions_recursive(struct dentry *parent) {
struct dentry *dentry;
list_for_each_entry(dentry, &parent->d_subdirs, d_child) {
- if (dentry && dentry->d_inode) {
+ if (dentry->d_inode) {
mutex_lock(&dentry->d_inode->i_mutex);
get_derived_permission(parent, dentry);
fix_derived_permission(dentry->d_inode);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index e85664b7c7d9..d672e2fec459 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -222,8 +222,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
size -= n;
buf += n;
copied += n;
- if (!m->count)
+ if (!m->count) {
+ m->from = 0;
m->index++;
+ }
if (!size)
goto Done;
}
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f35523d4fa3a..b803213d1307 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
* If buf != of->prealloc_buf, we don't know how
* large it is, so cannot safely pass it to ->show
*/
- if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
+ if (WARN_ON_ONCE(buf != of->prealloc_buf))
return 0;
len = ops->show(kobj, of->kn->priv, buf);
+ if (pos) {
+ if (len <= pos)
+ return 0;
+ len -= pos;
+ memmove(buf, buf + pos, len);
+ }
return min(count, len);
}
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index b45345d701e7..51157da3f76e 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
p = c->gap_lebs;
do {
- ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
+ ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 8a53eaa349f4..7088be6afb3c 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -581,7 +581,8 @@ xfs_sb_verify(
* Only check the in progress field for the primary superblock as
* mkfs.xfs doesn't clear it from secondary superblocks.
*/
- return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+ return xfs_mount_validate_sb(mp, &sb,
+ bp->b_maps[0].bm_bn == XFS_SB_DADDR,
check_version);
}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8f5a12ab2f2b..f608dd5e2374 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -530,15 +530,19 @@
#define INIT_TEXT \
*(.init.text) \
+ *(.text.startup) \
MEM_DISCARD(init.text)
#define EXIT_DATA \
*(.exit.data) \
+ *(.fini_array) \
+ *(.dtors) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
*(.exit.text) \
+ *(.text.exit) \
MEM_DISCARD(exit.text)
#define EXIT_CALL \
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 17c445612e01..2cdc723d750f 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -277,7 +277,9 @@
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
@@ -289,6 +291,8 @@
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
- INTEL_VGA_DEVICE(0x5A84, info)
+ INTEL_VGA_DEVICE(0x1A85, info), \
+ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
+ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
#endif /* _I915_PCIIDS_H */
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index bfd774a99963..a75d304473d5 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -164,7 +164,8 @@
#define MSM_BUS_MASTER_MSS_CE 120
#define MSM_BUS_MASTER_CDSP_PROC 121
#define MSM_BUS_MASTER_GNOC_SNOC 122
-#define MSM_BUS_MASTER_MASTER_LAST 123
+#define MSM_BUS_MASTER_PIMEM 123
+#define MSM_BUS_MASTER_MASTER_LAST 124
#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1991aea2ec4c..3672893b275e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -920,7 +920,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
return NULL;
}
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
= { (void *) table_id, \
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..140c29635069 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -163,6 +163,7 @@ struct backing_dev_info {
wait_queue_head_t wb_waitq;
struct device *dev;
+ struct device *owner;
struct timer_list laptop_mode_wb_timer;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c82794f20110..89d3de3e096b 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,6 +24,7 @@ __printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3feb1b2d75d8..14cd6f77e284 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -156,6 +156,7 @@ struct bcma_host_ops {
#define BCMA_CORE_DEFAULT 0xFFF
#define BCMA_MAX_NR_CORES 16
+#define BCMA_CORE_SIZE 0x1000
/* Chip IDs of PCIe devices */
#define BCMA_CHIP_ID_BCM4313 0x4313
diff --git a/include/linux/bio.h b/include/linux/bio.h
index fbe47bc700bd..42e4e3cbb001 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -527,11 +527,14 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
+static inline void bio_clone_blkcg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 84af69b95026..ae64a897622c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -197,6 +197,9 @@ struct request {
/* for bidi */
struct request *next_rq;
+
+ ktime_t lat_hist_io_start;
+ int lat_hist_enabled;
};
static inline unsigned short req_get_ioprio(struct request *req)
@@ -893,7 +896,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
struct request_queue *q = rq->q;
- if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+ if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
@@ -1662,6 +1665,79 @@ extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
extern long bdev_direct_access(struct block_device *, sector_t,
void __pmem **addr, unsigned long *pfn, long size);
+
+/*
+ * X-axis for IO latency histogram support.
+ */
+static const u_int64_t latency_x_axis_us[] = {
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1200,
+ 1400,
+ 1600,
+ 1800,
+ 2000,
+ 2500,
+ 3000,
+ 4000,
+ 5000,
+ 6000,
+ 7000,
+ 9000,
+ 10000
+};
+
+#define BLK_IO_LAT_HIST_DISABLE 0
+#define BLK_IO_LAT_HIST_ENABLE 1
+#define BLK_IO_LAT_HIST_ZERO 2
+
+struct io_latency_state {
+ u_int64_t latency_y_axis_read[ARRAY_SIZE(latency_x_axis_us) + 1];
+ u_int64_t latency_reads_elems;
+ u_int64_t latency_y_axis_write[ARRAY_SIZE(latency_x_axis_us) + 1];
+ u_int64_t latency_writes_elems;
+};
+
+static inline void
+blk_update_latency_hist(struct io_latency_state *s,
+ int read,
+ u_int64_t delta_us)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
+ if (delta_us < (u_int64_t)latency_x_axis_us[i]) {
+ if (read)
+ s->latency_y_axis_read[i]++;
+ else
+ s->latency_y_axis_write[i]++;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(latency_x_axis_us)) {
+ /* Overflowed the histogram */
+ if (read)
+ s->latency_y_axis_read[i]++;
+ else
+ s->latency_y_axis_write[i]++;
+ }
+ if (read)
+ s->latency_reads_elems++;
+ else
+ s->latency_writes_elems++;
+}
+
+void blk_zero_latency_hist(struct io_latency_state *s);
+ssize_t blk_latency_hist_show(struct io_latency_state *s, char *buf);
+
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/include/linux/capability.h b/include/linux/capability.h
index af9f0b9e80e6..5f8249d378a2 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -214,6 +214,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
+extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
#else
static inline bool has_capability(struct task_struct *t, int cap)
{
@@ -241,6 +242,10 @@ static inline bool ns_capable(struct user_namespace *ns, int cap)
{
return true;
}
+static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
+{
+ return true;
+}
#endif /* CONFIG_MULTIUSER */
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index af746a212e88..fe865e627528 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -160,6 +160,7 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
+bool cpufreq_driver_is_slow(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
@@ -317,6 +318,14 @@ struct cpufreq_driver {
*/
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+/*
+ * Indicates that it is safe to call cpufreq_driver_target from
+ * non-interruptable context in scheduler hot paths. Drivers must
+ * opt-in to this flag, as the safe default is that they might sleep
+ * or be too slow for hot path use.
+ */
+#define CPUFREQ_DRIVER_FAST (1 << 6)
+
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -504,6 +513,9 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
extern struct cpufreq_governor cpufreq_gov_interactive;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
+extern struct cpufreq_governor cpufreq_gov_sched;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_sched)
#endif
/*********************************************************************
@@ -633,4 +645,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
int cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+
+struct sched_domain;
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long cpufreq_scale_max_freq_capacity(int cpu);
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 786ad32631a6..6eae1576499e 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -204,7 +204,7 @@ static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
#endif
/* kernel/sched/idle.c */
-extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void sched_idle_set_state(struct cpuidle_state *idle_state, int index);
extern void default_idle_call(void);
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index e0ee0b3000b2..358a4db72a27 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,38 +15,24 @@
#include <linux/errno.h>
+struct pts_fs_info;
+
#ifdef CONFIG_UNIX98_PTYS
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
/* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
- void *priv);
+struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *);
/* get private structure */
void *devpts_get_priv(struct inode *pts_inode);
/* unlink */
void devpts_pty_kill(struct inode *inode);
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
- dev_t device, int index, void *priv)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
- return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
-
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc2796b2486f..94edbb64f1c6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -717,6 +717,31 @@ enum inode_i_mutex_lock_class
I_MUTEX_PARENT2,
};
+static inline void inode_lock(struct inode *inode)
+{
+ mutex_lock(&inode->i_mutex);
+}
+
+static inline void inode_unlock(struct inode *inode)
+{
+ mutex_unlock(&inode->i_mutex);
+}
+
+static inline int inode_trylock(struct inode *inode)
+{
+ return mutex_trylock(&inode->i_mutex);
+}
+
+static inline int inode_is_locked(struct inode *inode)
+{
+ return mutex_is_locked(&inode->i_mutex);
+}
+
+static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
+{
+ mutex_lock_nested(&inode->i_mutex, subclass);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
@@ -3038,8 +3063,8 @@ static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
}
static inline bool dir_relax(struct inode *inode)
{
- mutex_unlock(&inode->i_mutex);
- mutex_lock(&inode->i_mutex);
+ inode_unlock(inode);
+ inode_lock(inode);
return !IS_DEADDIR(inode);
}
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 0f9bafa17a02..d98780ca9604 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -62,7 +62,6 @@ struct serio;
void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
-bool i8042_check_port_owner(const struct serio *);
int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
struct serio *serio));
int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
@@ -83,11 +82,6 @@ static inline int i8042_command(unsigned char *param, int command)
return -ENODEV;
}
-static inline bool i8042_check_port_owner(const struct serio *serio)
-{
- return false;
-}
-
static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
struct serio *serio))
{
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index c3ffe51d8069..d545604cc22d 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1066,6 +1066,12 @@ struct ipa_wdi_in_params {
#endif
};
+enum ipa_upstream_type {
+ IPA_UPSTEAM_MODEM = 1,
+ IPA_UPSTEAM_WLAN,
+ IPA_UPSTEAM_MAX
+};
+
/**
* struct ipa_wdi_out_params - information provided to WDI client
* @uc_door_bell_pa: physical address of IPA uc doorbell
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2955e672391d..924853d33a13 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -830,8 +830,4 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
/* OTHER_WRITABLE? Generally considered a bad idea. */ \
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
-
-/* To identify board information in panic logs, set this */
-extern char *mach_panic_string;
-
#endif
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 034117b3be5f..f09648d14694 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -58,8 +58,9 @@ enum {
/* Block Types */
NVM_BLK_T_FREE = 0x0,
NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_DEV = 0x2,
- NVM_BLK_T_HOST = 0x4,
+ NVM_BLK_T_GRWN_BAD = 0x2,
+ NVM_BLK_T_DEV = 0x4,
+ NVM_BLK_T_HOST = 0x8,
};
struct nvm_id_group {
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index cd0e2413c358..435fd8426b8a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -174,6 +174,11 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
+struct mem_cgroup_id {
+ int id;
+ atomic_t ref;
+};
+
/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
@@ -183,6 +188,9 @@ struct mem_cgroup_thresholds {
struct mem_cgroup {
struct cgroup_subsys_state css;
+ /* Private memcg ID. Used to ID objects that outlive the cgroup */
+ struct mem_cgroup_id id;
+
/* Accounted resources */
struct page_counter memory;
struct page_counter memsw;
diff --git a/include/linux/memory-state-time.h b/include/linux/memory-state-time.h
new file mode 100644
index 000000000000..d2212b027866
--- /dev/null
+++ b/include/linux/memory-state-time.h
@@ -0,0 +1,42 @@
+/* include/linux/memory-state-time.h
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+
+#define UPDATE_MEMORY_STATE(BLOCK, VALUE) BLOCK->update_call(BLOCK, VALUE)
+
+struct memory_state_update_block;
+
+typedef void (*memory_state_update_fn_t)(struct memory_state_update_block *ub,
+ int value);
+
+/* This struct is populated when you pass it to a memory_state_register*
+ * function. The update_call function is used for an update and defined in the
+ * typedef memory_state_update_fn_t
+ */
+struct memory_state_update_block {
+ memory_state_update_fn_t update_call;
+ int id;
+};
+
+/* Register a frequency struct memory_state_update_block to provide updates to
+ * memory_state_time about frequency changes using its update_call function.
+ */
+struct memory_state_update_block *memory_state_register_frequency_source(void);
+
+/* Register a bandwidth struct memory_state_update_block to provide updates to
+ * memory_state_time about bandwidth changes using its update_call function.
+ */
+struct memory_state_update_block *memory_state_register_bandwidth_source(void);
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 494682ce4bf3..3ab3cede28ea 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -224,6 +224,21 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
+ * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
+ *
+ * This function is identical to cros_ec_cmd_xfer, except it returns success
+ * status only if both the command was transmitted successfully and the EC
+ * replied with success status. It's not necessary to check msg->result when
+ * using this function.
+ *
+ * @ec_dev: EC device
+ * @msg: Message to write
+ * @return: Num. of bytes transferred on success, <0 on failure
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
* cros_ec_remove - Remove a ChromeOS EC
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index f079fb1a31f7..a8786d27ab81 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -160,6 +160,7 @@ enum {
enum {
MLX5_FENCE_MODE_NONE = 0 << 5,
MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
+ MLX5_FENCE_MODE_FENCE = 2 << 5,
MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};
@@ -534,9 +535,9 @@ struct mlx5_destroy_qp_mbox_out {
struct mlx5_modify_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
- u8 rsvd1[4];
- __be32 optparam;
u8 rsvd0[4];
+ __be32 optparam;
+ u8 rsvd1[4];
struct mlx5_qp_context ctx;
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0c4178e5b656..97b11c9fd48a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1324,8 +1324,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-extern struct task_struct *task_of_stack(struct task_struct *task,
- struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 89efaa67cc59..a5a3bb286361 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -110,6 +110,8 @@ struct mmc_request {
struct mmc_host *host;
struct mmc_cmdq_req *cmdq_req;
struct request *req;
+ ktime_t io_start;
+ int lat_hist_enabled;
};
struct mmc_bus_ops {
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 5cfed430b8d4..276dbf19805b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/fault-inject.h>
+#include <linux/blkdev.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
@@ -553,7 +554,6 @@ struct mmc_host {
int num_funcs;
} embedded_sdio_data;
#endif
-
/*
* Set to 1 to just stop the SDCLK to the card without
* actually disabling the clock from it's source.
@@ -585,6 +585,11 @@ struct mmc_host {
*/
void *cmdq_private;
struct mmc_request *err_mrq;
+#ifdef CONFIG_BLOCK
+ int latency_hist_enabled;
+ struct io_latency_state io_lat_s;
+#endif
+
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ad4c3f186f61..ddb3b927de39 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -75,10 +75,12 @@ extern int *get_migratetype_fallbacks(int mtype);
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define get_cma_migrate_type() MIGRATE_CMA
+# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_cma_pageblock(page) false
# define is_migrate_cma(migratetype) false
# define get_cma_migrate_type() MIGRATE_MOVABLE
+# define is_migrate_cma_page(_page) false
#endif
#define for_each_migratetype_order(order, type) \
diff --git a/include/linux/msi.h b/include/linux/msi.h
index f71a25e5fd25..f0f43ec45ee7 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -254,12 +254,12 @@ enum {
* callbacks.
*/
MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
- /* Build identity map between hwirq and irq */
- MSI_FLAG_IDENTITY_MAP = (1 << 2),
/* Support multiple PCI MSI interrupts */
- MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
/* Support PCI MSIX interrupts */
- MSI_FLAG_PCI_MSIX = (1 << 4),
+ MSI_FLAG_PCI_MSIX = (1 << 3),
+ /* Needs early activate, required for PCI */
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d9ba49cedc5d..37f05cb1dfd6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2495,6 +2495,13 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
+#define PCI_VENDOR_ID_NETRONOME 0x19ee
+#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
+#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
+#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
+#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
+#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
+
#define PCI_VENDOR_ID_QMI 0x1a32
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 12d3415a3ef5..490ff31d1d88 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -121,6 +121,7 @@ struct hw_perf_event {
struct { /* intel_cqm */
int cqm_state;
u32 cqm_rmid;
+ int is_group_event;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 831479f8df8f..5cae2c6c90ad 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -22,12 +22,13 @@
#ifndef _LINUX_PSTORE_H
#define _LINUX_PSTORE_H
-#include <linux/time.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/types.h>
/* types */
enum pstore_type_id {
@@ -67,6 +68,10 @@ struct pstore_info {
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, const char *buf, bool compressed,
size_t size, struct pstore_info *psi);
+ int (*write_buf_user)(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
+ unsigned int part, const char __user *buf,
+ bool compressed, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
int count, struct timespec time,
struct pstore_info *psi);
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 712757f320a4..45ac5a0d29ee 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -17,11 +17,12 @@
#ifndef __LINUX_PSTORE_RAM_H__
#define __LINUX_PSTORE_RAM_H__
+#include <linux/compiler.h>
#include <linux/device.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/types.h>
-#include <linux/init.h>
struct persistent_ram_buffer;
struct rs_control;
@@ -59,7 +60,9 @@ void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
- unsigned int count);
+ unsigned int count);
+int persistent_ram_write_user(struct persistent_ram_zone *prz,
+ const void __user *s, unsigned int count);
void persistent_ram_save_old(struct persistent_ram_zone *prz);
size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c3be2d56ac5..36007d90a678 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -173,6 +173,9 @@ extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+#ifdef CONFIG_CPU_QUIET
+extern u64 nr_running_integral(unsigned int cpu);
+#endif
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
@@ -1007,6 +1010,14 @@ enum cpu_idle_type {
#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+struct sched_capacity_reqs {
+ unsigned long cfs;
+ unsigned long rt;
+ unsigned long dl;
+
+ unsigned long total;
+};
+
/*
* Wake-queues are lists of tasks with a pending wakeup, whose
* callers have already marked the task as woken internally,
@@ -1069,6 +1080,7 @@ extern void wake_up_q(struct wake_q_head *head);
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
+#define SD_SHARE_CAP_STATES 0x8000 /* Domain members share capacity state */
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
@@ -1101,6 +1113,24 @@ struct sched_domain_attr {
extern int sched_domain_level_max;
+struct capacity_state {
+ unsigned long cap; /* compute capacity */
+ unsigned long power; /* power consumption at this compute capacity */
+};
+
+struct idle_state {
+ unsigned long power; /* power consumption in this idle state */
+};
+
+struct sched_group_energy {
+ unsigned int nr_idle_states; /* number of idle states */
+ struct idle_state *idle_states; /* ptr to idle state array */
+ unsigned int nr_cap_states; /* number of capacity states */
+ struct capacity_state *cap_states; /* ptr to capacity state array */
+};
+
+unsigned long capacity_curr_of(int cpu);
+
struct sched_group;
struct sched_domain {
@@ -1199,6 +1229,8 @@ bool cpus_share_cache(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
+typedef
+const struct sched_group_energy * const(*sched_domain_energy_f)(int cpu);
#define SDTL_OVERLAP 0x01
@@ -1211,6 +1243,7 @@ struct sd_data {
struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
+ sched_domain_energy_f energy;
int flags;
int numa_level;
struct sd_data data;
@@ -1362,6 +1395,7 @@ struct ravg {
u32 sum_history[RAVG_HIST_SIZE_MAX];
u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
+ u64 curr_burst, avg_burst, avg_sleep_time;
u16 active_windows;
u32 pred_demand;
u8 busy_buckets[NUM_BUSY_BUCKETS];
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 5d0899df64ff..0538de6dfb6f 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,6 +40,10 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_wake_to_idle;
+extern unsigned int sysctl_sched_is_big_little;
+extern unsigned int sysctl_sched_sync_hint_enable;
+extern unsigned int sysctl_sched_initial_task_util;
+extern unsigned int sysctl_sched_cstate_aware;
#ifdef CONFIG_SCHED_HMP
extern int sysctl_sched_freq_inc_notify;
@@ -68,6 +72,8 @@ extern unsigned int sysctl_sched_freq_aggregate;
extern unsigned int sysctl_sched_enable_thread_grouping;
extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
+extern unsigned int sysctl_sched_short_burst;
+extern unsigned int sysctl_sched_short_sleep;
#else /* CONFIG_SCHED_HMP */
diff --git a/include/linux/sched_energy.h b/include/linux/sched_energy.h
new file mode 100644
index 000000000000..a1057e481eff
--- /dev/null
+++ b/include/linux/sched_energy.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_SCHED_ENERGY_H
+#define _LINUX_SCHED_ENERGY_H
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+/*
+ * There doesn't seem to be an NR_CPUS style max number of sched domain
+ * levels so here's an arbitrary constant one for the moment.
+ *
+ * The levels alluded to here correspond to entries in struct
+ * sched_domain_topology_level that are meant to be populated by arch
+ * specific code (topology.c).
+ */
+#define NR_SD_LEVELS 8
+
+#define SD_LEVEL0 0
+#define SD_LEVEL1 1
+#define SD_LEVEL2 2
+#define SD_LEVEL3 3
+#define SD_LEVEL4 4
+#define SD_LEVEL5 5
+#define SD_LEVEL6 6
+#define SD_LEVEL7 7
+
+/*
+ * Convenience macro for iterating through said sd levels.
+ */
+#define for_each_possible_sd_level(level) \
+ for (level = 0; level < NR_SD_LEVELS; level++)
+
+extern bool sched_energy_aware;
+
+#ifdef CONFIG_SMP
+
+extern struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
+
+void init_sched_energy_costs(void);
+
+#else
+
+#define init_sched_energy_costs() do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+#endif
diff --git a/include/linux/serio.h b/include/linux/serio.h
index df4ab5de1586..c733cff44e18 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -31,7 +31,8 @@ struct serio {
struct serio_device_id id;
- spinlock_t lock; /* protects critical sections from port's interrupt handler */
+ /* Protects critical sections from port's interrupt handler */
+ spinlock_t lock;
int (*write)(struct serio *, unsigned char);
int (*open)(struct serio *);
@@ -40,16 +41,29 @@ struct serio {
void (*stop)(struct serio *);
struct serio *parent;
- struct list_head child_node; /* Entry in parent->children list */
+ /* Entry in parent->children list */
+ struct list_head child_node;
struct list_head children;
- unsigned int depth; /* level of nesting in serio hierarchy */
+ /* Level of nesting in serio hierarchy */
+ unsigned int depth;
- struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */
- struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
+ /*
+ * serio->drv is accessed from interrupt handlers; when modifying
+ * caller should acquire serio->drv_mutex and serio->lock.
+ */
+ struct serio_driver *drv;
+ /* Protects serio->drv so attributes can pin current driver */
+ struct mutex drv_mutex;
struct device dev;
struct list_head node;
+
+ /*
+ * For use by PS/2 layer when several ports share hardware and
+ * may get indigestion when exposed to concurrent access (i8042).
+ */
+ struct mutex *ps2_cmd_mutex;
};
#define to_serio_port(d) container_of(d, struct serio, dev)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2037a861e367..4ef384b172e0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -144,6 +144,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+ unsigned long n,
+ struct page *page)
+{
+ return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 33885118523c..f4e857e920cd 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -81,6 +81,7 @@ struct kmem_cache {
int reserved; /* Reserved bytes at the end of slabs */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+ int red_left_pad; /* Left redzone padding size */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ff307b548ed3..4cf89517783a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -145,6 +145,31 @@ static inline bool test_and_clear_restore_sigmask(void)
#error "no set_restore_sigmask() provided and default one won't work"
#endif
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ if (!__builtin_constant_p(n))
+ __check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/time.h b/include/linux/time.h
index beebe3a02d43..297f09f23896 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -125,6 +125,32 @@ static inline bool timeval_valid(const struct timeval *tv)
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+/*
+ * Validates if a timespec/timeval used to inject a time offset is valid.
+ * Offsets can be postive or negative. The value of the timeval/timespec
+ * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must
+ * always be non-negative.
+ */
+static inline bool timeval_inject_offset_valid(const struct timeval *tv)
+{
+ /* We don't check the tv_sec as it can be positive or negative */
+
+ /* Can't have more microseconds than a second */
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+ return false;
+ return true;
+}
+
+static inline bool timespec_inject_offset_valid(const struct timespec *ts)
+{
+ /* We don't check the tv_sec as it can be positive or negative */
+
+ /* Can't have more nanoseconds than a second */
+ if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
+ return false;
+ return true;
+}
+
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 558129af828a..f30c187ed785 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -111,4 +111,11 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define probe_kernel_address(addr, retval) \
probe_kernel_read(&retval, addr, sizeof(retval))
+#ifndef user_access_begin
+#define user_access_begin() do { } while (0)
+#define user_access_end() do { } while (0)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#endif
+
#endif /* __LINUX_UACCESS_H__ */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 1eb442f8dc6c..21fddf0cbf09 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -296,8 +296,7 @@ static inline void msm_usb_irq_disable(bool disable)
#endif
#ifdef CONFIG_USB_DWC3_QCOM
-int msm_ep_config(struct usb_ep *ep, struct usb_request *request,
- gfp_t gfp_flags);
+int msm_ep_config(struct usb_ep *ep, struct usb_request *request);
int msm_ep_unconfig(struct usb_ep *ep);
void dwc3_tx_fifo_resize_request(struct usb_ep *ep, bool qdss_enable);
int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr, u32 size,
@@ -312,8 +311,7 @@ static inline int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
return -ENODEV;
}
-static inline int msm_ep_config(struct usb_ep *ep, struct usb_request *request,
- gfp_t gfp_flags)
+static inline int msm_ep_config(struct usb_ep *ep, struct usb_request *request)
{
return -ENODEV;
}
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cc1e8d6b3454..195b625a4a76 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -731,7 +731,7 @@ struct cfg80211_bitrate_mask {
* MAC address based access control
* @pbss: If set, start as a PCP instead of AP. Relevant for DMG
* networks.
- * @beacon_rate: masks for setting user configured beacon tx rate.
+ * @beacon_rate: bitrate to be used for beacons
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1393,6 +1393,7 @@ struct mesh_config {
* @beacon_interval: beacon interval to use
* @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
+ * @beacon_rate: bitrate to be used for beacons
*
* These parameters are fixed when the mesh is created.
*/
@@ -1413,6 +1414,7 @@ struct mesh_setup {
u16 beacon_interval;
int mcast_rate[IEEE80211_NUM_BANDS];
u32 basic_rates;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 55b5419cb6a7..bdd985f41022 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -89,7 +89,6 @@ struct fib_rules_ops {
[FRA_FWMARK] = { .type = NLA_U32 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
- [FRA_GOTO] = { .type = NLA_U32 }, \
[FRA_UID_START] = { .type = NLA_U32 }, \
[FRA_UID_END] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
diff --git a/include/soc/qcom/camera2.h b/include/soc/qcom/camera2.h
index bf9db17e6981..5a61d2b372c3 100644
--- a/include/soc/qcom/camera2.h
+++ b/include/soc/qcom/camera2.h
@@ -20,6 +20,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
+#define MAX_SPECIAL_SUPPORT_SIZE 10
enum msm_camera_device_type_t {
MSM_CAMERA_I2C_DEVICE,
@@ -148,6 +149,8 @@ struct msm_camera_sensor_board_info {
const char *actuator_name;
const char *ois_name;
const char *flash_name;
+ const char *special_support_sensors[MAX_SPECIAL_SUPPORT_SIZE];
+ int32_t special_support_size;
struct msm_camera_slave_info *slave_info;
struct msm_camera_csi_lane_params *csi_lane_params;
struct msm_camera_sensor_strobe_flash_data *strobe_flash_data;
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 780666c332e2..763eaa9ad918 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -25,6 +25,12 @@ enum {
RESET_LEVEL_MAX
};
+enum crash_status {
+ CRASH_STATUS_NO_CRASH = 0,
+ CRASH_STATUS_ERR_FATAL,
+ CRASH_STATUS_WDOG_BITE,
+};
+
struct device;
struct module;
@@ -89,7 +95,7 @@ struct subsys_desc {
/**
* struct notif_data - additional notif information
- * @crashed: indicates if subsystem has crashed
+ * @crashed: indicates if subsystem has crashed due to wdog bite or err fatal
* @enable_ramdump: ramdumps disabled if set to 0
* @enable_mini_ramdumps: enable flag for minimized critical-memory-only
* ramdumps
@@ -97,7 +103,7 @@ struct subsys_desc {
* @pdev: subsystem platform device pointer
*/
struct notif_data {
- bool crashed;
+ enum crash_status crashed;
int enable_ramdump;
int enable_mini_ramdumps;
bool no_auth;
@@ -120,8 +126,9 @@ extern struct subsys_device *subsys_register(struct subsys_desc *desc);
extern void subsys_unregister(struct subsys_device *dev);
extern void subsys_default_online(struct subsys_device *dev);
-extern void subsys_set_crash_status(struct subsys_device *dev, bool crashed);
-extern bool subsys_get_crash_status(struct subsys_device *dev);
+extern void subsys_set_crash_status(struct subsys_device *dev,
+ enum crash_status crashed);
+extern enum crash_status subsys_get_crash_status(struct subsys_device *dev);
void notify_proxy_vote(struct device *device);
void notify_proxy_unvote(struct device *device);
void complete_err_ready(struct subsys_device *subsys);
@@ -174,9 +181,10 @@ struct subsys_device *subsys_register(struct subsys_desc *desc)
static inline void subsys_unregister(struct subsys_device *dev) { }
static inline void subsys_default_online(struct subsys_device *dev) { }
+static inline void subsys_set_crash_status(struct subsys_device *dev,
+ enum crash_status crashed) { }
static inline
-void subsys_set_crash_status(struct subsys_device *dev, bool crashed) { }
-static inline bool subsys_get_crash_status(struct subsys_device *dev)
+enum crash_status subsys_get_crash_status(struct subsys_device *dev)
{
return false;
}
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 06d952a07c2a..e098e2329ac6 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -8017,11 +8017,10 @@ struct asm_eq_params {
/* Band cut equalizer effect.*/
#define ASM_PARAM_EQ_BAND_CUT 6
-/* Voice get & set params */
-#define VOICE_CMD_SET_PARAM 0x0001133D
-#define VOICE_CMD_GET_PARAM 0x0001133E
-#define VOICE_EVT_GET_PARAM_ACK 0x00011008
-
+/* Get & set params */
+#define VSS_ICOMMON_CMD_SET_PARAM_V2 0x0001133D
+#define VSS_ICOMMON_CMD_GET_PARAM_V2 0x0001133E
+#define VSS_ICOMMON_RSP_GET_PARAM 0x00011008
/** ID of the Bass Boost module.
This module supports the following parameter IDs:
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 28ee5c2e6bcd..711322a8ee35 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -96,6 +96,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q, int block_size);
+ struct request_queue *q);
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 689f4d207122..59081c73b296 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -139,6 +139,7 @@ enum se_cmd_flags_table {
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
SCF_ACK_KREF = 0x00400000,
+ SCF_TASK_ATTR_SET = 0x01000000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 7fb2557a760e..ce9ea736f1d7 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -163,7 +163,6 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
-void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
diff --git a/include/trace/events/cpufreq_sched.h b/include/trace/events/cpufreq_sched.h
new file mode 100644
index 000000000000..a46cd088e969
--- /dev/null
+++ b/include/trace/events/cpufreq_sched.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 Steve Muckle <smuckle@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_sched
+
+#if !defined(_TRACE_CPUFREQ_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_SCHED_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cpufreq_sched_throttled,
+ TP_PROTO(unsigned int rem),
+ TP_ARGS(rem),
+ TP_STRUCT__entry(
+ __field( unsigned int, rem)
+ ),
+ TP_fast_assign(
+ __entry->rem = rem;
+ ),
+ TP_printk("throttled - %d usec remaining", __entry->rem)
+);
+
+TRACE_EVENT(cpufreq_sched_request_opp,
+ TP_PROTO(int cpu,
+ unsigned long capacity,
+ unsigned int freq_new,
+ unsigned int requested_freq),
+ TP_ARGS(cpu, capacity, freq_new, requested_freq),
+ TP_STRUCT__entry(
+ __field( int, cpu)
+ __field( unsigned long, capacity)
+ __field( unsigned int, freq_new)
+ __field( unsigned int, requested_freq)
+ ),
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->capacity = capacity;
+ __entry->freq_new = freq_new;
+ __entry->requested_freq = requested_freq;
+ ),
+ TP_printk("cpu %d cap change, cluster cap request %ld => OPP %d "
+ "(cur %d)",
+ __entry->cpu, __entry->capacity, __entry->freq_new,
+ __entry->requested_freq)
+);
+
+TRACE_EVENT(cpufreq_sched_update_capacity,
+ TP_PROTO(int cpu,
+ bool request,
+ struct sched_capacity_reqs *scr,
+ unsigned long new_capacity),
+ TP_ARGS(cpu, request, scr, new_capacity),
+ TP_STRUCT__entry(
+ __field( int, cpu)
+ __field( bool, request)
+ __field( unsigned long, cfs)
+ __field( unsigned long, rt)
+ __field( unsigned long, dl)
+ __field( unsigned long, total)
+ __field( unsigned long, new_total)
+ ),
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->request = request;
+ __entry->cfs = scr->cfs;
+ __entry->rt = scr->rt;
+ __entry->dl = scr->dl;
+ __entry->total = scr->total;
+ __entry->new_total = new_capacity;
+ ),
+ TP_printk("cpu=%d set_cap=%d cfs=%ld rt=%ld dl=%ld old_tot=%ld "
+ "new_tot=%ld",
+ __entry->cpu, __entry->request, __entry->cfs, __entry->rt,
+ __entry->dl, __entry->total, __entry->new_total)
+);
+
+#endif /* _TRACE_CPUFREQ_SCHED_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index bc33e91ec5e6..8387688fb71b 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -186,6 +186,13 @@ TRACE_EVENT(cpu_frequency_switch_end,
TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
);
+
+DEFINE_EVENT(cpu, cpu_capacity,
+
+ TP_PROTO(unsigned int capacity, unsigned int cpu_id),
+
+ TP_ARGS(capacity, cpu_id)
+);
TRACE_EVENT(device_pm_callback_start,
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 72bbed9ad5db..0a59832e0515 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -134,6 +134,8 @@ TRACE_EVENT(sched_task_load,
__field( int, best_cpu )
__field( u64, latency )
__field( int, grp_id )
+ __field( u64, avg_burst )
+ __field( u64, avg_sleep )
),
TP_fast_assign(
@@ -150,13 +152,16 @@ TRACE_EVENT(sched_task_load,
sched_ktime_clock() -
p->ravg.mark_start : 0;
__entry->grp_id = p->grp ? p->grp->id : 0;
+ __entry->avg_burst = p->ravg.avg_burst;
+ __entry->avg_sleep = p->ravg.avg_sleep_time;
),
- TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu",
+ TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
__entry->pid, __entry->comm, __entry->demand,
__entry->boost, __entry->reason, __entry->sync,
__entry->need_idle, __entry->flags, __entry->grp_id,
- __entry->best_cpu, __entry->latency)
+ __entry->best_cpu, __entry->latency, __entry->avg_burst,
+ __entry->avg_sleep)
);
TRACE_EVENT(sched_set_preferred_cluster,
@@ -1378,6 +1383,355 @@ TRACE_EVENT(sched_isolate,
__entry->requested_cpu, __entry->isolated_cpus,
__entry->time, __entry->isolate)
);
+
+TRACE_EVENT(sched_contrib_scale_f,
+
+ TP_PROTO(int cpu, unsigned long freq_scale_factor,
+ unsigned long cpu_scale_factor),
+
+ TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(unsigned long, freq_scale_factor)
+ __field(unsigned long, cpu_scale_factor)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->freq_scale_factor = freq_scale_factor;
+ __entry->cpu_scale_factor = cpu_scale_factor;
+ ),
+
+ TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
+ __entry->cpu, __entry->freq_scale_factor,
+ __entry->cpu_scale_factor)
+);
+
+#ifdef CONFIG_SMP
+
+/*
+ * Tracepoint for accounting sched averages for tasks.
+ */
+TRACE_EVENT(sched_load_avg_task,
+
+ TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+ TP_ARGS(tsk, avg),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( unsigned long, load_avg )
+ __field( unsigned long, util_avg )
+ __field( u64, load_sum )
+ __field( u32, util_sum )
+ __field( u32, period_contrib )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = task_cpu(tsk);
+ __entry->load_avg = avg->load_avg;
+ __entry->util_avg = avg->util_avg;
+ __entry->load_sum = avg->load_sum;
+ __entry->util_sum = avg->util_sum;
+ __entry->period_contrib = avg->period_contrib;
+ ),
+
+ TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
+ " util_sum=%u period_contrib=%u",
+ __entry->comm,
+ __entry->pid,
+ __entry->cpu,
+ __entry->load_avg,
+ __entry->util_avg,
+ (u64)__entry->load_sum,
+ (u32)__entry->util_sum,
+ (u32)__entry->period_contrib)
+);
+
+/*
+ * Tracepoint for accounting sched averages for cpus.
+ */
+TRACE_EVENT(sched_load_avg_cpu,
+
+ TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+ TP_ARGS(cpu, cfs_rq),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( unsigned long, load_avg )
+ __field( unsigned long, util_avg )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->load_avg = cfs_rq->avg.load_avg;
+ __entry->util_avg = cfs_rq->avg.util_avg;
+ ),
+
+ TP_printk("cpu=%d load_avg=%lu util_avg=%lu",
+ __entry->cpu, __entry->load_avg, __entry->util_avg)
+);
+
+/*
+ * Tracepoint for sched_tune_config settings
+ */
+TRACE_EVENT(sched_tune_config,
+
+ TP_PROTO(int boost),
+
+ TP_ARGS(boost),
+
+ TP_STRUCT__entry(
+ __field( int, boost )
+ ),
+
+ TP_fast_assign(
+ __entry->boost = boost;
+ ),
+
+ TP_printk("boost=%d ", __entry->boost)
+);
+
+/*
+ * Tracepoint for accounting CPU boosted utilization
+ */
+TRACE_EVENT(sched_boost_cpu,
+
+ TP_PROTO(int cpu, unsigned long util, long margin),
+
+ TP_ARGS(cpu, util, margin),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( unsigned long, util )
+ __field(long, margin )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->util = util;
+ __entry->margin = margin;
+ ),
+
+ TP_printk("cpu=%d util=%lu margin=%ld",
+ __entry->cpu,
+ __entry->util,
+ __entry->margin)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_tasks_update,
+
+ TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
+ int boost, int max_boost),
+
+ TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( int, tasks )
+ __field( int, idx )
+ __field( int, boost )
+ __field( int, max_boost )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = cpu;
+ __entry->tasks = tasks;
+ __entry->idx = idx;
+ __entry->boost = boost;
+ __entry->max_boost = max_boost;
+ ),
+
+ TP_printk("pid=%d comm=%s "
+ "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
+ __entry->pid, __entry->comm,
+ __entry->cpu, __entry->tasks, __entry->idx,
+ __entry->boost, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for schedtune_boostgroup_update
+ */
+TRACE_EVENT(sched_tune_boostgroup_update,
+
+ TP_PROTO(int cpu, int variation, int max_boost),
+
+ TP_ARGS(cpu, variation, max_boost),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( int, variation )
+ __field( int, max_boost )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->variation = variation;
+ __entry->max_boost = max_boost;
+ ),
+
+ TP_printk("cpu=%d variation=%d max_boost=%d",
+ __entry->cpu, __entry->variation, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for accounting task boosted utilization
+ */
+TRACE_EVENT(sched_boost_task,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
+
+ TP_ARGS(tsk, util, margin),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( unsigned long, util )
+ __field( long, margin )
+
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->util = util;
+ __entry->margin = margin;
+ ),
+
+ TP_printk("comm=%s pid=%d util=%lu margin=%ld",
+ __entry->comm, __entry->pid,
+ __entry->util,
+ __entry->margin)
+);
+
+/*
+ * Tracepoint for accounting sched group energy
+ */
+TRACE_EVENT(sched_energy_diff,
+
+ TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
+ int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
+ int nrgn, int nrgp),
+
+ TP_ARGS(tsk, scpu, dcpu, udelta,
+ nrgb, nrga, nrgd, capb, capa, capd,
+ nrgn, nrgp),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, scpu )
+ __field( int, dcpu )
+ __field( int, udelta )
+ __field( int, nrgb )
+ __field( int, nrga )
+ __field( int, nrgd )
+ __field( int, capb )
+ __field( int, capa )
+ __field( int, capd )
+ __field( int, nrgn )
+ __field( int, nrgp )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->scpu = scpu;
+ __entry->dcpu = dcpu;
+ __entry->udelta = udelta;
+ __entry->nrgb = nrgb;
+ __entry->nrga = nrga;
+ __entry->nrgd = nrgd;
+ __entry->capb = capb;
+ __entry->capa = capa;
+ __entry->capd = capd;
+ __entry->nrgn = nrgn;
+ __entry->nrgp = nrgp;
+ ),
+
+ TP_printk("pid=%d comm=%s "
+ "src_cpu=%d dst_cpu=%d usage_delta=%d "
+ "nrg_before=%d nrg_after=%d nrg_diff=%d "
+ "cap_before=%d cap_after=%d cap_delta=%d "
+ "nrg_delta=%d nrg_payoff=%d",
+ __entry->pid, __entry->comm,
+ __entry->scpu, __entry->dcpu, __entry->udelta,
+ __entry->nrgb, __entry->nrga, __entry->nrgd,
+ __entry->capb, __entry->capa, __entry->capd,
+ __entry->nrgn, __entry->nrgp)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_filter,
+
+ TP_PROTO(int nrg_delta, int cap_delta,
+ int nrg_gain, int cap_gain,
+ int payoff, int region),
+
+ TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),
+
+ TP_STRUCT__entry(
+ __field( int, nrg_delta )
+ __field( int, cap_delta )
+ __field( int, nrg_gain )
+ __field( int, cap_gain )
+ __field( int, payoff )
+ __field( int, region )
+ ),
+
+ TP_fast_assign(
+ __entry->nrg_delta = nrg_delta;
+ __entry->cap_delta = cap_delta;
+ __entry->nrg_gain = nrg_gain;
+ __entry->cap_gain = cap_gain;
+ __entry->payoff = payoff;
+ __entry->region = region;
+ ),
+
+ TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
+ __entry->nrg_delta, __entry->cap_delta,
+ __entry->nrg_gain, __entry->cap_gain,
+ __entry->payoff, __entry->region)
+);
+
+/*
+ * Tracepoint for system overutilized flag
+ */
+TRACE_EVENT(sched_overutilized,
+
+ TP_PROTO(bool overutilized),
+
+ TP_ARGS(overutilized),
+
+ TP_STRUCT__entry(
+ __field( bool, overutilized )
+ ),
+
+ TP_fast_assign(
+ __entry->overutilized = overutilized;
+ ),
+
+ TP_printk("overutilized=%d",
+ __entry->overutilized ? 1 : 0)
+);
+
+#endif /* CONFIG_SMP */
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 003dca933803..5664ca07c9c7 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -529,20 +529,27 @@ TRACE_EVENT(svc_xprt_do_enqueue,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
- __field_struct(struct sockaddr_storage, ss)
__field(int, pid)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, xprt != NULL ?
+ xprt->xpt_remotelen : 0)
),
TP_fast_assign(
__entry->xprt = xprt;
- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
__entry->pid = rqst? rqst->rq_task->pid : 0;
- __entry->flags = xprt ? xprt->xpt_flags : 0;
+ if (xprt) {
+ memcpy(__get_dynamic_array(addr),
+ &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = xprt->xpt_flags;
+ } else
+ __entry->flags = 0;
),
TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->ss,
+ __get_dynamic_array_len(addr) != 0 ?
+ (struct sockaddr *)__get_dynamic_array(addr) : NULL,
__entry->pid, show_svc_xprt_flags(__entry->flags))
);
@@ -553,18 +560,25 @@ TRACE_EVENT(svc_xprt_dequeue,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
- __field_struct(struct sockaddr_storage, ss)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, xprt != NULL ?
+ xprt->xpt_remotelen : 0)
),
TP_fast_assign(
- __entry->xprt = xprt,
- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
- __entry->flags = xprt ? xprt->xpt_flags : 0;
+ __entry->xprt = xprt;
+ if (xprt) {
+ memcpy(__get_dynamic_array(addr),
+ &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = xprt->xpt_flags;
+ } else
+ __entry->flags = 0;
),
TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->ss,
+ __get_dynamic_array_len(addr) != 0 ?
+ (struct sockaddr *)__get_dynamic_array(addr) : NULL,
show_svc_xprt_flags(__entry->flags))
);
@@ -592,19 +606,26 @@ TRACE_EVENT(svc_handle_xprt,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
__field(int, len)
- __field_struct(struct sockaddr_storage, ss)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, xprt != NULL ?
+ xprt->xpt_remotelen : 0)
),
TP_fast_assign(
__entry->xprt = xprt;
- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
__entry->len = len;
- __entry->flags = xprt ? xprt->xpt_flags : 0;
+ if (xprt) {
+ memcpy(__get_dynamic_array(addr),
+ &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = xprt->xpt_flags;
+ } else
+ __entry->flags = 0;
),
TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->ss,
+ __get_dynamic_array_len(addr) != 0 ?
+ (struct sockaddr *)__get_dynamic_array(addr) : NULL,
__entry->len, show_svc_xprt_flags(__entry->flags))
);
#endif /* _TRACE_SUNRPC_H */
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index e4c0a35d6417..e347b24ef9fb 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -313,6 +313,7 @@ enum hv_kvp_exchg_pool {
#define HV_INVALIDARG 0x80070057
#define HV_GUID_NOTFOUND 0x80041002
#define HV_ERROR_ALREADY_EXISTS 0x80070050
+#define HV_ERROR_DISK_FULL 0x80070070
#define ADDR_FAMILY_NONE 0x00
#define ADDR_FAMILY_IPV4 0x01
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 696a4322844a..441a6b423ad8 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1328,7 +1328,13 @@ enum nl80211_commands {
* enum nl80211_band value is used as the index (nla_type() of the nested
* data. If a band is not included, it will be configured to allow all
* rates based on negotiated supported rates information. This attribute
- * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
+ * is used with %NL80211_CMD_SET_TX_BITRATE_MASK and with starting AP,
+ * and joining mesh networks (not IBSS yet). In the later case, it must
+ * specify just a single bitrate, which is to be used for the beacon.
+ * The driver must also specify support for this with the extended
+ * features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ * NL80211_EXT_FEATURE_BEACON_RATE_HT and
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -4432,12 +4438,47 @@ enum nl80211_feature_flags {
/**
* enum nl80211_ext_feature_index - bit index of extended features.
* @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
+ * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user can
+ * can request to use RRM (see %NL80211_ATTR_USE_RRM) with
+ * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
+ * the ASSOC_REQ_USE_RRM flag in the association request even if
+ * NL80211_FEATURE_QUIET is not advertized.
+ * @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air
+ * sniffer which means that it can be configured to hear packets from
+ * certain groups which can be configured by the
+ * %NL80211_ATTR_MU_MIMO_GROUP_DATA attribute,
+ * or can be configured to follow a station by configuring the
+ * %NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR attribute.
+ * @NL80211_EXT_FEATURE_SCAN_START_TIME: This driver includes the actual
+ * time the scan started in scan results event. The time is the TSF of
+ * the BSS that the interface that requested the scan is connected to
+ * (if available).
+ * @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the
+ * time the last beacon/probe was received. The time is the TSF of the
+ * BSS that the interface that requested the scan is connected to
+ * (if available).
+ * @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
+ * channel dwell time.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
+ * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
+ * configuration (AP/mesh) with HT rates.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
+ * configuration (AP/mesh) with VHT rates.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_VHT_IBSS,
+ NL80211_EXT_FEATURE_RRM,
+ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER,
+ NL80211_EXT_FEATURE_SCAN_START_TIME,
+ NL80211_EXT_FEATURE_BSS_PARENT_TSF,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 355eea225dd9..3eb02a1d6d8c 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -306,12 +306,12 @@ enum rtattr_type_t {
RTA_TABLE,
RTA_MARK,
RTA_MFC_STATS,
+ RTA_UID,
RTA_VIA,
RTA_NEWDST,
RTA_PREF,
RTA_ENCAP_TYPE,
RTA_ENCAP,
- RTA_UID,
__RTA_MAX
};
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 4338eb7b09b3..779a62aafafe 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -954,6 +954,7 @@ enum usb_device_speed {
USB_SPEED_HIGH, /* usb 2.0 */
USB_SPEED_WIRELESS, /* wireless (usb 2.5) */
USB_SPEED_SUPER, /* usb 3.0 */
+ USB_SPEED_SUPER_PLUS, /* usb 3.1 */
};
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 7be4c28cc1ca..686fc6143010 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -640,6 +640,9 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */
#define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */
#define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
+#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
+#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
+#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_SDE_ABGR_8888 \
v4l2_fourcc('R', 'A', '2', '4') /* 32-bit ABGR 8:8:8:8 */
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 9399f6e84004..44d75aa107d9 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -2,6 +2,7 @@
#define __UAPI_MSMB_ISP__
#include <linux/videodev2.h>
+#include <media/msmb_camera.h>
#define MAX_PLANES_PER_STREAM 3
#define MAX_NUM_STREAM 7
@@ -556,6 +557,16 @@ struct msm_isp_buf_request {
enum msm_isp_buf_type buf_type;
};
+struct msm_isp_buf_request_ver2 {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint8_t num_buf;
+ uint32_t handle;
+ enum msm_isp_buf_type buf_type;
+ enum smmu_attach_mode security_mode;
+ uint32_t reserved[4];
+};
+
struct msm_isp_qbuf_plane {
uint32_t addr;
uint32_t offset;
@@ -884,8 +895,11 @@ enum msm_isp_ioctl_cmd_code {
MSM_ISP_SET_DUAL_HW_MASTER_SLAVE,
MSM_ISP_MAP_BUF_START_FE,
MSM_ISP_UNMAP_BUF,
+ MSM_ISP_AHB_CLK_CFG,
+ MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC,
MSM_ISP_FETCH_ENG_MULTI_PASS_START,
MSM_ISP_MAP_BUF_START_MULTI_PASS_FE,
+ MSM_ISP_REQUEST_BUF_VER2,
};
#define VIDIOC_MSM_VFE_REG_CFG \
@@ -989,10 +1003,10 @@ enum msm_isp_ioctl_cmd_code {
struct msm_isp_unmap_buf_req)
#define VIDIOC_MSM_ISP_AHB_CLK_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE+25, struct msm_isp_ahb_clk_cfg)
+ _IOWR('V', MSM_ISP_AHB_CLK_CFG, struct msm_isp_ahb_clk_cfg)
#define VIDIOC_MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC \
- _IOWR('V', BASE_VIDIOC_PRIVATE+26, \
+ _IOWR('V', MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC, \
struct msm_isp_dual_hw_master_slave_sync)
#define VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START \
@@ -1002,4 +1016,8 @@ enum msm_isp_ioctl_cmd_code {
#define VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE \
_IOWR('V', MSM_ISP_MAP_BUF_START_MULTI_PASS_FE, \
struct msm_vfe_fetch_eng_multi_pass_start)
+
+#define VIDIOC_MSM_ISP_REQUEST_BUF_VER2 \
+ _IOWR('V', MSM_ISP_REQUEST_BUF_VER2, struct msm_isp_buf_request_ver2)
+
#endif /* __MSMB_ISP__ */
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
index 831351b2e660..2302f3ce5f86 100644
--- a/include/uapi/scsi/cxlflash_ioctl.h
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -31,6 +31,16 @@ struct dk_cxlflash_hdr {
};
/*
+ * Return flag definitions available to all ioctls
+ *
+ * Similar to the input flags, these are grown from the bottom-up with the
+ * intention that ioctl-specific return flag definitions would grow from the
+ * top-down, allowing the two sets to co-exist. While not required/enforced
+ * at this time, this provides future flexibility.
+ */
+#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL
+
+/*
* Notes:
* -----
* The 'context_id' field of all ioctl structures contains the context
diff --git a/init/Kconfig b/init/Kconfig
index eb9e1a0aa688..ac3f4c210aed 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -392,6 +392,15 @@ config IRQ_TIME_ACCOUNTING
endchoice
+config SCHED_WALT
+ bool "Support window based load tracking"
+ depends on SMP
+ help
+ This feature will allow the scheduler to maintain a tunable window
+ based set of metrics for tasks and runqueues. These metrics can be
+ used to guide task placement as well as task frequency requirements
+ for cpufreq governors.
+
config BSD_PROCESS_ACCT
bool "BSD Process Accounting"
depends on MULTIUSER
@@ -999,6 +1008,23 @@ config CGROUP_CPUACCT
Provides a simple Resource Controller for monitoring the
total CPU consumed by the tasks in a cgroup.
+config CGROUP_SCHEDTUNE
+ bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
+ depends on SCHED_TUNE
+ help
+ This option provides the "schedtune" controller which improves the
+ flexibility of the task boosting mechanism by introducing the support
+ to define "per task" boost values.
+
+ This new controller:
+ 1. allows only a two layers hierarchy, where the root defines the
+ system-wide boost value and its direct childrens define each one a
+ different "class of tasks" to be boosted with a different value
+ 2. supports up to 16 different task classes, each one which could be
+ configured with a different boost value
+
+ Say N if unsure.
+
config PAGE_COUNTER
bool
@@ -1283,6 +1309,7 @@ config SCHED_AUTOGROUP
config SCHED_TUNE
bool "Boosting for CFS tasks (EXPERIMENTAL)"
+ depends on SMP
help
This option enables the system-wide support for task boosting.
When this support is enabled a new sysctl interface is exposed to
@@ -1789,6 +1816,7 @@ choice
config SLAB
bool "SLAB"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
@@ -1796,6 +1824,7 @@ config SLAB
config SLUB
bool "SLUB (Unqueued Allocator)"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
SLUB is a slab allocator that minimizes cache line usage
instead of managing queues of cached objects (SLAB approach).
diff --git a/init/do_mounts_dm.c b/init/do_mounts_dm.c
index f521bc5ae248..ecda58df9a19 100644
--- a/init/do_mounts_dm.c
+++ b/init/do_mounts_dm.c
@@ -176,7 +176,8 @@ static void __init dm_substitute_devices(char *str, size_t str_len)
continue;
/* Temporarily terminate with a nul */
- candidate_end--;
+ if (*candidate_end)
+ candidate_end--;
old_char = *candidate_end;
*candidate_end = '\0';
diff --git a/ipc/msg.c b/ipc/msg.c
index 1471db9a7e61..c6521c205cb4 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -680,7 +680,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
rcu_read_lock();
ipc_lock_object(&msq->q_perm);
- ipc_rcu_putref(msq, ipc_rcu_free);
+ ipc_rcu_putref(msq, msg_rcu_free);
/* raced with RMID? */
if (!ipc_valid_object(&msq->q_perm)) {
err = -EIDRM;
diff --git a/ipc/sem.c b/ipc/sem.c
index b471e5a3863d..20d07008ad5e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -442,7 +442,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -1385,7 +1385,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
rcu_read_unlock();
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}
@@ -1419,20 +1419,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
if (nsems > SEMMSL_FAST) {
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}
}
if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -EFAULT;
goto out_free;
}
for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -ERANGE;
goto out_free;
}
@@ -1722,7 +1722,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return ERR_PTR(-ENOMEM);
}
diff --git a/kernel/capability.c b/kernel/capability.c
index 45432b54d5c6..00411c82dac5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -361,6 +361,24 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
return has_ns_capability_noaudit(t, &init_user_ns, cap);
}
+static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit)
+{
+ int capable;
+
+ if (unlikely(!cap_valid(cap))) {
+ pr_crit("capable() called with invalid cap=%u\n", cap);
+ BUG();
+ }
+
+ capable = audit ? security_capable(current_cred(), ns, cap) :
+ security_capable_noaudit(current_cred(), ns, cap);
+ if (capable == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return true;
+ }
+ return false;
+}
+
/**
* ns_capable - Determine if the current task has a superior capability in effect
* @ns: The usernamespace we want the capability in
@@ -374,19 +392,27 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
*/
bool ns_capable(struct user_namespace *ns, int cap)
{
- if (unlikely(!cap_valid(cap))) {
- pr_crit("capable() called with invalid cap=%u\n", cap);
- BUG();
- }
-
- if (security_capable(current_cred(), ns, cap) == 0) {
- current->flags |= PF_SUPERPRIV;
- return true;
- }
- return false;
+ return ns_capable_common(ns, cap, true);
}
EXPORT_SYMBOL(ns_capable);
+/**
+ * ns_capable_noaudit - Determine if the current task has a superior capability
+ * (unaudited) in effect
+ * @ns: The usernamespace we want the capability in
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+bool ns_capable_noaudit(struct user_namespace *ns, int cap)
+{
+ return ns_capable_common(ns, cap, false);
+}
+EXPORT_SYMBOL(ns_capable_noaudit);
/**
* capable - Determine if the current task has a superior capability in effect
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8c9823947c7a..e94c3c189338 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4854,6 +4854,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
memset(css, 0, sizeof(*css));
css->cgroup = cgrp;
css->ss = ss;
+ css->id = -1;
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
diff --git a/kernel/cred.c b/kernel/cred.c
index 71179a09c1d6..ff8606f77d90 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -689,6 +689,8 @@ EXPORT_SYMBOL(set_security_override_from_ctx);
*/
int set_create_files_as(struct cred *new, struct inode *inode)
{
+ if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
+ return -EINVAL;
new->fsuid = inode->i_uid;
new->fsgid = inode->i_gid;
return security_kernel_create_files_as(new, inode);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7dad84913abf..7b1b772ab1ce 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -171,8 +171,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
ptep = page_check_address(page, mm, addr, &ptl, 0);
- if (!ptep)
+ if (!ptep) {
+ mem_cgroup_cancel_charge(kpage, memcg);
goto unlock;
+ }
get_page(kpage);
page_add_new_anon_rmap(kpage, vma, addr);
@@ -199,7 +201,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
err = 0;
unlock:
- mem_cgroup_cancel_charge(kpage, memcg);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(page);
return err;
@@ -1692,8 +1693,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
int result;
pagefault_disable();
- result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
- sizeof(opcode));
+ result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
pagefault_enable();
if (likely(result == 0))
diff --git a/kernel/exit.c b/kernel/exit.c
index d61f001c5788..d8a12cc06aee 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,6 +54,8 @@
#include <linux/writeback.h>
#include <linux/shm.h>
+#include "sched/tune.h"
+
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
@@ -711,6 +713,7 @@ void do_exit(long code)
exit_signals(tsk); /* sets PF_EXITING */
sched_exit(tsk);
+ schedtune_exit_task(tsk);
/*
* tsk->flags are checked in the futex code to protect against
diff --git a/kernel/futex.c b/kernel/futex.c
index 9d8163afd87c..e8af73cc51a7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -681,7 +681,7 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
int ret;
pagefault_disable();
- ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
+ ret = __get_user(*dest, from);
pagefault_enable();
return ret ? -EFAULT : 0;
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 6b0c0b74a2a1..4b21779d5163 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -268,7 +268,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct msi_domain_ops *ops = info->ops;
msi_alloc_info_t arg;
struct msi_desc *desc;
- int i, ret, virq = -1;
+ int i, ret, virq;
ret = ops->msi_check(domain, info, dev);
if (ret == 0)
@@ -278,12 +278,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
for_each_msi_entry(desc, dev) {
ops->set_desc(&arg, desc);
- if (info->flags & MSI_FLAG_IDENTITY_MAP)
- virq = (int)ops->get_hwirq(info, &arg);
- else
- virq = -1;
- virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
+ virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
dev_to_node(dev), &arg, false);
if (virq < 0) {
ret = -ENOSPC;
@@ -307,6 +303,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
else
dev_dbg(dev, "irq [%d-%d] for MSI\n",
virq, virq + desc->nvec_used - 1);
+ /*
+ * This flag is set by the PCI layer as we need to activate
+ * the MSI entries before the PCI layer enables MSI in the
+ * card. Otherwise the card latches a random msi message.
+ */
+ if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
+ struct irq_data *irq_data;
+
+ irq_data = irq_domain_get_irq_data(domain, desc->irq);
+ irq_domain_activate_irq(irq_data);
+ }
}
return 0;
diff --git a/kernel/module.c b/kernel/module.c
index fe5248ab3378..ea5ba3e8d472 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2612,13 +2612,18 @@ static inline void kmemleak_load_module(const struct module *mod,
#endif
#ifdef CONFIG_MODULE_SIG
-static int module_sig_check(struct load_info *info)
+static int module_sig_check(struct load_info *info, int flags)
{
int err = -ENOKEY;
const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
const void *mod = info->hdr;
- if (info->len > markerlen &&
+ /*
+ * Require flags == 0, as a module with version information
+ * removed is no longer the module that was signed
+ */
+ if (flags == 0 &&
+ info->len > markerlen &&
memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
/* We truncate the module to discard the signature */
info->len -= markerlen;
@@ -2637,7 +2642,7 @@ static int module_sig_check(struct load_info *info)
return err;
}
#else /* !CONFIG_MODULE_SIG */
-static int module_sig_check(struct load_info *info)
+static int module_sig_check(struct load_info *info, int flags)
{
return 0;
}
@@ -3450,7 +3455,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
long err;
char *after_dashes;
- err = module_sig_check(info);
+ err = module_sig_check(info, flags);
if (err)
goto free_copy;
diff --git a/kernel/panic.c b/kernel/panic.c
index b4a0edc489c5..982a52352cfc 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -31,9 +31,6 @@
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
-/* Machine specific panic information string */
-char *mach_panic_string;
-
int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
@@ -423,11 +420,6 @@ late_initcall(init_oops_id);
void print_oops_end_marker(void)
{
init_oops_id();
-
- if (mach_panic_string)
- printk(KERN_WARNING "Board Information: %s\n",
- mach_panic_string);
-
pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index e358313a0d6c..b49cf3ac2d47 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -68,6 +68,7 @@ void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
"suspicious rcu_sync_is_idle() usage");
}
+EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif
/**
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 7c0382a3eace..308f80ce2e43 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -14,7 +14,7 @@ endif
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
obj-y += wait.o completion.o idle.o sched_avg.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
@@ -22,3 +22,4 @@ obj-$(CONFIG_SCHED_DEBUG) += debug.o
obj-$(CONFIG_SCHED_TUNE) += tune.o
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
+obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ee708909dc17..b70a76058b00 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -291,6 +291,18 @@ int sysctl_sched_rt_runtime = 950000;
/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;
+struct rq *
+lock_rq_of(struct task_struct *p, unsigned long *flags)
+{
+ return task_rq_lock(p, flags);
+}
+
+void
+unlock_rq_of(struct rq *rq, struct task_struct *p, unsigned long *flags)
+{
+ task_rq_unlock(rq, p, flags);
+}
+
/*
* this_rq_lock - lock this runqueue and disable interrupts.
*/
@@ -631,7 +643,10 @@ int get_nohz_timer_target(void)
rcu_read_lock();
for_each_domain(cpu, sd) {
for_each_cpu(i, sched_domain_span(sd)) {
- if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
+ if (cpu == i)
+ continue;
+
+ if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
cpu = i;
goto unlock;
}
@@ -1708,7 +1723,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
return cpu;
}
-static void update_avg(u64 *avg, u64 sample)
+void update_avg(u64 *avg, u64 sample)
{
s64 diff = sample - *avg;
*avg += diff >> 3;
@@ -2100,7 +2115,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
set_task_cpu(p, cpu);
}
- set_task_last_wake(p, wallclock);
+ note_task_waking(p, wallclock);
#endif /* CONFIG_SMP */
ttwu_queue(p, cpu);
stat:
@@ -2169,7 +2184,7 @@ static void try_to_wake_up_local(struct task_struct *p)
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
- set_task_last_wake(p, wallclock);
+ note_task_waking(p, wallclock);
}
ttwu_do_wakeup(rq, p, 0);
@@ -2580,7 +2595,7 @@ void wake_up_new_task(struct task_struct *p)
rq = __task_rq_lock(p);
mark_task_starting(p);
- activate_task(rq, p, 0);
+ activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
@@ -2961,6 +2976,36 @@ unsigned long nr_iowait_cpu(int cpu)
return atomic_read(&this->nr_iowait);
}
+#ifdef CONFIG_CPU_QUIET
+u64 nr_running_integral(unsigned int cpu)
+{
+ unsigned int seqcnt;
+ u64 integral;
+ struct rq *q;
+
+ if (cpu >= nr_cpu_ids)
+ return 0;
+
+ q = cpu_rq(cpu);
+
+ /*
+ * Update average to avoid reading stalled value if there were
+ * no run-queue changes for a long time. On the other hand if
+ * the changes are happening right now, just read current value
+ * directly.
+ */
+
+ seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+ integral = do_nr_running_integral(q);
+ if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+ read_seqcount_begin(&q->ave_seqcnt);
+ integral = q->nr_running_integral;
+ }
+
+ return integral;
+}
+#endif
+
void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
{
struct rq *rq = this_rq();
@@ -3051,6 +3096,93 @@ unsigned long long task_sched_runtime(struct task_struct *p)
return ns;
}
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+
+static inline
+unsigned long add_capacity_margin(unsigned long cpu_capacity)
+{
+ cpu_capacity = cpu_capacity * capacity_margin;
+ cpu_capacity /= SCHED_CAPACITY_SCALE;
+ return cpu_capacity;
+}
+
+static inline
+unsigned long sum_capacity_reqs(unsigned long cfs_cap,
+ struct sched_capacity_reqs *scr)
+{
+ unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
+ return total += scr->dl;
+}
+
+static void sched_freq_tick_pelt(int cpu)
+{
+ unsigned long cpu_utilization = capacity_max;
+ unsigned long capacity_curr = capacity_curr_of(cpu);
+ struct sched_capacity_reqs *scr;
+
+ scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+ if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
+ return;
+
+ /*
+ * To make free room for a task that is building up its "real"
+ * utilization and to harm its performance the least, request
+ * a jump to a higher OPP as soon as the margin of free capacity
+ * is impacted (specified by capacity_margin).
+ */
+ set_cfs_cpu_capacity(cpu, true, cpu_utilization);
+}
+
+#ifdef CONFIG_SCHED_WALT
+static void sched_freq_tick_walt(int cpu)
+{
+ unsigned long cpu_utilization = cpu_util(cpu);
+ unsigned long capacity_curr = capacity_curr_of(cpu);
+
+ if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
+ return sched_freq_tick_pelt(cpu);
+
+ /*
+ * Add a margin to the WALT utilization.
+ * NOTE: WALT tracks a single CPU signal for all the scheduling
+ * classes, thus this margin is going to be added to the DL class as
+ * well, which is something we do not do in sched_freq_tick_pelt case.
+ */
+ cpu_utilization = add_capacity_margin(cpu_utilization);
+ if (cpu_utilization <= capacity_curr)
+ return;
+
+ /*
+ * It is likely that the load is growing so we
+ * keep the added margin in our request as an
+ * extra boost.
+ */
+ set_cfs_cpu_capacity(cpu, true, cpu_utilization);
+
+}
+#define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu)
+#else
+#define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu)
+#endif /* CONFIG_SCHED_WALT */
+
+static void sched_freq_tick(int cpu)
+{
+ unsigned long capacity_orig, capacity_curr;
+
+ if (!sched_freq())
+ return;
+
+ capacity_orig = capacity_orig_of(cpu);
+ capacity_curr = capacity_curr_of(cpu);
+ if (capacity_curr == capacity_orig)
+ return;
+
+ _sched_freq_tick(cpu);
+}
+#else
+static inline void sched_freq_tick(int cpu) { }
+#endif /* CONFIG_CPU_FREQ_GOV_SCHED */
+
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
@@ -3077,6 +3209,7 @@ void scheduler_tick(void)
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
early_notif = early_detection_notify(rq, wallclock);
+ sched_freq_tick(cpu);
raw_spin_unlock(&rq->lock);
if (early_notif)
@@ -3403,6 +3536,8 @@ static void __sched notrace __schedule(bool preempt)
if (likely(prev != next)) {
update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+ if (!is_idle_task(prev) && !prev->on_rq)
+ update_avg_burst(prev);
rq->nr_switches++;
rq->curr = next;
@@ -5193,15 +5328,19 @@ void show_state_filter(unsigned long state_filter)
/*
* reset the NMI-timeout, listing all files on a slow
* console might take a lot of time:
+ * Also, reset softlockup watchdogs on all CPUs, because
+ * another CPU might be blocked waiting for us to process
+ * an IPI.
*/
touch_nmi_watchdog();
+ touch_all_softlockup_watchdogs();
if (!state_filter || (p->state & state_filter))
sched_show_task(p);
}
touch_all_softlockup_watchdogs();
-#ifdef CONFIG_SYSRQ_SCHED_DEBUG
+#ifdef CONFIG_SCHED_DEBUG
sysrq_sched_debug_show();
#endif
rcu_read_unlock();
@@ -5231,14 +5370,14 @@ void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- __sched_fork(0, idle);
-
if (!cpu_up)
init_new_task_load(idle, true);
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);
+ __sched_fork(0, idle);
+
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
@@ -5911,9 +6050,60 @@ set_table_entry(struct ctl_table *entry,
}
static struct ctl_table *
+sd_alloc_ctl_energy_table(struct sched_group_energy *sge)
+{
+ struct ctl_table *table = sd_alloc_ctl_entry(5);
+
+ if (table == NULL)
+ return NULL;
+
+ set_table_entry(&table[0], "nr_idle_states", &sge->nr_idle_states,
+ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[1], "idle_states", &sge->idle_states[0].power,
+ sge->nr_idle_states*sizeof(struct idle_state), 0644,
+ proc_doulongvec_minmax, false);
+ set_table_entry(&table[2], "nr_cap_states", &sge->nr_cap_states,
+ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[3], "cap_states", &sge->cap_states[0].cap,
+ sge->nr_cap_states*sizeof(struct capacity_state), 0644,
+ proc_doulongvec_minmax, false);
+
+ return table;
+}
+
+static struct ctl_table *
+sd_alloc_ctl_group_table(struct sched_group *sg)
+{
+ struct ctl_table *table = sd_alloc_ctl_entry(2);
+
+ if (table == NULL)
+ return NULL;
+
+ table->procname = kstrdup("energy", GFP_KERNEL);
+ table->mode = 0555;
+ table->child = sd_alloc_ctl_energy_table((struct sched_group_energy *)sg->sge);
+
+ return table;
+}
+
+static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
- struct ctl_table *table = sd_alloc_ctl_entry(14);
+ struct ctl_table *table;
+ unsigned int nr_entries = 14;
+
+ int i = 0;
+ struct sched_group *sg = sd->groups;
+
+ if (sg->sge) {
+ int nr_sgs = 0;
+
+ do {} while (nr_sgs++, sg = sg->next, sg != sd->groups);
+
+ nr_entries += nr_sgs;
+ }
+
+ table = sd_alloc_ctl_entry(nr_entries);
if (table == NULL)
return NULL;
@@ -5946,7 +6136,19 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
sizeof(long), 0644, proc_doulongvec_minmax, false);
set_table_entry(&table[12], "name", sd->name,
CORENAME_MAX_SIZE, 0444, proc_dostring, false);
- /* &table[13] is terminator */
+ sg = sd->groups;
+ if (sg->sge) {
+ char buf[32];
+ struct ctl_table *entry = &table[13];
+
+ do {
+ snprintf(buf, 32, "group%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0555;
+ entry->child = sd_alloc_ctl_group_table(sg);
+ } while (entry++, i++, sg = sg->next, sg != sd->groups);
+ }
+ /* &table[nr_entries-1] is terminator */
return table;
}
@@ -6066,7 +6268,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
set_window_start(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->calc_load_update = calc_load_update;
- account_reset_rq(rq);
break;
case CPU_ONLINE:
@@ -6259,7 +6460,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
printk(KERN_CONT " %*pbl",
cpumask_pr_args(sched_group_cpus(group)));
if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
- printk(KERN_CONT " (cpu_capacity = %d)",
+ printk(KERN_CONT " (cpu_capacity = %lu)",
group->sgc->capacity);
}
@@ -6320,7 +6521,8 @@ static int sd_degenerate(struct sched_domain *sd)
SD_BALANCE_EXEC |
SD_SHARE_CPUCAPACITY |
SD_SHARE_PKG_RESOURCES |
- SD_SHARE_POWERDOMAIN)) {
+ SD_SHARE_POWERDOMAIN |
+ SD_SHARE_CAP_STATES)) {
if (sd->groups != sd->groups->next)
return 0;
}
@@ -6352,7 +6554,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
SD_SHARE_CPUCAPACITY |
SD_SHARE_PKG_RESOURCES |
SD_PREFER_SIBLING |
- SD_SHARE_POWERDOMAIN);
+ SD_SHARE_POWERDOMAIN |
+ SD_SHARE_CAP_STATES);
if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE;
}
@@ -6431,6 +6634,8 @@ static int init_rootdomain(struct root_domain *rd)
if (cpupri_init(&rd->cpupri) != 0)
goto free_rto_mask;
+
+ init_max_cpu_capacity(&rd->max_cpu_capacity);
return 0;
free_rto_mask:
@@ -6536,11 +6741,13 @@ DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_busy);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_PER_CPU(struct sched_domain *, sd_ea);
+DEFINE_PER_CPU(struct sched_domain *, sd_scs);
static void update_top_cache_domain(int cpu)
{
struct sched_domain *sd;
- struct sched_domain *busy_sd = NULL;
+ struct sched_domain *busy_sd = NULL, *ea_sd = NULL;
int id = cpu;
int size = 1;
@@ -6561,6 +6768,17 @@ static void update_top_cache_domain(int cpu)
sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+
+ for_each_domain(cpu, sd) {
+ if (sd->groups->sge)
+ ea_sd = sd;
+ else
+ break;
+ }
+ rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);
+
+ sd = highest_flag_domain(cpu, SD_SHARE_CAP_STATES);
+ rcu_assign_pointer(per_cpu(sd_scs, cpu), sd);
}
/*
@@ -6733,6 +6951,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
* die on a /0 trap.
*/
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+ sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
/*
* Make sure the first group of this domain contains the
@@ -6865,6 +7084,66 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
}
/*
+ * Check that the per-cpu provided sd energy data is consistent for all cpus
+ * within the mask.
+ */
+static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
+ const struct cpumask *cpumask)
+{
+ const struct sched_group_energy * const sge = fn(cpu);
+ struct cpumask mask;
+ int i;
+
+ if (cpumask_weight(cpumask) <= 1)
+ return;
+
+ cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));
+
+ for_each_cpu(i, &mask) {
+ const struct sched_group_energy * const e = fn(i);
+ int y;
+
+ BUG_ON(e->nr_idle_states != sge->nr_idle_states);
+
+ for (y = 0; y < (e->nr_idle_states); y++) {
+ BUG_ON(e->idle_states[y].power !=
+ sge->idle_states[y].power);
+ }
+
+ BUG_ON(e->nr_cap_states != sge->nr_cap_states);
+
+ for (y = 0; y < (e->nr_cap_states); y++) {
+ BUG_ON(e->cap_states[y].cap != sge->cap_states[y].cap);
+ BUG_ON(e->cap_states[y].power !=
+ sge->cap_states[y].power);
+ }
+ }
+}
+
+static void init_sched_energy(int cpu, struct sched_domain *sd,
+ sched_domain_energy_f fn)
+{
+ if (!(fn && fn(cpu)))
+ return;
+
+ if (cpu != group_balance_cpu(sd->groups))
+ return;
+
+ if (sd->child && !sd->child->groups->sge) {
+ pr_err("BUG: EAS setup broken for CPU%d\n", cpu);
+#ifdef CONFIG_SCHED_DEBUG
+ pr_err(" energy data on %s but not on %s domain\n",
+ sd->name, sd->child->name);
+#endif
+ return;
+ }
+
+ check_sched_energy_data(cpu, fn, sched_group_cpus(sd->groups));
+
+ sd->groups->sge = fn(cpu);
+}
+
+/*
* Initializers for schedule domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
*/
@@ -6972,6 +7251,7 @@ static int sched_domains_curr_level;
* SD_SHARE_PKG_RESOURCES - describes shared caches
* SD_NUMA - describes NUMA topologies
* SD_SHARE_POWERDOMAIN - describes shared power domain
+ * SD_SHARE_CAP_STATES - describes shared capacity states
*
* Odd one out:
* SD_ASYM_PACKING - describes SMT quirks
@@ -6981,7 +7261,8 @@ static int sched_domains_curr_level;
SD_SHARE_PKG_RESOURCES | \
SD_NUMA | \
SD_ASYM_PACKING | \
- SD_SHARE_POWERDOMAIN)
+ SD_SHARE_POWERDOMAIN | \
+ SD_SHARE_CAP_STATES)
static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl, int cpu)
@@ -7534,6 +7815,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
enum s_alloc alloc_state;
struct sched_domain *sd;
struct s_data d;
+ struct rq *rq = NULL;
int i, ret = -ENOMEM;
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
@@ -7572,10 +7854,14 @@ static int build_sched_domains(const struct cpumask *cpu_map,
/* Calculate CPU capacity for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
+ struct sched_domain_topology_level *tl = sched_domain_topology;
+
if (!cpumask_test_cpu(i, cpu_map))
continue;
- for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
+ if (energy_aware())
+ init_sched_energy(i, sd, tl->energy);
claim_allocations(i, sd);
init_sched_groups_capacity(i, sd);
}
@@ -7584,6 +7870,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
/* Attach the domains */
rcu_read_lock();
for_each_cpu(i, cpu_map) {
+ rq = cpu_rq(i);
sd = *per_cpu_ptr(d.sd, i);
cpu_attach_domain(sd, d.rd, i);
}
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
new file mode 100644
index 000000000000..f6f9b9b3a4a8
--- /dev/null
+++ b/kernel/sched/cpufreq_sched.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2015 Michael Turquette <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/irq_work.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_sched.h>
+
+#include "sched.h"
+
+#define THROTTLE_DOWN_NSEC 50000000 /* 50ms default */
+#define THROTTLE_UP_NSEC 500000 /* 500us default */
+
+struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE;
+static bool __read_mostly cpufreq_driver_slow;
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
+static struct cpufreq_governor cpufreq_gov_sched;
+#endif
+
+static DEFINE_PER_CPU(unsigned long, enabled);
+DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
+
+/**
+ * gov_data - per-policy data internal to the governor
+ * @up_throttle: next throttling period expiry if increasing OPP
+ * @down_throttle: next throttling period expiry if decreasing OPP
+ * @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP
+ * @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP
+ * @task: worker thread for dvfs transition that may block/sleep
+ * @irq_work: callback used to wake up worker thread
+ * @requested_freq: last frequency requested by the sched governor
+ *
+ * struct gov_data is the per-policy cpufreq_sched-specific data structure. A
+ * per-policy instance of it is created when the cpufreq_sched governor receives
+ * the CPUFREQ_GOV_START condition and a pointer to it exists in the gov_data
+ * member of struct cpufreq_policy.
+ *
+ * Readers of this data must call down_read(policy->rwsem). Writers must
+ * call down_write(policy->rwsem).
+ */
+struct gov_data {
+ ktime_t up_throttle;
+ ktime_t down_throttle;
+ unsigned int up_throttle_nsec;
+ unsigned int down_throttle_nsec;
+ struct task_struct *task;
+ struct irq_work irq_work;
+ unsigned int requested_freq;
+};
+
+static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
+ unsigned int freq)
+{
+ struct gov_data *gd = policy->governor_data;
+
+ /* avoid race with cpufreq_sched_stop */
+ if (!down_write_trylock(&policy->rwsem))
+ return;
+
+ __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
+
+ gd->up_throttle = ktime_add_ns(ktime_get(), gd->up_throttle_nsec);
+ gd->down_throttle = ktime_add_ns(ktime_get(), gd->down_throttle_nsec);
+ up_write(&policy->rwsem);
+}
+
+static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq)
+{
+ ktime_t now = ktime_get();
+
+ ktime_t throttle = gd->requested_freq < cur_freq ?
+ gd->down_throttle : gd->up_throttle;
+
+ if (ktime_after(now, throttle))
+ return false;
+
+ while (1) {
+ int usec_left = ktime_to_ns(ktime_sub(throttle, now));
+
+ usec_left /= NSEC_PER_USEC;
+ trace_cpufreq_sched_throttled(usec_left);
+ usleep_range(usec_left, usec_left + 100);
+ now = ktime_get();
+ if (ktime_after(now, throttle))
+ return true;
+ }
+}
+
+/*
+ * we pass in struct cpufreq_policy. This is safe because changing out the
+ * policy requires a call to __cpufreq_governor(policy, CPUFREQ_GOV_STOP),
+ * which tears down all of the data structures and __cpufreq_governor(policy,
+ * CPUFREQ_GOV_START) will do a full rebuild, including this kthread with the
+ * new policy pointer
+ */
+static int cpufreq_sched_thread(void *data)
+{
+ struct sched_param param;
+ struct cpufreq_policy *policy;
+ struct gov_data *gd;
+ unsigned int new_request = 0;
+ unsigned int last_request = 0;
+ int ret;
+
+ policy = (struct cpufreq_policy *) data;
+ gd = policy->governor_data;
+
+ param.sched_priority = 50;
+ ret = sched_setscheduler_nocheck(gd->task, SCHED_FIFO, &param);
+ if (ret) {
+ pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+ do_exit(-EINVAL);
+ } else {
+ pr_debug("%s: kthread (%d) set to SCHED_FIFO\n",
+ __func__, gd->task->pid);
+ }
+
+ do {
+ new_request = gd->requested_freq;
+ if (new_request == last_request) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ schedule();
+ } else {
+ /*
+ * if the frequency thread sleeps while waiting to be
+ * unthrottled, start over to check for a newer request
+ */
+ if (finish_last_request(gd, policy->cur))
+ continue;
+ last_request = new_request;
+ cpufreq_sched_try_driver_target(policy, new_request);
+ }
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static void cpufreq_sched_irq_work(struct irq_work *irq_work)
+{
+ struct gov_data *gd;
+
+ gd = container_of(irq_work, struct gov_data, irq_work);
+ if (!gd)
+ return;
+
+ wake_up_process(gd->task);
+}
+
+static void update_fdomain_capacity_request(int cpu)
+{
+ unsigned int freq_new, index_new, cpu_tmp;
+ struct cpufreq_policy *policy;
+ struct gov_data *gd;
+ unsigned long capacity = 0;
+
+ /*
+ * Avoid grabbing the policy if possible. A test is still
+ * required after locking the CPU's policy to avoid racing
+ * with the governor changing.
+ */
+ if (!per_cpu(enabled, cpu))
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (IS_ERR_OR_NULL(policy))
+ return;
+
+ if (policy->governor != &cpufreq_gov_sched ||
+ !policy->governor_data)
+ goto out;
+
+ gd = policy->governor_data;
+
+ /* find max capacity requested by cpus in this policy */
+ for_each_cpu(cpu_tmp, policy->cpus) {
+ struct sched_capacity_reqs *scr;
+
+ scr = &per_cpu(cpu_sched_capacity_reqs, cpu_tmp);
+ capacity = max(capacity, scr->total);
+ }
+
+ /* Convert the new maximum capacity request into a cpu frequency */
+ freq_new = capacity * policy->max >> SCHED_CAPACITY_SHIFT;
+ if (cpufreq_frequency_table_target(policy, policy->freq_table,
+ freq_new, CPUFREQ_RELATION_L,
+ &index_new))
+ goto out;
+ freq_new = policy->freq_table[index_new].frequency;
+
+ if (freq_new > policy->max)
+ freq_new = policy->max;
+
+ if (freq_new < policy->min)
+ freq_new = policy->min;
+
+ trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
+ gd->requested_freq);
+ if (freq_new == gd->requested_freq)
+ goto out;
+
+ gd->requested_freq = freq_new;
+
+ /*
+ * Throttling is not yet supported on platforms with fast cpufreq
+ * drivers.
+ */
+ if (cpufreq_driver_slow)
+ irq_work_queue_on(&gd->irq_work, cpu);
+ else
+ cpufreq_sched_try_driver_target(policy, freq_new);
+
+out:
+ cpufreq_cpu_put(policy);
+}
+
+void update_cpu_capacity_request(int cpu, bool request)
+{
+ unsigned long new_capacity;
+ struct sched_capacity_reqs *scr;
+
+ /* The rq lock serializes access to the CPU's sched_capacity_reqs. */
+ lockdep_assert_held(&cpu_rq(cpu)->lock);
+
+ scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+
+ new_capacity = scr->cfs + scr->rt;
+ new_capacity = new_capacity * capacity_margin
+ / SCHED_CAPACITY_SCALE;
+ new_capacity += scr->dl;
+
+ if (new_capacity == scr->total)
+ return;
+
+ trace_cpufreq_sched_update_capacity(cpu, request, scr, new_capacity);
+
+ scr->total = new_capacity;
+ if (request)
+ update_fdomain_capacity_request(cpu);
+}
+
+static inline void set_sched_freq(void)
+{
+ static_key_slow_inc(&__sched_freq);
+}
+
+static inline void clear_sched_freq(void)
+{
+ static_key_slow_dec(&__sched_freq);
+}
+
+static struct attribute_group sched_attr_group_gov_pol;
+static struct attribute_group *get_sysfs_attr(void)
+{
+ return &sched_attr_group_gov_pol;
+}
+
+static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
+{
+ struct gov_data *gd;
+ int cpu;
+ int rc;
+
+ for_each_cpu(cpu, policy->cpus)
+ memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0,
+ sizeof(struct sched_capacity_reqs));
+
+ gd = kzalloc(sizeof(*gd), GFP_KERNEL);
+ if (!gd)
+ return -ENOMEM;
+
+ gd->up_throttle_nsec = policy->cpuinfo.transition_latency ?
+ policy->cpuinfo.transition_latency :
+ THROTTLE_UP_NSEC;
+ gd->down_throttle_nsec = THROTTLE_DOWN_NSEC;
+ pr_debug("%s: throttle threshold = %u [ns]\n",
+ __func__, gd->up_throttle_nsec);
+
+ rc = sysfs_create_group(get_governor_parent_kobj(policy), get_sysfs_attr());
+ if (rc) {
+ pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc);
+ goto err;
+ }
+
+ policy->governor_data = gd;
+ if (cpufreq_driver_is_slow()) {
+ cpufreq_driver_slow = true;
+ gd->task = kthread_create(cpufreq_sched_thread, policy,
+ "kschedfreq:%d",
+ cpumask_first(policy->related_cpus));
+ if (IS_ERR_OR_NULL(gd->task)) {
+ pr_err("%s: failed to create kschedfreq thread\n",
+ __func__);
+ goto err;
+ }
+ get_task_struct(gd->task);
+ kthread_bind_mask(gd->task, policy->related_cpus);
+ wake_up_process(gd->task);
+ init_irq_work(&gd->irq_work, cpufreq_sched_irq_work);
+ }
+
+ set_sched_freq();
+
+ return 0;
+
+err:
+ policy->governor_data = NULL;
+ kfree(gd);
+ return -ENOMEM;
+}
+
+static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
+{
+ struct gov_data *gd = policy->governor_data;
+
+ clear_sched_freq();
+ if (cpufreq_driver_slow) {
+ kthread_stop(gd->task);
+ put_task_struct(gd->task);
+ }
+
+ sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr());
+
+ policy->governor_data = NULL;
+
+ kfree(gd);
+ return 0;
+}
+
+static int cpufreq_sched_start(struct cpufreq_policy *policy)
+{
+ int cpu;
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(enabled, cpu) = 1;
+
+ return 0;
+}
+
+static void cpufreq_sched_limits(struct cpufreq_policy *policy)
+{
+ unsigned int clamp_freq;
+	struct gov_data *gd = policy->governor_data;
+
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
+ policy->cpu, policy->min, policy->max,
+ policy->cur);
+
+ clamp_freq = clamp(gd->requested_freq, policy->min, policy->max);
+
+ if (policy->cur != clamp_freq)
+ __cpufreq_driver_target(policy, clamp_freq, CPUFREQ_RELATION_L);
+}
+
+static int cpufreq_sched_stop(struct cpufreq_policy *policy)
+{
+ int cpu;
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(enabled, cpu) = 0;
+
+ return 0;
+}
+
+static int cpufreq_sched_setup(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_INIT:
+ return cpufreq_sched_policy_init(policy);
+ case CPUFREQ_GOV_POLICY_EXIT:
+ return cpufreq_sched_policy_exit(policy);
+ case CPUFREQ_GOV_START:
+ return cpufreq_sched_start(policy);
+ case CPUFREQ_GOV_STOP:
+ return cpufreq_sched_stop(policy);
+ case CPUFREQ_GOV_LIMITS:
+ cpufreq_sched_limits(policy);
+ break;
+ }
+ return 0;
+}
+
+/* Tunables */
+static ssize_t show_up_throttle_nsec(struct gov_data *gd, char *buf)
+{
+ return sprintf(buf, "%u\n", gd->up_throttle_nsec);
+}
+
+static ssize_t store_up_throttle_nsec(struct gov_data *gd,
+ const char *buf, size_t count)
+{
+ int ret;
+ long unsigned int val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ gd->up_throttle_nsec = val;
+ return count;
+}
+
+static ssize_t show_down_throttle_nsec(struct gov_data *gd, char *buf)
+{
+ return sprintf(buf, "%u\n", gd->down_throttle_nsec);
+}
+
+static ssize_t store_down_throttle_nsec(struct gov_data *gd,
+ const char *buf, size_t count)
+{
+ int ret;
+ long unsigned int val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ gd->down_throttle_nsec = val;
+ return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name) \
+static ssize_t show_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ return show_##file_name(policy->governor_data, buf); \
+}
+
+#define store_gov_pol_sys(file_name) \
+static ssize_t store_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
+{ \
+ return store_##file_name(policy->governor_data, buf, count); \
+}
+
+#define gov_pol_attr_rw(_name) \
+ static struct freq_attr _name##_gov_pol = \
+ __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define show_store_gov_pol_sys(file_name) \
+ show_gov_pol_sys(file_name); \
+ store_gov_pol_sys(file_name)
+#define tunable_handlers(file_name) \
+ show_gov_pol_sys(file_name); \
+ store_gov_pol_sys(file_name); \
+ gov_pol_attr_rw(file_name)
+
+tunable_handlers(down_throttle_nsec);
+tunable_handlers(up_throttle_nsec);
+
+/* Per policy governor instance */
+static struct attribute *sched_attributes_gov_pol[] = {
+ &up_throttle_nsec_gov_pol.attr,
+ &down_throttle_nsec_gov_pol.attr,
+ NULL,
+};
+
+static struct attribute_group sched_attr_group_gov_pol = {
+ .attrs = sched_attributes_gov_pol,
+ .name = "sched",
+};
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
+static
+#endif
+struct cpufreq_governor cpufreq_gov_sched = {
+ .name = "sched",
+ .governor = cpufreq_sched_setup,
+ .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_sched_init(void)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_possible_mask)
+ per_cpu(enabled, cpu) = 0;
+ return cpufreq_register_governor(&cpufreq_gov_sched);
+}
+
+/* Try to make this the default governor */
+fs_initcall(cpufreq_sched_init);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index f29b132a9f8b..692d1f888f17 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -611,19 +611,25 @@ static void cputime_adjust(struct task_cputime *curr,
stime = curr->stime;
utime = curr->utime;
- if (utime == 0) {
- stime = rtime;
+ /*
+ * If either stime or both stime and utime are 0, assume all runtime is
+ * userspace. Once a task gets some ticks, the monotonicity code at
+ * 'update' will ensure things converge to the observed ratio.
+ */
+ if (stime == 0) {
+ utime = rtime;
goto update;
}
- if (stime == 0) {
- utime = rtime;
+ if (utime == 0) {
+ stime = rtime;
goto update;
}
stime = scale_stime((__force u64)stime, (__force u64)rtime,
(__force u64)(stime + utime));
+update:
/*
* Make sure stime doesn't go backwards; this preserves monotonicity
* for utime because rtime is monotonic.
@@ -646,7 +652,6 @@ static void cputime_adjust(struct task_cputime *curr,
stime = rtime - utime;
}
-update:
prev->stime = stime;
prev->utime = utime;
out:
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 44178fea87d0..685ae83b2bfa 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -43,6 +43,24 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
return !RB_EMPTY_NODE(&dl_se->rb_node);
}
+static void add_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ u64 se_bw = dl_se->dl_bw;
+
+ dl_rq->avg_bw += se_bw;
+}
+
+static void clear_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ u64 se_bw = dl_se->dl_bw;
+
+ dl_rq->avg_bw -= se_bw;
+ if (dl_rq->avg_bw < 0) {
+ WARN_ON(1);
+ dl_rq->avg_bw = 0;
+ }
+}
+
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
struct sched_dl_entity *dl_se = &p->dl;
@@ -496,6 +514,9 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
+ if (dl_se->dl_new)
+ add_average_bw(dl_se, dl_rq);
+
/*
* The arrival of a new instance needs special treatment, i.e.,
* the actual scheduling parameters have to be "renewed".
@@ -743,8 +764,6 @@ static void update_curr_dl(struct rq *rq)
curr->se.exec_start = rq_clock_task(rq);
cpuacct_charge(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
-
dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
if (dl_runtime_exceeded(dl_se)) {
dl_se->dl_throttled = 1;
@@ -1280,6 +1299,8 @@ static void task_fork_dl(struct task_struct *p)
static void task_dead_dl(struct task_struct *p)
{
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+ struct dl_rq *dl_rq = dl_rq_of_se(&p->dl);
+ struct rq *rq = rq_of_dl_rq(dl_rq);
/*
* Since we are TASK_DEAD we won't slip out of the domain!
@@ -1288,6 +1309,8 @@ static void task_dead_dl(struct task_struct *p)
/* XXX we should retain the bw until 0-lag */
dl_b->total_bw -= p->dl.dl_bw;
raw_spin_unlock_irq(&dl_b->lock);
+
+ clear_average_bw(&p->dl, &rq->dl);
}
static void set_curr_task_dl(struct rq *rq)
@@ -1596,7 +1619,9 @@ retry:
next_task->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, next_task, 0);
+ clear_average_bw(&next_task->dl, &rq->dl);
set_task_cpu(next_task, later_rq->cpu);
+ add_average_bw(&next_task->dl, &later_rq->dl);
activate_task(later_rq, next_task, 0);
next_task->on_rq = TASK_ON_RQ_QUEUED;
ret = 1;
@@ -1686,7 +1711,9 @@ static void pull_dl_task(struct rq *this_rq)
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
+ clear_average_bw(&p->dl, &src_rq->dl);
set_task_cpu(p, this_cpu);
+ add_average_bw(&p->dl, &this_rq->dl);
activate_task(this_rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
dmin = p->dl.deadline;
@@ -1793,6 +1820,8 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
if (!start_dl_timer(p))
__dl_clear_params(p);
+ clear_average_bw(&p->dl, &rq->dl);
+
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
new file mode 100644
index 000000000000..50d183b1e156
--- /dev/null
+++ b/kernel/sched/energy.c
@@ -0,0 +1,134 @@
+/*
+ * Obtain energy cost data from DT and populate relevant scheduler data
+ * structures.
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define pr_fmt(fmt) "sched-energy: " fmt
+
+#define DEBUG
+
+#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/sched_energy.h>
+#include <linux/stddef.h>
+
+#include "sched.h"
+
+struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
+bool sched_energy_aware;
+
+static void free_resources(void)
+{
+ int cpu, sd_level;
+ struct sched_group_energy *sge;
+
+ for_each_possible_cpu(cpu) {
+ for_each_possible_sd_level(sd_level) {
+ sge = sge_array[cpu][sd_level];
+ if (sge) {
+ kfree(sge->cap_states);
+ kfree(sge->idle_states);
+ kfree(sge);
+ }
+ }
+ }
+}
+
+void init_sched_energy_costs(void)
+{
+ struct device_node *cn, *cp;
+ struct capacity_state *cap_states;
+ struct idle_state *idle_states;
+ struct sched_group_energy *sge;
+ const struct property *prop;
+ int sd_level, i, nstates, cpu;
+ const __be32 *val;
+
+ if (!energy_aware()) {
+ sched_energy_aware = false;
+ return;
+ }
+
+ sched_energy_aware = true;
+
+ for_each_possible_cpu(cpu) {
+ cn = of_get_cpu_node(cpu, NULL);
+ if (!cn) {
+ pr_warn("CPU device node missing for CPU %d\n", cpu);
+ return;
+ }
+
+ if (!of_find_property(cn, "sched-energy-costs", NULL)) {
+ pr_warn("CPU device node has no sched-energy-costs\n");
+ return;
+ }
+
+ for_each_possible_sd_level(sd_level) {
+ cp = of_parse_phandle(cn, "sched-energy-costs", sd_level);
+ if (!cp)
+ break;
+
+ prop = of_find_property(cp, "busy-cost-data", NULL);
+ if (!prop || !prop->value) {
+ pr_warn("No busy-cost data, skipping sched_energy init\n");
+ goto out;
+ }
+
+ sge = kcalloc(1, sizeof(struct sched_group_energy),
+ GFP_NOWAIT);
+
+ nstates = (prop->length / sizeof(u32)) / 2;
+ cap_states = kcalloc(nstates,
+ sizeof(struct capacity_state),
+ GFP_NOWAIT);
+
+ for (i = 0, val = prop->value; i < nstates; i++) {
+ cap_states[i].cap = be32_to_cpup(val++);
+ cap_states[i].power = be32_to_cpup(val++);
+ }
+
+ sge->nr_cap_states = nstates;
+ sge->cap_states = cap_states;
+
+ prop = of_find_property(cp, "idle-cost-data", NULL);
+ if (!prop || !prop->value) {
+ pr_warn("No idle-cost data, skipping sched_energy init\n");
+ goto out;
+ }
+
+ nstates = (prop->length / sizeof(u32));
+ idle_states = kcalloc(nstates,
+ sizeof(struct idle_state),
+ GFP_NOWAIT);
+
+ for (i = 0, val = prop->value; i < nstates; i++)
+ idle_states[i].power = be32_to_cpup(val++);
+
+ sge->nr_idle_states = nstates;
+ sge->idle_states = idle_states;
+
+ sge_array[cpu][sd_level] = sge;
+ }
+ }
+
+ pr_info("Sched-energy-costs installed from DT\n");
+ return;
+
+out:
+ free_resources();
+}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3db77aff2433..87538f7d495a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -30,10 +30,11 @@
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>
+#include <linux/module.h>
#include "sched.h"
#include <trace/events/sched.h>
-
+#include "tune.h"
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -49,6 +50,11 @@
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+unsigned int sysctl_sched_is_big_little = 0;
+unsigned int sysctl_sched_sync_hint_enable = 1;
+unsigned int sysctl_sched_initial_task_util = 0;
+unsigned int sysctl_sched_cstate_aware = 1;
+
/*
* The initial- and re-scaling of tunables is configurable
* (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
@@ -692,13 +698,13 @@ void init_entity_runnable_average(struct sched_entity *se)
sa->period_contrib = 1023;
sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
- sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
+ sa->util_avg = sched_freq() ?
+ sysctl_sched_initial_task_util :
+ scale_load_down(SCHED_LOAD_SCALE);
sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
}
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
#else
void init_entity_runnable_average(struct sched_entity *se)
{
@@ -1230,8 +1236,6 @@ static void task_numa_assign(struct task_numa_env *env,
{
if (env->best_task)
put_task_struct(env->best_task);
- if (p)
- get_task_struct(p);
env->best_task = p;
env->best_imp = imp;
@@ -1299,20 +1303,30 @@ static void task_numa_compare(struct task_numa_env *env,
long imp = env->p->numa_group ? groupimp : taskimp;
long moveimp = imp;
int dist = env->dist;
+ bool assigned = false;
rcu_read_lock();
raw_spin_lock_irq(&dst_rq->lock);
cur = dst_rq->curr;
/*
- * No need to move the exiting task, and this ensures that ->curr
- * wasn't reaped and thus get_task_struct() in task_numa_assign()
- * is safe under RCU read lock.
- * Note that rcu_read_lock() itself can't protect from the final
- * put_task_struct() after the last schedule().
+ * No need to move the exiting task or idle task.
*/
if ((cur->flags & PF_EXITING) || is_idle_task(cur))
cur = NULL;
+ else {
+ /*
+ * The task_struct must be protected here to protect the
+ * p->numa_faults access in the task_weight since the
+ * numa_faults could already be freed in the following path:
+ * finish_task_switch()
+ * --> put_task_struct()
+ * --> __put_task_struct()
+ * --> task_numa_free()
+ */
+ get_task_struct(cur);
+ }
+
raw_spin_unlock_irq(&dst_rq->lock);
/*
@@ -1396,6 +1410,7 @@ balance:
*/
if (!load_too_imbalanced(src_load, dst_load, env)) {
imp = moveimp - 1;
+ put_task_struct(cur);
cur = NULL;
goto assign;
}
@@ -1421,9 +1436,16 @@ balance:
env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
assign:
+ assigned = true;
task_numa_assign(env, cur, imp);
unlock:
rcu_read_unlock();
+ /*
+ * The dst_rq->curr isn't assigned. The protection for task_struct is
+ * finished.
+ */
+ if (cur && !assigned)
+ put_task_struct(cur);
}
static void task_numa_find_cpu(struct task_numa_env *env,
@@ -2591,6 +2613,7 @@ static u32 __compute_runnable_contrib(u64 n)
#define SBC_FLAG_CSTATE_LOAD 0x100
#define SBC_FLAG_BEST_SIBLING 0x200
#define SBC_FLAG_WAKER_CPU 0x400
+#define SBC_FLAG_PACK_TASK 0x800
/* Cluster selection flag */
#define SBC_FLAG_COLOC_CLUSTER 0x10000
@@ -2607,6 +2630,7 @@ struct cpu_select_env {
u8 sync:1;
u8 ignore_prev_cpu:1;
enum sched_boost_policy boost_policy;
+ u8 pack_task:1;
int prev_cpu;
DECLARE_BITMAP(candidate_list, NR_CPUS);
DECLARE_BITMAP(backup_list, NR_CPUS);
@@ -2958,8 +2982,17 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
{
int cpu_cost;
- cpu_cost = power_cost(cpu, task_load(env->p) +
+ /*
+ * We try to find the least loaded *busy* CPU irrespective
+ * of the power cost.
+ */
+ if (env->pack_task)
+ cpu_cost = cpu_min_power_cost(cpu);
+
+ else
+ cpu_cost = power_cost(cpu, task_load(env->p) +
cpu_cravg_sync(cpu, env->sync));
+
if (cpu_cost <= stats->min_cost)
__update_cluster_stats(cpu, stats, env, cpu_cost);
}
@@ -3034,6 +3067,15 @@ static inline int wake_to_idle(struct task_struct *p)
(p->flags & PF_WAKE_UP_IDLE) || sysctl_sched_wake_to_idle;
}
+static inline bool env_has_special_flags(struct cpu_select_env *env)
+{
+ if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
+ env->reason)
+ return true;
+
+ return false;
+}
+
static inline bool
bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
{
@@ -3041,9 +3083,7 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
struct task_struct *task = env->p;
struct sched_cluster *cluster;
- if (env->boost_policy != SCHED_BOOST_NONE || env->reason ||
- !task->ravg.mark_start ||
- env->need_idle || !sched_short_sleep_task_threshold)
+ if (!task->ravg.mark_start || !sched_short_sleep_task_threshold)
return false;
prev_cpu = env->prev_cpu;
@@ -3092,8 +3132,7 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
static inline bool
wake_to_waker_cluster(struct cpu_select_env *env)
{
- return env->boost_policy == SCHED_BOOST_NONE &&
- !env->need_idle && !env->reason && env->sync &&
+ return env->sync &&
task_load(current) > sched_big_waker_task_load &&
task_load(env->p) < sched_small_wakee_task_load;
}
@@ -3118,7 +3157,6 @@ cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
return !cpumask_empty(&tmp_mask);
}
-
/* return cheapest cpu that can fit this task */
static int select_best_cpu(struct task_struct *p, int target, int reason,
int sync)
@@ -3128,6 +3166,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
struct related_thread_group *grp;
unsigned int sbc_flag = 0;
int cpu = raw_smp_processor_id();
+ bool special;
struct cpu_select_env env = {
.p = p,
@@ -3140,6 +3179,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
.rtg = NULL,
.sbc_best_flag = 0,
.sbc_best_cluster_flag = 0,
+ .pack_task = false,
};
env.boost_policy = task_sched_boost(p) ?
@@ -3149,6 +3189,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
bitmap_zero(env.backup_list, NR_CPUS);
init_cluster_cpu_stats(&stats);
+ special = env_has_special_flags(&env);
rcu_read_lock();
@@ -3160,7 +3201,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
clear_bit(pref_cluster->id, env.candidate_list);
else
env.rtg = grp;
- } else {
+ } else if (!special) {
cluster = cpu_rq(cpu)->cluster;
if (wake_to_waker_cluster(&env)) {
if (bias_to_waker_cpu(p, cpu)) {
@@ -3181,6 +3222,10 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
}
}
+ if (!special && is_short_burst_task(p)) {
+ env.pack_task = true;
+ sbc_flag = SBC_FLAG_PACK_TASK;
+ }
retry:
cluster = select_least_power_cluster(&env);
@@ -3671,6 +3716,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
scale_freq = arch_scale_freq_capacity(NULL, cpu);
scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+ trace_sched_contrib_scale_f(cpu, scale_freq, scale_cpu);
/* delta_w is the amount already accumulated against our next period */
delta_w = sa->period_contrib;
@@ -3832,6 +3878,10 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
update_tg_load_avg(cfs_rq, 0);
+
+ if (entity_is_task(se))
+ trace_sched_load_avg_task(task_of(se), &se->avg);
+ trace_sched_load_avg_cpu(cpu, cfs_rq);
}
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -3912,27 +3962,45 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
}
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
-{
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 last_update_time;
-
#ifndef CONFIG_64BIT
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
u64 last_update_time_copy;
+ u64 last_update_time;
do {
last_update_time_copy = cfs_rq->load_last_update_time_copy;
smp_rmb();
last_update_time = cfs_rq->avg.last_update_time;
} while (last_update_time != last_update_time_copy);
+
+ return last_update_time;
+}
#else
- last_update_time = cfs_rq->avg.last_update_time;
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+ return cfs_rq->avg.last_update_time;
+}
#endif
+/*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+void remove_entity_load_avg(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ u64 last_update_time;
+
+ /*
+ * Newly created task or never used group entity should not be removed
+ * from its (source) cfs_rq
+ */
+ if (se->avg.last_update_time == 0)
+ return;
+
+ last_update_time = cfs_rq_last_update_time(cfs_rq);
+
__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
@@ -5289,6 +5357,30 @@ static inline void hrtick_update(struct rq *rq)
}
#endif
+#ifdef CONFIG_SMP
+static bool cpu_overutilized(int cpu);
+static inline unsigned long boosted_cpu_util(int cpu);
+#else
+#define boosted_cpu_util(cpu) cpu_util(cpu)
+#endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
+static void update_capacity_of(int cpu)
+{
+ unsigned long req_cap;
+
+ if (!sched_freq())
+ return;
+
+ /* Convert scale-invariant capacity to cpu. */
+ req_cap = boosted_cpu_util(cpu);
+ req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
+ set_cfs_cpu_capacity(cpu, true, req_cap);
+}
+#else
+#define update_capacity_of(X) do {} while(0)
+#endif /* SMP and CPU_FREQ_GOV_SCHED */
+
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
@@ -5299,6 +5391,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
+#ifdef CONFIG_SMP
+ int task_new = flags & ENQUEUE_WAKEUP_NEW;
+ int task_wakeup = flags & ENQUEUE_WAKEUP;
+#endif
for_each_sched_entity(se) {
if (se->on_rq)
@@ -5336,6 +5432,50 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
add_nr_running(rq, 1);
inc_rq_hmp_stats(rq, p, 1);
}
+
+#ifdef CONFIG_SMP
+
+ /*
+ * Update SchedTune accounting.
+ *
+ * We do it before updating the CPU capacity to ensure the
+ * boost value of the current task is accounted for in the
+ * selection of the OPP.
+ *
+ * We do it also in the case where we enqueue a throttled task;
+ * we could argue that a throttled task should not boost a CPU,
+ * however:
+ * a) properly implementing CPU boosting considering throttled
+ * tasks will increase a lot the complexity of the solution
+ * b) it's not easy to quantify the benefits introduced by
+ * such a more complex solution.
+ * Thus, for the time being we go for the simple solution and boost
+ * also for throttled RQs.
+ */
+ schedtune_enqueue_task(p, cpu_of(rq));
+
+ if (energy_aware() && !se) {
+ if (!task_new && !rq->rd->overutilized &&
+ cpu_overutilized(rq->cpu)) {
+ rq->rd->overutilized = true;
+ trace_sched_overutilized(true);
+ }
+
+ }
+
+ if (!se) {
+ /*
+ * We want to potentially trigger a freq switch
+ * request only for tasks that are waking up; this is
+ * because we get here also during load balancing, but
+ * in these cases it seems wise to trigger as single
+ * request after load balancing is done.
+ */
+ if (task_new || task_wakeup)
+ update_capacity_of(cpu_of(rq));
+ }
+
+#endif /* CONFIG_SMP */
hrtick_update(rq);
}
@@ -5399,6 +5539,37 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
sub_nr_running(rq, 1);
dec_rq_hmp_stats(rq, p, 1);
}
+
+#ifdef CONFIG_SMP
+
+ /*
+ * Update SchedTune accounting
+ *
+ * We do it before updating the CPU capacity to ensure the
+ * boost value of the current task is accounted for in the
+ * selection of the OPP.
+ */
+ schedtune_dequeue_task(p, cpu_of(rq));
+
+ if (!se) {
+ /*
+ * We want to potentially trigger a freq switch
+ * request only for tasks that are going to sleep;
+ * this is because we get here also during load
+ * balancing, but in these cases it seems wise to
+ * trigger as single request after load balancing is
+ * done.
+ */
+ if (task_sleep) {
+ if (rq->cfs.nr_running)
+ update_capacity_of(cpu_of(rq));
+ else if (sched_freq())
+ set_cfs_cpu_capacity(cpu_of(rq), false, 0);
+ }
+ }
+
+#endif /* CONFIG_SMP */
+
hrtick_update(rq);
}
@@ -5625,15 +5796,6 @@ static unsigned long target_load(int cpu, int type)
return max(rq->cpu_load[type-1], total);
}
-static unsigned long capacity_of(int cpu)
-{
- return cpu_rq(cpu)->cpu_capacity;
-}
-
-static unsigned long capacity_orig_of(int cpu)
-{
- return cpu_rq(cpu)->cpu_capacity_orig;
-}
static unsigned long cpu_avg_load_per_task(int cpu)
{
@@ -5746,19 +5908,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
return wl;
for_each_sched_entity(se) {
- long w, W;
+ struct cfs_rq *cfs_rq = se->my_q;
+ long W, w = cfs_rq_load_avg(cfs_rq);
- tg = se->my_q->tg;
+ tg = cfs_rq->tg;
/*
* W = @wg + \Sum rw_j
*/
- W = wg + calc_tg_weight(tg, se->my_q);
+ W = wg + atomic_long_read(&tg->load_avg);
+
+ /* Ensure \Sum rw_j >= rw_i */
+ W -= cfs_rq->tg_load_avg_contrib;
+ W += w;
/*
* w = rw_i + @wl
*/
- w = cfs_rq_load_avg(se->my_q) + wl;
+ w += wl;
/*
* wl = S * s'_i; see (2)
@@ -5803,6 +5970,387 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
#endif
/*
+ * Returns the current capacity of cpu after applying both
+ * cpu and freq scaling.
+ */
+unsigned long capacity_curr_of(int cpu)
+{
+ return cpu_rq(cpu)->cpu_capacity_orig *
+ arch_scale_freq_capacity(NULL, cpu)
+ >> SCHED_CAPACITY_SHIFT;
+}
+
+struct energy_env {
+ struct sched_group *sg_top;
+ struct sched_group *sg_cap;
+ int cap_idx;
+ int util_delta;
+ int src_cpu;
+ int dst_cpu;
+ int energy;
+ int payoff;
+ struct task_struct *task;
+ struct {
+ int before;
+ int after;
+ int delta;
+ int diff;
+ } nrg;
+ struct {
+ int before;
+ int after;
+ int delta;
+ } cap;
+};
+
+/*
+ * __cpu_norm_util() returns the cpu util relative to a specific capacity,
+ * i.e. its busy ratio, in the range [0..SCHED_LOAD_SCALE] which is useful for
+ * energy calculations. Using the scale-invariant util returned by
+ * cpu_util() and approximating scale-invariant util by:
+ *
+ * util ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
+ *
+ * the normalized util can be found using the specific capacity.
+ *
+ * capacity = capacity_orig * curr_freq/max_freq
+ *
+ * norm_util = running_time/time ~ util/capacity
+ */
+static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
+{
+ int util = __cpu_util(cpu, delta);
+
+ if (util >= capacity)
+ return SCHED_CAPACITY_SCALE;
+
+ return (util << SCHED_CAPACITY_SHIFT)/capacity;
+}
+
+static int calc_util_delta(struct energy_env *eenv, int cpu)
+{
+ if (cpu == eenv->src_cpu)
+ return -eenv->util_delta;
+ if (cpu == eenv->dst_cpu)
+ return eenv->util_delta;
+ return 0;
+}
+
+static
+unsigned long group_max_util(struct energy_env *eenv)
+{
+ int i, delta;
+ unsigned long max_util = 0;
+
+ for_each_cpu(i, sched_group_cpus(eenv->sg_cap)) {
+ delta = calc_util_delta(eenv, i);
+ max_util = max(max_util, __cpu_util(i, delta));
+ }
+
+ return max_util;
+}
+
+/*
+ * group_norm_util() returns the approximated group util relative to its
+ * current capacity (busy ratio) in the range [0..SCHED_LOAD_SCALE] for use in
+ * energy calculations. Since task executions may or may not overlap in time in
+ * the group the true normalized util is between max(cpu_norm_util(i)) and
+ * sum(cpu_norm_util(i)) when iterating over all cpus in the group, i. The
+ * latter is used as the estimate as it leads to a more pessimistic energy
+ * estimate (more busy).
+ */
+static unsigned
+long group_norm_util(struct energy_env *eenv, struct sched_group *sg)
+{
+ int i, delta;
+ unsigned long util_sum = 0;
+ unsigned long capacity = sg->sge->cap_states[eenv->cap_idx].cap;
+
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ delta = calc_util_delta(eenv, i);
+ util_sum += __cpu_norm_util(i, capacity, delta);
+ }
+
+ if (util_sum > SCHED_CAPACITY_SCALE)
+ return SCHED_CAPACITY_SCALE;
+ return util_sum;
+}
+
+static int find_new_capacity(struct energy_env *eenv,
+ const struct sched_group_energy const *sge)
+{
+ int idx;
+ unsigned long util = group_max_util(eenv);
+
+ for (idx = 0; idx < sge->nr_cap_states; idx++) {
+ if (sge->cap_states[idx].cap >= util)
+ break;
+ }
+
+ eenv->cap_idx = idx;
+
+ return idx;
+}
+
+static int group_idle_state(struct sched_group *sg)
+{
+ int i, state = INT_MAX;
+
+ /* Find the shallowest idle state in the sched group. */
+ for_each_cpu(i, sched_group_cpus(sg))
+ state = min(state, idle_get_state_idx(cpu_rq(i)));
+
+ /* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
+ state++;
+
+ return state;
+}
+
+/*
+ * sched_group_energy(): Computes the absolute energy consumption of cpus
+ * belonging to the sched_group including shared resources shared only by
+ * members of the group. Iterates over all cpus in the hierarchy below the
+ * sched_group starting from the bottom working it's way up before going to
+ * the next cpu until all cpus are covered at all levels. The current
+ * implementation is likely to gather the same util statistics multiple times.
+ * This can probably be done in a faster but more complex way.
+ * Note: sched_group_energy() may fail when racing with sched_domain updates.
+ */
+static int sched_group_energy(struct energy_env *eenv)
+{
+ struct sched_domain *sd;
+ int cpu, total_energy = 0;
+ struct cpumask visit_cpus;
+ struct sched_group *sg;
+
+ WARN_ON(!eenv->sg_top->sge);
+
+ cpumask_copy(&visit_cpus, sched_group_cpus(eenv->sg_top));
+
+ while (!cpumask_empty(&visit_cpus)) {
+ struct sched_group *sg_shared_cap = NULL;
+
+ cpu = cpumask_first(&visit_cpus);
+
+ /*
+ * Is the group utilization affected by cpus outside this
+ * sched_group?
+ */
+ sd = rcu_dereference(per_cpu(sd_scs, cpu));
+
+ if (!sd)
+ /*
+ * We most probably raced with hotplug; returning a
+ * wrong energy estimation is better than entering an
+ * infinite loop.
+ */
+ return -EINVAL;
+
+ if (sd->parent)
+ sg_shared_cap = sd->parent->groups;
+
+ for_each_domain(cpu, sd) {
+ sg = sd->groups;
+
+ /* Has this sched_domain already been visited? */
+ if (sd->child && group_first_cpu(sg) != cpu)
+ break;
+
+ do {
+ unsigned long group_util;
+ int sg_busy_energy, sg_idle_energy;
+ int cap_idx, idle_idx;
+
+ if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)
+ eenv->sg_cap = sg_shared_cap;
+ else
+ eenv->sg_cap = sg;
+
+ cap_idx = find_new_capacity(eenv, sg->sge);
+
+ if (sg->group_weight == 1) {
+ /* Remove capacity of src CPU (before task move) */
+ if (eenv->util_delta == 0 &&
+ cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg))) {
+ eenv->cap.before = sg->sge->cap_states[cap_idx].cap;
+ eenv->cap.delta -= eenv->cap.before;
+ }
+ /* Add capacity of dst CPU (after task move) */
+ if (eenv->util_delta != 0 &&
+ cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg))) {
+ eenv->cap.after = sg->sge->cap_states[cap_idx].cap;
+ eenv->cap.delta += eenv->cap.after;
+ }
+ }
+
+ idle_idx = group_idle_state(sg);
+ group_util = group_norm_util(eenv, sg);
+ sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
+ >> SCHED_CAPACITY_SHIFT;
+ sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
+ * sg->sge->idle_states[idle_idx].power)
+ >> SCHED_CAPACITY_SHIFT;
+
+ total_energy += sg_busy_energy + sg_idle_energy;
+
+ if (!sd->child)
+ cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg));
+
+ if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(eenv->sg_top)))
+ goto next_cpu;
+
+ } while (sg = sg->next, sg != sd->groups);
+ }
+next_cpu:
+ cpumask_clear_cpu(cpu, &visit_cpus);
+ continue;
+ }
+
+ eenv->energy = total_energy;
+ return 0;
+}
+
+static inline bool cpu_in_sg(struct sched_group *sg, int cpu)
+{
+ return cpu != -1 && cpumask_test_cpu(cpu, sched_group_cpus(sg));
+}
+
+/*
+ * energy_diff(): Estimate the energy impact of changing the utilization
+ * distribution. eenv specifies the change: utilisation amount, source, and
+ * destination cpu. Source or destination cpu may be -1 in which case the
+ * utilization is removed from or added to the system (e.g. task wake-up). If
+ * both are specified, the utilization is migrated.
+ */
+static inline int __energy_diff(struct energy_env *eenv)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg;
+ int sd_cpu = -1, energy_before = 0, energy_after = 0;
+
+ struct energy_env eenv_before = {
+ .util_delta = 0,
+ .src_cpu = eenv->src_cpu,
+ .dst_cpu = eenv->dst_cpu,
+ .nrg = { 0, 0, 0, 0},
+ .cap = { 0, 0, 0 },
+ };
+
+ if (eenv->src_cpu == eenv->dst_cpu)
+ return 0;
+
+ sd_cpu = (eenv->src_cpu != -1) ? eenv->src_cpu : eenv->dst_cpu;
+ sd = rcu_dereference(per_cpu(sd_ea, sd_cpu));
+
+ if (!sd)
+ return 0; /* Error */
+
+ sg = sd->groups;
+
+ do {
+ if (cpu_in_sg(sg, eenv->src_cpu) || cpu_in_sg(sg, eenv->dst_cpu)) {
+ eenv_before.sg_top = eenv->sg_top = sg;
+
+ if (sched_group_energy(&eenv_before))
+ return 0; /* Invalid result abort */
+ energy_before += eenv_before.energy;
+
+ /* Keep track of SRC cpu (before) capacity */
+ eenv->cap.before = eenv_before.cap.before;
+ eenv->cap.delta = eenv_before.cap.delta;
+
+ if (sched_group_energy(eenv))
+ return 0; /* Invalid result abort */
+ energy_after += eenv->energy;
+ }
+ } while (sg = sg->next, sg != sd->groups);
+
+ eenv->nrg.before = energy_before;
+ eenv->nrg.after = energy_after;
+ eenv->nrg.diff = eenv->nrg.after - eenv->nrg.before;
+ eenv->payoff = 0;
+
+ trace_sched_energy_diff(eenv->task,
+ eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
+ eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
+ eenv->cap.before, eenv->cap.after, eenv->cap.delta,
+ eenv->nrg.delta, eenv->payoff);
+
+ return eenv->nrg.diff;
+}
+
+#ifdef CONFIG_SCHED_TUNE
+
+struct target_nrg schedtune_target_nrg;
+
+/*
+ * System energy normalization
+ * Returns the normalized value, in the range [0..SCHED_LOAD_SCALE],
+ * corresponding to the specified energy variation.
+ */
+static inline int
+normalize_energy(int energy_diff)
+{
+ u32 normalized_nrg;
+#ifdef CONFIG_SCHED_DEBUG
+ int max_delta;
+
+ /* Check for boundaries */
+ max_delta = schedtune_target_nrg.max_power;
+ max_delta -= schedtune_target_nrg.min_power;
+ WARN_ON(abs(energy_diff) >= max_delta);
+#endif
+
+ /* Do scaling using positive numbers to increase the range */
+ normalized_nrg = (energy_diff < 0) ? -energy_diff : energy_diff;
+
+ /* Scale by energy magnitude */
+ normalized_nrg <<= SCHED_LOAD_SHIFT;
+
+ /* Normalize on max energy for target platform */
+ normalized_nrg = reciprocal_divide(
+ normalized_nrg, schedtune_target_nrg.rdiv);
+
+ return (energy_diff < 0) ? -normalized_nrg : normalized_nrg;
+}
+
+static inline int
+energy_diff(struct energy_env *eenv)
+{
+ int boost = schedtune_task_boost(eenv->task);
+ int nrg_delta;
+
+ /* Compute "absolute" energy diff */
+ __energy_diff(eenv);
+
+ /* Return energy diff when boost margin is 0 */
+ if (boost == 0)
+ return eenv->nrg.diff;
+
+ /* Compute normalized energy diff */
+ nrg_delta = normalize_energy(eenv->nrg.diff);
+ eenv->nrg.delta = nrg_delta;
+
+ eenv->payoff = schedtune_accept_deltas(
+ eenv->nrg.delta,
+ eenv->cap.delta,
+ eenv->task);
+
+ /*
+ * When SchedTune is enabled, the energy_diff() function will return
+ * the computed energy payoff value. Since the energy_diff() return
+ * value is expected to be negative by its callers, this evaluation
+ * function return a negative value each time the evaluation return a
+ * positive payoff, which is the condition for the acceptance of
+ * a scheduling decision
+ */
+ return -eenv->payoff;
+}
+#else /* CONFIG_SCHED_TUNE */
+#define energy_diff(eenv) __energy_diff(eenv)
+#endif
+
+/*
* Detect M:N waker/wakee relationships via a switching-frequency heuristic.
* A waker of many should wake a different task than the one last awakened
* at a frequency roughly N times higher than one of its wakees. In order
@@ -5893,6 +6441,154 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
return 1;
}
+/* PELT utilization (se.avg.util_avg) of task p */
+static inline unsigned long task_util(struct task_struct *p)
+{
+ return p->se.avg.util_avg;
+}
+
+unsigned int capacity_margin = 1280; /* ~20% margin */
+
+static inline unsigned long boosted_task_util(struct task_struct *task);
+
+/*
+ * True if @util plus p's boosted utilization still leaves headroom on
+ * @cpu: capacity * 1024 > util * capacity_margin, i.e. with the default
+ * margin of 1280 the combined utilization must stay below ~80% of the
+ * cpu's capacity.
+ */
+static inline bool __task_fits(struct task_struct *p, int cpu, int util)
+{
+ unsigned long capacity = capacity_of(cpu);
+
+ util += boosted_task_util(p);
+
+ return (capacity * 1024) > (util * capacity_margin);
+}
+
+/*
+ * True if @cpu can ever accommodate p: trivially so when @cpu is (close
+ * to) the biggest cpu in the root domain, otherwise only if p's boosted
+ * utilization fits @cpu's capacity with margin.
+ */
+static inline bool task_fits_max(struct task_struct *p, int cpu)
+{
+ unsigned long capacity = capacity_of(cpu);
+ unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+
+ /* @cpu is a biggest-capacity cpu: everything fits */
+ if (capacity == max_capacity)
+ return true;
+
+ /* @cpu is within the margin of the biggest capacity: fits */
+ if (capacity * capacity_margin > max_capacity * 1024)
+ return true;
+
+ /* Smaller cpu: check p's utilization against it (no extra load) */
+ return __task_fits(p, cpu, 0);
+}
+
+/* True if p fits on @cpu on top of the cpu's current utilization. */
+static inline bool task_fits_spare(struct task_struct *p, int cpu)
+{
+ return __task_fits(p, cpu, cpu_util(cpu));
+}
+
+/*
+ * A cpu is over-utilized when its utilization scaled by capacity_margin
+ * exceeds its capacity (less than ~20% headroom with the default 1280).
+ */
+static bool cpu_overutilized(int cpu)
+{
+ return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+}
+
+#ifdef CONFIG_SCHED_TUNE
+
+static long
+schedtune_margin(unsigned long signal, long boost)
+{
+ long long margin = 0;
+
+ /*
+ * Signal proportional compensation (SPC)
+ *
+ * The Boost (B) value is used to compute a Margin (M) which is
+ * proportional to the complement of the original Signal (S):
+ * M = B * (SCHED_LOAD_SCALE - S), if B is positive
+ * M = B * S, if B is negative
+ * The obtained M could be used by the caller to "boost" S.
+ */
+ if (boost >= 0) {
+ margin = SCHED_LOAD_SCALE - signal;
+ margin *= boost;
+ } else
+ margin = -signal * boost;
+ /*
+ * Fast integer division by constant:
+ * Constant : (C) = 100
+ * Precision : 0.1% (P) = 0.1
+ * Reference : C * 100 / P (R) = 100000
+ *
+ * Thus:
+ * Shift bits : ceil(log(R,2)) (S) = 17
+ * Mult const : round(2^S/C) (M) = 1311
+ *
+ * i.e. margin = margin * 1311 >> 17 ~= margin / 100
+ */
+ margin *= 1311;
+ margin >>= 17;
+
+ /* Restore the sign: a negative boost yields a negative margin */
+ if (boost < 0)
+ margin *= -1;
+ return margin;
+}
+
+/*
+ * Boost margin for @cpu's utilization, 0 when no boost is configured.
+ * NOTE(review): schedtune_margin() returns long but this narrows it to
+ * int — confirm computed margins always fit.
+ */
+static inline int
+schedtune_cpu_margin(unsigned long util, int cpu)
+{
+ int boost = schedtune_cpu_boost(cpu);
+
+ if (boost == 0)
+ return 0;
+
+ return schedtune_margin(util, boost);
+}
+
+/*
+ * Boost margin for @task's utilization, 0 when the task is not boosted.
+ */
+static inline long
+schedtune_task_margin(struct task_struct *task)
+{
+ int boost = schedtune_task_boost(task);
+ unsigned long util;
+ long margin;
+
+ if (boost == 0)
+ return 0;
+
+ util = task_util(task);
+ margin = schedtune_margin(util, boost);
+
+ return margin;
+}
+
+#else /* CONFIG_SCHED_TUNE */
+
+/* SchedTune disabled: no boost margin is ever applied. */
+static inline int
+schedtune_cpu_margin(unsigned long util, int cpu)
+{
+ return 0;
+}
+
+static inline int
+schedtune_task_margin(struct task_struct *task)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SCHED_TUNE */
+
+/* @cpu's utilization plus its SchedTune boost margin (traced). */
+static inline unsigned long
+boosted_cpu_util(int cpu)
+{
+ unsigned long util = cpu_util(cpu);
+ long margin = schedtune_cpu_margin(util, cpu);
+
+ trace_sched_boost_cpu(cpu, util, margin);
+
+ return util + margin;
+}
+
+/* @task's utilization plus its SchedTune boost margin (traced). */
+static inline unsigned long
+boosted_task_util(struct task_struct *task)
+{
+ unsigned long util = task_util(task);
+ long margin = schedtune_task_margin(task);
+
+ trace_sched_boost_task(task, util, margin);
+
+ return util + margin;
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
@@ -5902,7 +6598,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int sd_flag)
{
struct sched_group *idlest = NULL, *group = sd->groups;
+ struct sched_group *fit_group = NULL, *spare_group = NULL;
unsigned long min_load = ULONG_MAX, this_load = 0;
+ unsigned long fit_capacity = ULONG_MAX;
+ unsigned long max_spare_capacity = capacity_margin - SCHED_LOAD_SCALE;
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -5910,7 +6609,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
load_idx = sd->wake_idx;
do {
- unsigned long load, avg_load;
+ unsigned long load, avg_load, spare_capacity;
int local_group;
int i;
@@ -5933,6 +6632,25 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
load = target_load(i, load_idx);
avg_load += load;
+
+ /*
+ * Look for the most energy-efficient group
+ * that can fit the task.
+ */
+ if (capacity_of(i) < fit_capacity && task_fits_spare(p, i)) {
+ fit_capacity = capacity_of(i);
+ fit_group = group;
+ }
+
+ /*
+ * Look for group which has most spare capacity on a
+ * single cpu.
+ */
+ spare_capacity = capacity_of(i) - cpu_util(i);
+ if (spare_capacity > max_spare_capacity) {
+ max_spare_capacity = spare_capacity;
+ spare_group = group;
+ }
}
/* Adjust by relative CPU capacity of the group */
@@ -5946,6 +6664,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
}
} while (group = group->next, group != sd->groups);
+ if (fit_group)
+ return fit_group;
+
+ if (spare_group)
+ return spare_group;
+
if (!idlest || 100*this_load < imbalance*min_load)
return NULL;
return idlest;
@@ -5966,7 +6690,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
- if (idle_cpu(i)) {
+ if (task_fits_spare(p, i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
if (idle && idle->exit_latency < min_exit_latency) {
@@ -5978,7 +6702,8 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
min_exit_latency = idle->exit_latency;
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
- } else if ((!idle || idle->exit_latency == min_exit_latency) &&
+ } else if (idle_cpu(i) &&
+ (!idle || idle->exit_latency == min_exit_latency) &&
rq->idle_stamp > latest_idle_timestamp) {
/*
* If equal or no active idle state, then
@@ -5987,6 +6712,13 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
*/
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
+ } else if (shallowest_idle_cpu == -1) {
+ /*
+ * If we haven't found an idle CPU yet
+ * pick a non-idle one that can fit the task as
+ * fallback.
+ */
+ shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
load = weighted_cpuload(i);
@@ -6008,15 +6740,20 @@ static int select_idle_sibling(struct task_struct *p, int target)
struct sched_domain *sd;
struct sched_group *sg;
int i = task_cpu(p);
+ int best_idle = -1;
+ int best_idle_cstate = -1;
+ int best_idle_capacity = INT_MAX;
- if (idle_cpu(target))
- return target;
+ if (!sysctl_sched_cstate_aware) {
+ if (idle_cpu(target))
+ return target;
- /*
- * If the prevous cpu is cache affine and idle, don't be stupid.
- */
- if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
- return i;
+ /*
+ * If the previous cpu is cache affine and idle, don't be stupid.
+ */
+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+ return i;
+ }
if (!sysctl_sched_wake_to_idle &&
!(current->flags & PF_WAKE_UP_IDLE) &&
@@ -6034,54 +6771,256 @@ static int select_idle_sibling(struct task_struct *p, int target)
tsk_cpus_allowed(p)))
goto next;
- for_each_cpu(i, sched_group_cpus(sg)) {
- if (i == target || !idle_cpu(i))
- goto next;
- }
+ if (sysctl_sched_cstate_aware) {
+ for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
+ struct rq *rq = cpu_rq(i);
+ int idle_idx = idle_get_state_idx(rq);
+ unsigned long new_usage = boosted_task_util(p);
+ unsigned long capacity_orig = capacity_orig_of(i);
+ if (new_usage > capacity_orig || !idle_cpu(i))
+ goto next;
+
+ if (i == target && new_usage <= capacity_curr_of(target))
+ return target;
+
+ if (best_idle < 0 || (idle_idx < best_idle_cstate && capacity_orig <= best_idle_capacity)) {
+ best_idle = i;
+ best_idle_cstate = idle_idx;
+ best_idle_capacity = capacity_orig;
+ }
+ }
+ } else {
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ if (i == target || !idle_cpu(i))
+ goto next;
+ }
- target = cpumask_first_and(sched_group_cpus(sg),
+ target = cpumask_first_and(sched_group_cpus(sg),
tsk_cpus_allowed(p));
- goto done;
+ goto done;
+ }
next:
sg = sg->next;
} while (sg != sd->groups);
}
+ if (best_idle > 0)
+ target = best_idle;
+
done:
return target;
}
-/*
- * cpu_util returns the amount of capacity of a CPU that is used by CFS
- * tasks. The unit of the return value must be the one of capacity so we can
- * compare the utilization with the capacity of the CPU that is available for
- * CFS task (ie cpu_capacity).
- *
- * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
- * recent utilization of currently non-runnable tasks on a CPU. It represents
- * the amount of utilization of a CPU in the range [0..capacity_orig] where
- * capacity_orig is the cpu_capacity available at the highest frequency
- * (arch_scale_freq_capacity()).
- * The utilization of a CPU converges towards a sum equal to or less than the
- * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
- * the running time on this CPU scaled by capacity_curr.
- *
- * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
- * higher than capacity_orig because of unfortunate rounding in
- * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
- * the average stabilizes with the new running time. We need to check that the
- * utilization stays within the range of [0..capacity_orig] and cap it if
- * necessary. Without utilization capping, a group could be seen as overloaded
- * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
- * available capacity. We allow utilization to overshoot capacity_curr (but not
- * capacity_orig) as it useful for predicting the capacity required after task
- * migrations (scheduler-driven DVFS).
- */
-static int cpu_util(int cpu)
+/*
+ * find_best_target: scan every online cpu allowed for p and pick a
+ * target that can absorb p's boosted utilization. Boosted tasks are
+ * scanned from the highest cpu id downwards. Returns -1 when no cpu
+ * has room for the task.
+ */
+static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
{
- unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
- unsigned long capacity = capacity_orig_of(cpu);
+ int iter_cpu;
+ int target_cpu = -1;
+ int target_util = 0;
+ int backup_capacity = 0;
+ int best_idle_cpu = -1;
+ int best_idle_cstate = INT_MAX;
+ int backup_cpu = -1;
+ unsigned long task_util_boosted, new_util;
+
+ task_util_boosted = boosted_task_util(p);
+ for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
+ int cur_capacity;
+ struct rq *rq;
+ int idle_idx;
+
+ /*
+ * Iterate from higher cpus for boosted tasks.
+ */
+ int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
+
+ if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
+ continue;
+
+ /*
+ * p's blocked utilization is still accounted for on prev_cpu
+ * so prev_cpu will receive a negative bias due to the double
+ * accounting. However, the blocked utilization may be zero.
+ */
+ new_util = cpu_util(i) + task_util_boosted;
- return (util >= capacity) ? capacity : util;
+ /*
+ * Ensure minimum capacity to grant the required boost.
+ * The target CPU can be already at a capacity level higher
+ * than the one required to boost the task.
+ */
+ if (new_util > capacity_orig_of(i))
+ continue;
+
+ /*
+ * Unconditionally favoring tasks that prefer idle cpus to
+ * improve latency.
+ */
+ if (idle_cpu(i) && prefer_idle) {
+ if (best_idle_cpu < 0)
+ best_idle_cpu = i;
+ continue;
+ }
+
+ cur_capacity = capacity_curr_of(i);
+ rq = cpu_rq(i);
+ idle_idx = idle_get_state_idx(rq);
+
+ if (new_util < cur_capacity) {
+ if (cpu_rq(i)->nr_running) {
+ if(prefer_idle) {
+ // NOTE(review): this keeps the
+ // HIGHEST new_util seen so far,
+ // although the original comment
+ // said "lowest" — confirm intent.
+ if (target_util == 0 ||
+ target_util < new_util) {
+ target_cpu = i;
+ target_util = new_util;
+ }
+ } else {
+ // Keeps the LOWEST new_util seen
+ // so far (original comment said
+ // "highest" — it did not match
+ // the condition below).
+ if (target_util == 0 ||
+ target_util > new_util) {
+ target_cpu = i;
+ target_util = new_util;
+ }
+ }
+ } else if (!prefer_idle) {
+ // Idle cpu: remember the one in the
+ // shallowest c-state when cstate-aware.
+ if (best_idle_cpu < 0 ||
+ (sysctl_sched_cstate_aware &&
+ best_idle_cstate > idle_idx)) {
+ best_idle_cstate = idle_idx;
+ best_idle_cpu = i;
+ }
+ }
+ } else if (backup_capacity == 0 ||
+ backup_capacity > cur_capacity) {
+ // Find a backup cpu with least capacity.
+ backup_capacity = cur_capacity;
+ backup_cpu = i;
+ }
+ }
+
+ /* Preference order: idle cpu (for prefer_idle), then the tracked
+ * target, then any idle cpu, then the least-capacity backup. */
+ if (prefer_idle && best_idle_cpu >= 0)
+ target_cpu = best_idle_cpu;
+ else if (target_cpu < 0)
+ target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
+
+ return target_cpu;
+}
+
+/*
+ * energy_aware_wake_cpu: pick a wake-up cpu for p using the energy
+ * model. Falls back to @target when no energy-aware sched domain is
+ * set up, and keeps p on its previous cpu when the move would not save
+ * energy.
+ */
+static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg, *sg_target;
+ int target_max_cap = INT_MAX;
+ int target_cpu = task_cpu(p);
+ unsigned long task_util_boosted, new_util;
+ int i;
+
+ /* Sync wakeup: prefer the waker's cpu when allowed and online */
+ if (sysctl_sched_sync_hint_enable && sync) {
+ int cpu = smp_processor_id();
+ cpumask_t search_cpus;
+ cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
+ if (cpumask_test_cpu(cpu, &search_cpus))
+ return cpu;
+ }
+
+ sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
+
+ /* No energy-aware domain: keep the caller's suggestion */
+ if (!sd)
+ return target;
+
+ sg = sd->groups;
+ sg_target = sg;
+
+ if (sysctl_sched_is_big_little) {
+
+ /*
+ * Find group with sufficient capacity. We only get here if no cpu is
+ * overutilized. We may end up overutilizing a cpu by adding the task,
+ * but that should not be any worse than select_idle_sibling().
+ * load_balance() should sort it out later as we get above the tipping
+ * point.
+ */
+ do {
+ /* Assuming all cpus are the same in group */
+ int max_cap_cpu = group_first_cpu(sg);
+
+ /*
+ * Assume smaller max capacity means more energy-efficient.
+ * Ideally we should query the energy model for the right
+ * answer but it easily ends up in an exhaustive search.
+ */
+ if (capacity_of(max_cap_cpu) < target_max_cap &&
+ task_fits_max(p, max_cap_cpu)) {
+ sg_target = sg;
+ target_max_cap = capacity_of(max_cap_cpu);
+ }
+ } while (sg = sg->next, sg != sd->groups);
+
+ task_util_boosted = boosted_task_util(p);
+ /* Find cpu with sufficient capacity */
+ for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
+ /*
+ * p's blocked utilization is still accounted for on prev_cpu
+ * so prev_cpu will receive a negative bias due to the double
+ * accounting. However, the blocked utilization may be zero.
+ */
+ new_util = cpu_util(i) + task_util_boosted;
+
+ /*
+ * Ensure minimum capacity to grant the required boost.
+ * The target CPU can be already at a capacity level higher
+ * than the one required to boost the task.
+ */
+ if (new_util > capacity_orig_of(i))
+ continue;
+
+ /* Fits at the current OPP: take it, and stop scanning
+ * as soon as a busy cpu that fits is found. */
+ if (new_util < capacity_curr_of(i)) {
+ target_cpu = i;
+ if (cpu_rq(i)->nr_running)
+ break;
+ }
+
+ /* cpu has capacity at higher OPP, keep it as fallback */
+ if (target_cpu == task_cpu(p))
+ target_cpu = i;
+ }
+ } else {
+ /*
+ * Find a cpu with sufficient capacity
+ */
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ bool boosted = schedtune_task_boost(p) > 0;
+ bool prefer_idle = schedtune_prefer_idle(p) > 0;
+#else
+ bool boosted = 0;
+ bool prefer_idle = 0;
+#endif
+ int tmp_target = find_best_target(p, boosted, prefer_idle);
+ if (tmp_target >= 0) {
+ target_cpu = tmp_target;
+ /* Boosted/latency-sensitive task on an idle cpu:
+ * skip the energy evaluation below. */
+ if ((boosted || prefer_idle) && idle_cpu(target_cpu))
+ return target_cpu;
+ }
+ }
+
+ /* Veto the migration when it would not save energy */
+ if (target_cpu != task_cpu(p)) {
+ struct energy_env eenv = {
+ .util_delta = task_util(p),
+ .src_cpu = task_cpu(p),
+ .dst_cpu = target_cpu,
+ .task = p,
+ };
+
+ /* Not enough spare capacity on previous cpu */
+ if (cpu_overutilized(task_cpu(p)))
+ return target_cpu;
+
+ if (energy_diff(&eenv) >= 0)
+ return task_cpu(p);
+ }
+
+ return target_cpu;
+}
/*
@@ -6109,7 +7048,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
return select_best_cpu(p, prev_cpu, 0, sync);
if (sd_flag & SD_BALANCE_WAKE)
- want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+ want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
+ cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) ||
+ energy_aware();
rcu_read_lock();
for_each_domain(cpu, tmp) {
@@ -6139,7 +7080,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
}
if (!sd) {
- if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
+ if (energy_aware() && !cpu_rq(cpu)->rd->overutilized)
+ new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
+ else if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
new_cpu = select_idle_sibling(p, new_cpu);
} else while (sd) {
@@ -6209,6 +7152,8 @@ static void task_dead_fair(struct task_struct *p)
{
remove_entity_load_avg(&p->se);
}
+#else
+#define task_fits_max(p, cpu) true
#endif /* CONFIG_SMP */
static unsigned long
@@ -6455,6 +7400,8 @@ again:
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
+ rq->misfit_task = !task_fits_max(p, rq->cpu);
+
return p;
simple:
cfs_rq = &rq->cfs;
@@ -6476,9 +7423,12 @@ simple:
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
+ rq->misfit_task = !task_fits_max(p, rq->cpu);
+
return p;
idle:
+ rq->misfit_task = 0;
/*
* This is OK, because current is on_cpu, which avoids it being picked
* for load-balance and preemption/IRQs are still disabled avoiding
@@ -6691,6 +7641,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
enum fbq_type { regular, remote, all };
+enum group_type {
+ group_other = 0,
+ group_misfit_task,
+ group_imbalanced,
+ group_overloaded,
+};
+
#define LBF_ALL_PINNED 0x01
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
@@ -6713,6 +7670,7 @@ struct lb_env {
int new_dst_cpu;
enum cpu_idle_type idle;
long imbalance;
+ unsigned int src_grp_nr_running;
/* The set of CPUs under consideration for load-balancing */
struct cpumask *cpus;
unsigned int busiest_grp_capacity;
@@ -6725,6 +7683,7 @@ struct lb_env {
unsigned int loop_max;
enum fbq_type fbq_type;
+ enum group_type busiest_group_type;
struct list_head tasks;
enum sched_boost_policy boost_policy;
};
@@ -7108,6 +8067,10 @@ static void attach_one_task(struct rq *rq, struct task_struct *p)
{
raw_spin_lock(&rq->lock);
attach_task(rq, p);
+ /*
+ * We want to potentially raise target_cpu's OPP.
+ */
+ update_capacity_of(cpu_of(rq));
raw_spin_unlock(&rq->lock);
}
@@ -7129,6 +8092,11 @@ static void attach_tasks(struct lb_env *env)
attach_task(env->dst_rq, p);
}
+ /*
+ * We want to potentially raise env.dst_cpu's OPP.
+ */
+ update_capacity_of(env->dst_cpu);
+
raw_spin_unlock(&env->dst_rq->lock);
}
@@ -7224,12 +8192,6 @@ static unsigned long task_h_load(struct task_struct *p)
/********** Helpers for find_busiest_group ************************/
-enum group_type {
- group_other = 0,
- group_imbalanced,
- group_overloaded,
-};
-
/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
@@ -7249,6 +8211,7 @@ struct sg_lb_stats {
unsigned int group_weight;
enum group_type group_type;
int group_no_capacity;
+ int group_misfit_task; /* A cpu has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
unsigned int nr_preferred_running;
@@ -7394,19 +8357,57 @@ static unsigned long scale_rt_capacity(int cpu)
used = div_u64(avg, total);
+ /*
+ * deadline bandwidth is defined at system level so we must
+ * weight this bandwidth with the max capacity of the system.
+ * As a reminder, avg_bw is 20bits width and
+ * scale_cpu_capacity is 10 bits width
+ */
+ used += div_u64(rq->dl.avg_bw, arch_scale_cpu_capacity(NULL, cpu));
+
if (likely(used < SCHED_CAPACITY_SCALE))
return SCHED_CAPACITY_SCALE - used;
return 1;
}
+/* Reset the root domain's max-capacity tracker (no cpu recorded yet). */
+void init_max_cpu_capacity(struct max_cpu_capacity *mcc)
+{
+ raw_spin_lock_init(&mcc->lock);
+ mcc->val = 0;
+ mcc->cpu = -1;
+}
+
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
struct sched_group *sdg = sd->groups;
+ struct max_cpu_capacity *mcc;
+ unsigned long max_capacity;
+ int max_cap_cpu;
+ unsigned long flags;
cpu_rq(cpu)->cpu_capacity_orig = capacity;
+ mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
+
+ raw_spin_lock_irqsave(&mcc->lock, flags);
+ max_capacity = mcc->val;
+ max_cap_cpu = mcc->cpu;
+
+ if ((max_capacity > capacity && max_cap_cpu == cpu) ||
+ (max_capacity < capacity)) {
+ mcc->val = capacity;
+ mcc->cpu = cpu;
+#ifdef CONFIG_SCHED_DEBUG
+ raw_spin_unlock_irqrestore(&mcc->lock, flags);
+ pr_info("CPU%d: update max cpu_capacity %lu\n", cpu, capacity);
+ goto skip_unlock;
+#endif
+ }
+ raw_spin_unlock_irqrestore(&mcc->lock, flags);
+
+skip_unlock: __attribute__ ((unused));
capacity *= scale_rt_capacity(cpu);
capacity >>= SCHED_CAPACITY_SHIFT;
@@ -7415,13 +8416,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
cpu_rq(cpu)->cpu_capacity = capacity;
sdg->sgc->capacity = capacity;
+ sdg->sgc->max_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
- unsigned long capacity;
+ unsigned long capacity, max_capacity;
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
@@ -7434,6 +8436,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
}
capacity = 0;
+ max_capacity = 0;
if (child->flags & SD_OVERLAP) {
/*
@@ -7460,11 +8463,12 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
*/
if (unlikely(!rq->sd)) {
capacity += capacity_of(cpu);
- continue;
+ } else {
+ sgc = rq->sd->groups->sgc;
+ capacity += sgc->capacity;
}
- sgc = rq->sd->groups->sgc;
- capacity += sgc->capacity;
+ max_capacity = max(capacity, max_capacity);
}
} else {
/*
@@ -7474,16 +8478,21 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
group = child->groups;
do {
+ struct sched_group_capacity *sgc = group->sgc;
+
cpumask_t *cpus = sched_group_cpus(group);
/* Revisit this later. This won't work for MT domain */
- if (!cpu_isolated(cpumask_first(cpus)))
- capacity += group->sgc->capacity;
+ if (!cpu_isolated(cpumask_first(cpus))) {
+ capacity += sgc->capacity;
+ max_capacity = max(sgc->max_capacity, max_capacity);
+ }
group = group->next;
} while (group != child->groups);
}
sdg->sgc->capacity = capacity;
+ sdg->sgc->max_capacity = max_capacity;
}
/*
@@ -7578,6 +8587,18 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
return false;
}
+
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-cpu capacity than sched_group ref. The (capacity_margin -
+ * SCHED_LOAD_SCALE) slack term means near-equal capacities do not count
+ * as smaller.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+ return sg->sgc->max_capacity + capacity_margin - SCHED_LOAD_SCALE <
+ ref->sgc->max_capacity;
+}
+
static inline enum
group_type group_classify(struct sched_group *group,
struct sg_lb_stats *sgs, struct lb_env *env)
@@ -7588,6 +8609,9 @@ group_type group_classify(struct sched_group *group,
if (sg_imbalanced(group))
return group_imbalanced;
+ if (sgs->group_misfit_task)
+ return group_misfit_task;
+
return group_other;
}
@@ -7599,14 +8623,15 @@ group_type group_classify(struct sched_group *group,
* @local_group: Does group contain this_cpu.
* @sgs: variable to hold the statistics for this group.
* @overload: Indicate more than one runnable task for any CPU.
+ * @overutilized: Indicate overutilization for any CPU.
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
int local_group, struct sg_lb_stats *sgs,
- bool *overload)
+ bool *overload, bool *overutilized)
{
unsigned long load;
- int i;
+ int i, nr_running;
memset(sgs, 0, sizeof(*sgs));
@@ -7631,7 +8656,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;
- if (rq->nr_running > 1)
+ nr_running = rq->nr_running;
+ if (nr_running > 1)
*overload = true;
#ifdef CONFIG_SCHED_HMP
@@ -7644,8 +8670,17 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->nr_preferred_running += rq->nr_preferred_running;
#endif
sgs->sum_weighted_load += weighted_cpuload(i);
- if (idle_cpu(i))
+ /*
+ * No need to call idle_cpu() if nr_running is not 0
+ */
+ if (!nr_running && idle_cpu(i))
sgs->idle_cpus++;
+
+ if (energy_aware() && cpu_overutilized(i)) {
+ *overutilized = true;
+ if (!sgs->group_misfit_task && rq->misfit_task)
+ sgs->group_misfit_task = capacity_of(i);
+ }
}
/* Isolated CPU has no weight */
@@ -7727,8 +8762,26 @@ static bool update_sd_pick_busiest(struct lb_env *env,
if (sgs->group_type < busiest->group_type)
return false;
- if (sgs->avg_load <= busiest->avg_load)
- return false;
+ if (energy_aware()) {
+ /*
+ * Candidate sg doesn't face any serious load-balance problems
+ * so don't pick it if the local sg is already filled up.
+ */
+ if (sgs->group_type == group_other &&
+ !group_has_capacity(env, &sds->local_stat))
+ return false;
+
+ if (sgs->avg_load <= busiest->avg_load)
+ return false;
+
+ /*
+ * Candidate sg has no more than one task per cpu and has higher
+ * per-cpu capacity. No reason to pull tasks to less capable cpus.
+ */
+ if (sgs->sum_nr_running <= sgs->group_weight &&
+ group_smaller_cpu_capacity(sds->local, sg))
+ return false;
+ }
/* This is the busiest node in its class. */
if (!(env->sd->flags & SD_ASYM_PACKING))
@@ -7791,7 +8844,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats tmp_sgs;
int load_idx, prefer_sibling = 0;
- bool overload = false;
+ bool overload = false, overutilized = false;
if (child && child->flags & SD_PREFER_SIBLING)
prefer_sibling = 1;
@@ -7813,7 +8866,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
}
update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
- &overload);
+ &overload, &overutilized);
if (local_group)
goto next_group;
@@ -7835,6 +8888,16 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
sgs->group_type = group_classify(sg, sgs, env);
}
+ /*
+ * Ignore task groups with misfit tasks if local group has no
+ * capacity or if per-cpu capacity isn't higher.
+ */
+ if (energy_aware() &&
+ sgs->group_type == group_misfit_task &&
+ (!group_has_capacity(env, &sds->local_stat) ||
+ !group_smaller_cpu_capacity(sg, sds->local)))
+ sgs->group_type = group_other;
+
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
sds->busiest_stat = *sgs;
@@ -7853,10 +8916,23 @@ next_group:
if (env->sd->flags & SD_NUMA)
env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+ env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
+
if (!env->sd->parent) {
/* update overload indicator if we are at root domain */
if (env->dst_rq->rd->overload != overload)
env->dst_rq->rd->overload = overload;
+
+ /* Update over-utilization (tipping point, U >= 0) indicator */
+ if (energy_aware() && env->dst_rq->rd->overutilized != overutilized) {
+ env->dst_rq->rd->overutilized = overutilized;
+ trace_sched_overutilized(overutilized);
+ }
+ } else {
+ if (energy_aware() && !env->dst_rq->rd->overutilized && overutilized) {
+ env->dst_rq->rd->overutilized = true;
+ trace_sched_overutilized(true);
+ }
}
}
@@ -8005,6 +9081,24 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
*/
if (busiest->avg_load <= sds->avg_load ||
local->avg_load >= sds->avg_load) {
+ if (energy_aware()) {
+ /* Misfitting tasks should be migrated in any case */
+ if (busiest->group_type == group_misfit_task) {
+ env->imbalance = busiest->group_misfit_task;
+ return;
+ }
+
+ /*
+ * Busiest group is overloaded, local is not, use the spare
+ * cycles to maximize throughput
+ */
+ if (busiest->group_type == group_overloaded &&
+ local->group_type <= group_misfit_task) {
+ env->imbalance = busiest->load_per_task;
+ return;
+ }
+ }
+
env->imbalance = 0;
return fix_small_imbalance(env, sds);
}
@@ -8038,6 +9132,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
(sds->avg_load - local->avg_load) * local->group_capacity
) / SCHED_CAPACITY_SCALE;
+ /* Boost imbalance to allow misfit task to be balanced. */
+ if (energy_aware() && busiest->group_type == group_misfit_task)
+ env->imbalance = max_t(long, env->imbalance,
+ busiest->group_misfit_task);
+
/*
* if *imbalance is less than the average load per runnable task
* there is no guarantee that any tasks will be moved so we'll have
@@ -8079,6 +9178,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
* this level.
*/
update_sd_lb_stats(env, &sds);
+
+ if (energy_aware() && !env->dst_rq->rd->overutilized)
+ goto out_balanced;
+
local = &sds.local_stat;
busiest = &sds.busiest_stat;
@@ -8113,6 +9216,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
busiest->group_no_capacity)
goto force_balance;
+ /* Misfitting tasks should be dealt with regardless of the avg load */
+ if (energy_aware() && busiest->group_type == group_misfit_task) {
+ goto force_balance;
+ }
+
/*
* If the local group is busier than the selected busiest group
* don't try and pull any tasks.
@@ -8136,7 +9244,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
* might end up to just move the imbalance on another group
*/
if ((busiest->group_type != group_overloaded) &&
- (local->idle_cpus <= (busiest->idle_cpus + 1)))
+ (local->idle_cpus <= (busiest->idle_cpus + 1)) &&
+ !group_smaller_cpu_capacity(sds.busiest, sds.local))
goto out_balanced;
} else {
/*
@@ -8149,6 +9258,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
}
force_balance:
+ env->busiest_group_type = busiest->group_type;
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(env, &sds);
return sds.busiest;
@@ -8264,7 +9374,8 @@ static struct rq *find_busiest_queue(struct lb_env *env,
*/
if (rq->nr_running == 1 && wl > env->imbalance &&
- !check_cpu_capacity(rq, env->sd))
+ !check_cpu_capacity(rq, env->sd) &&
+ env->busiest_group_type != group_misfit_task)
continue;
/*
@@ -8330,6 +9441,14 @@ static int need_active_balance(struct lb_env *env)
return 1;
}
+ if (energy_aware() &&
+ (capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+ env->src_rq->cfs.h_nr_running == 1 &&
+ cpu_overutilized(env->src_cpu) &&
+ !cpu_overutilized(env->dst_cpu)) {
+ return 1;
+ }
+
return unlikely(sd->nr_balance_failed >
sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
}
@@ -8473,6 +9592,11 @@ more_balance:
* ld_moved - cumulative load moved across iterations
*/
cur_ld_moved = detach_tasks(&env);
+ /*
+ * We want to potentially lower env.src_cpu's OPP.
+ */
+ if (cur_ld_moved)
+ update_capacity_of(env.src_cpu);
/*
* We've detached some tasks from busiest_rq. Every
@@ -8567,8 +9691,10 @@ no_move:
* excessive cache_hot migrations and active balances.
*/
if (idle != CPU_NEWLY_IDLE &&
- !(env.flags & LBF_BIG_TASK_ACTIVE_BALANCE))
- sd->nr_balance_failed++;
+ !(env.flags & LBF_BIG_TASK_ACTIVE_BALANCE)) {
+ if (env.src_grp_nr_running > 1)
+ sd->nr_balance_failed++;
+ }
if (need_active_balance(&env)) {
raw_spin_lock_irqsave(&busiest->lock, flags);
@@ -8720,6 +9846,7 @@ static int idle_balance(struct rq *this_rq)
struct sched_domain *sd;
int pulled_task = 0;
u64 curr_cost = 0;
+ long removed_util=0;
if (cpu_isolated(this_cpu))
return 0;
@@ -8732,8 +9859,9 @@ static int idle_balance(struct rq *this_rq)
*/
this_rq->idle_stamp = rq_clock(this_rq);
- if (this_rq->avg_idle < sysctl_sched_migration_cost ||
- !this_rq->rd->overload) {
+ if (!energy_aware() &&
+ (this_rq->avg_idle < sysctl_sched_migration_cost ||
+ !this_rq->rd->overload)) {
rcu_read_lock();
sd = rcu_dereference_check_sched_domain(this_rq->sd);
if (sd)
@@ -8745,6 +9873,17 @@ static int idle_balance(struct rq *this_rq)
raw_spin_unlock(&this_rq->lock);
+ /*
+ * If removed_util_avg is !0 we most probably migrated some task away
+ * from this_cpu. In this case we might be willing to trigger an OPP
+ * update, but we want to do so if we don't find anybody else to pull
+ * here (we will trigger an OPP update with the pulled task's enqueue
+ * anyway).
+ *
+ * Record removed_util before calling update_blocked_averages, and use
+ * it below (before returning) to see if an OPP update is required.
+ */
+ removed_util = atomic_long_read(&(this_rq->cfs).removed_util_avg);
update_blocked_averages(this_cpu);
rcu_read_lock();
for_each_domain(this_cpu, sd) {
@@ -8812,6 +9951,12 @@ out:
if (pulled_task) {
idle_exit_fair(this_rq);
this_rq->idle_stamp = 0;
+ } else if (removed_util) {
+ /*
+ * No task pulled and someone has been migrated away.
+ * Good case to trigger an OPP update.
+ */
+ update_capacity_of(this_cpu);
}
return pulled_task;
@@ -8895,6 +10040,10 @@ static int active_load_balance_cpu_stop(void *data)
p = detach_one_task(&env);
if (p) {
schedstat_inc(sd, alb_pushed);
+ /*
+ * We want to potentially lower env.src_cpu's OPP.
+ */
+ update_capacity_of(env.src_cpu);
moved = true;
} else {
schedstat_inc(sd, alb_failed);
@@ -9372,6 +10521,10 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
if (time_before(now, nohz.next_balance))
return 0;
+ if (rq->nr_running >= 2 &&
+ (!energy_aware() || cpu_overutilized(cpu)))
+ return true;
+
return (rq->nr_running >= 2);
}
@@ -9412,7 +10565,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_busy, cpu));
- if (sd) {
+ if (sd && !energy_aware()) {
sgc = sd->groups->sgc;
nr_busy = atomic_read(&sgc->nr_busy_cpus);
@@ -9523,6 +10676,17 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
+
+#ifdef CONFIG_SMP
+ if (energy_aware() &&
+ !rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) {
+ rq->rd->overutilized = true;
+ trace_sched_overutilized(true);
+ }
+
+ rq->misfit_task = !task_fits_max(curr, rq->cpu);
+#endif
+
}
/*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index acee1854c3d0..7cc74e56fde4 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -68,3 +68,9 @@ SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)
+
+/*
+ * Energy aware scheduling. Use platform energy model to guide scheduling
+ * decisions optimizing for energy efficiency.
+ */
+SCHED_FEAT(ENERGY_AWARE, false)
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 6304c5030137..652e19ea7bb0 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -74,11 +74,6 @@ inline void clear_ed_task(struct task_struct *p, struct rq *rq)
rq->ed_task = NULL;
}
-inline void set_task_last_wake(struct task_struct *p, u64 wallclock)
-{
- p->last_wake_ts = wallclock;
-}
-
inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock)
{
p->last_switch_out_ts = wallclock;
@@ -961,6 +956,16 @@ sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
+/*
+ * Scheduler tries to avoid waking up idle CPUs for tasks running
+ * in short bursts. If the task average burst is less than
+ * sysctl_sched_short_burst nanoseconds and it sleeps on an average
+ * for more than sysctl_sched_short_sleep nanoseconds, then the
+ * task is eligible for packing.
+ */
+unsigned int __read_mostly sysctl_sched_short_burst;
+unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
+
static void
_update_up_down_migrate(unsigned int *up_migrate, unsigned int *down_migrate)
{
@@ -1552,6 +1557,15 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
INIT_LIST_HEAD(&p->grp_list);
memset(&p->ravg, 0, sizeof(struct ravg));
p->cpu_cycles = 0;
+ p->ravg.curr_burst = 0;
+ /*
+ * Initialize the avg_burst to twice the threshold, so that
+ * a task would not be classified as short burst right away
+ * after fork. It takes at least 6 sleep-wakeup cycles for
+ * the avg_burst to go below the threshold.
+ */
+ p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+ p->ravg.avg_sleep_time = 0;
p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
@@ -2738,12 +2752,14 @@ done:
trace_sched_update_history(rq, p, runtime, samples, event);
}
-static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
+static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
{
delta = scale_exec_time(delta, rq);
p->ravg.sum += delta;
if (unlikely(p->ravg.sum > sched_ravg_window))
p->ravg.sum = sched_ravg_window;
+
+ return delta;
}
/*
@@ -2796,13 +2812,14 @@ static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
* IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
* depends on it!
*/
-static void update_task_demand(struct task_struct *p, struct rq *rq,
+static u64 update_task_demand(struct task_struct *p, struct rq *rq,
int event, u64 wallclock)
{
u64 mark_start = p->ravg.mark_start;
u64 delta, window_start = rq->window_start;
int new_window, nr_full_windows;
u32 window_size = sched_ravg_window;
+ u64 runtime;
new_window = mark_start < window_start;
if (!account_busy_for_task_demand(p, event)) {
@@ -2816,7 +2833,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
* it is not necessary to account those.
*/
update_history(rq, p, p->ravg.sum, 1, event);
- return;
+ return 0;
}
if (!new_window) {
@@ -2824,8 +2841,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
* The simple case - busy time contained within the existing
* window.
*/
- add_to_task_demand(rq, p, wallclock - mark_start);
- return;
+ return add_to_task_demand(rq, p, wallclock - mark_start);
}
/*
@@ -2837,13 +2853,16 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
window_start -= (u64)nr_full_windows * (u64)window_size;
/* Process (window_start - mark_start) first */
- add_to_task_demand(rq, p, window_start - mark_start);
+ runtime = add_to_task_demand(rq, p, window_start - mark_start);
/* Push new sample(s) into task's demand history */
update_history(rq, p, p->ravg.sum, 1, event);
- if (nr_full_windows)
- update_history(rq, p, scale_exec_time(window_size, rq),
- nr_full_windows, event);
+ if (nr_full_windows) {
+ u64 scaled_window = scale_exec_time(window_size, rq);
+
+ update_history(rq, p, scaled_window, nr_full_windows, event);
+ runtime += nr_full_windows * scaled_window;
+ }
/*
* Roll window_start back to current to process any remainder
@@ -2853,13 +2872,31 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
/* Process (wallclock - window_start) next */
mark_start = window_start;
- add_to_task_demand(rq, p, wallclock - mark_start);
+ runtime += add_to_task_demand(rq, p, wallclock - mark_start);
+
+ return runtime;
+}
+
+static inline void
+update_task_burst(struct task_struct *p, struct rq *rq, int event, int runtime)
+{
+ /*
+ * update_task_demand() has checks for idle task and
+ * exit task. The runtime may include the wait time,
+ * so update the burst only for the cases where the
+ * task is running.
+ */
+ if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
+ rq->curr == p))
+ p->ravg.curr_burst += runtime;
}
/* Reflect task activity on its demand and cpu's busy time statistics */
void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
u64 wallclock, u64 irqtime)
{
+ u64 runtime;
+
if (!rq->window_start || sched_disable_window_stats ||
p->ravg.mark_start == wallclock)
return;
@@ -2874,7 +2911,9 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
}
update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
- update_task_demand(p, rq, event, wallclock);
+ runtime = update_task_demand(p, rq, event, wallclock);
+ if (runtime)
+ update_task_burst(p, rq, event, runtime);
update_cpu_busy_time(p, rq, event, wallclock, irqtime);
update_task_pred_demand(rq, p, event);
done:
@@ -2960,6 +2999,8 @@ void reset_task_stats(struct task_struct *p)
p->ravg.curr_window_cpu = curr_window_ptr;
p->ravg.prev_window_cpu = prev_window_ptr;
+ p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+
/* Retain EXITING_TASK marker */
p->ravg.sum_history[0] = sum;
}
@@ -4462,6 +4503,20 @@ bool early_detection_notify(struct rq *rq, u64 wallclock)
return 0;
}
+void update_avg_burst(struct task_struct *p)
+{
+ update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
+ p->ravg.curr_burst = 0;
+}
+
+void note_task_waking(struct task_struct *p, u64 wallclock)
+{
+ u64 sleep_time = wallclock - p->last_switch_out_ts;
+
+ p->last_wake_ts = wallclock;
+ update_avg(&p->ravg.avg_sleep_time, sleep_time);
+}
+
#ifdef CONFIG_CGROUP_SCHED
u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2489140a7c51..917c94abf5bb 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -19,9 +19,10 @@
* sched_idle_set_state - Record idle state for the current CPU.
* @idle_state: State to record.
*/
-void sched_idle_set_state(struct cpuidle_state *idle_state)
+void sched_idle_set_state(struct cpuidle_state *idle_state, int index)
{
idle_set_state(this_rq(), idle_state);
+ idle_set_state_idx(this_rq(), index);
}
static int __read_mostly cpu_idle_force_poll;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 52edd6b158ed..3fe00d6fa335 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1536,6 +1536,41 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
#endif
}
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
+static void sched_rt_update_capacity_req(struct rq *rq)
+{
+ u64 total, used, age_stamp, avg;
+ s64 delta;
+
+ if (!sched_freq())
+ return;
+
+ sched_avg_update(rq);
+ /*
+ * Since we're reading these variables without serialization make sure
+ * we read them once before doing sanity checks on them.
+ */
+ age_stamp = READ_ONCE(rq->age_stamp);
+ avg = READ_ONCE(rq->rt_avg);
+ delta = rq_clock(rq) - age_stamp;
+
+ if (unlikely(delta < 0))
+ delta = 0;
+
+ total = sched_avg_period() + delta;
+
+ used = div_u64(avg, total);
+ if (unlikely(used > SCHED_CAPACITY_SCALE))
+ used = SCHED_CAPACITY_SCALE;
+
+ set_rt_cpu_capacity(rq->cpu, 1, (unsigned long)(used));
+}
+#else
+static inline void sched_rt_update_capacity_req(struct rq *rq)
+{ }
+
+#endif
+
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
struct rt_rq *rt_rq)
{
@@ -1604,8 +1639,17 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
if (prev->sched_class == &rt_sched_class)
update_curr_rt(rq);
- if (!rt_rq->rt_queued)
+ if (!rt_rq->rt_queued) {
+ /*
+ * The next task to be picked on this rq will have a lower
+ * priority than rt tasks so we can spend some time to update
+ * the capacity used by rt tasks based on the last activity.
+ * This value will then be used as an estimation of the next
+ * activity.
+ */
+ sched_rt_update_capacity_req(rq);
return NULL;
+ }
put_prev_task(rq, prev);
@@ -1679,6 +1723,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
int i;
int restrict_cluster;
int boost_on_big;
+ int pack_task, wakeup_latency, least_wakeup_latency = INT_MAX;
boost_on_big = sched_boost() == FULL_THROTTLE_BOOST &&
sched_boost_policy() == SCHED_BOOST_ON_BIG;
@@ -1695,6 +1740,8 @@ static int find_lowest_rq_hmp(struct task_struct *task)
if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
return best_cpu; /* No targets found */
+ pack_task = is_short_burst_task(task);
+
/*
* At this point we have built a mask of cpus representing the
* lowest priority tasks in the system. Now we want to elect
@@ -1720,6 +1767,20 @@ static int find_lowest_rq_hmp(struct task_struct *task)
if (!restrict_cluster)
cpu_load = scale_load_to_cpu(cpu_load, i);
+ if (pack_task) {
+ wakeup_latency = cpu_rq(i)->wakeup_latency;
+
+ if (wakeup_latency > least_wakeup_latency)
+ continue;
+
+ if (wakeup_latency < least_wakeup_latency) {
+ least_wakeup_latency = wakeup_latency;
+ min_load = cpu_load;
+ best_cpu = i;
+ continue;
+ }
+ }
+
if (cpu_load < min_load ||
(cpu_load == min_load &&
(i == prev_cpu || (best_cpu != prev_cpu &&
@@ -1728,6 +1789,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
best_cpu = i;
}
}
+
if (restrict_cluster && best_cpu != -1)
break;
}
@@ -2409,6 +2471,9 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
update_curr_rt(rq);
+ if (rq->rt.rt_nr_running)
+ sched_rt_update_capacity_req(rq);
+
watchdog(rq, p);
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f569c6fe3cbb..afccfd0878b1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -596,10 +596,18 @@ struct dl_rq {
#else
struct dl_bw dl_bw;
#endif
+ /* This is the "average utilization" for this runqueue */
+ s64 avg_bw;
};
#ifdef CONFIG_SMP
+struct max_cpu_capacity {
+ raw_spinlock_t lock;
+ unsigned long val;
+ int cpu;
+};
+
/*
* We add the notion of a root-domain which will be used to define per-domain
* variables. Each exclusive cpuset essentially defines an island domain by
@@ -618,6 +626,9 @@ struct root_domain {
/* Indicate more than one runnable task for any CPU */
bool overload;
+ /* Indicate one or more cpus over-utilized (tipping point) */
+ bool overutilized;
+
/*
* The bit corresponding to a CPU gets set here if such CPU has more
* than one runnable -deadline task (as it is below for RT tasks).
@@ -633,6 +644,9 @@ struct root_domain {
*/
cpumask_var_t rto_mask;
struct cpupri cpupri;
+
+ /* Maximum cpu capacity in the system. */
+ struct max_cpu_capacity max_cpu_capacity;
};
extern struct root_domain def_root_domain;
@@ -662,6 +676,7 @@ struct rq {
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
unsigned long last_load_update_tick;
+ unsigned int misfit_task;
#ifdef CONFIG_NO_HZ_COMMON
u64 nohz_stamp;
unsigned long nohz_flags;
@@ -669,6 +684,14 @@ struct rq {
#ifdef CONFIG_NO_HZ_FULL
unsigned long last_sched_tick;
#endif
+
+#ifdef CONFIG_CPU_QUIET
+ /* time-based average load */
+ u64 nr_last_stamp;
+ u64 nr_running_integral;
+ seqcount_t ave_seqcnt;
+#endif
+
/* capture load from *all* tasks on this cpu: */
struct load_weight load;
unsigned long nr_load_updates;
@@ -808,6 +831,7 @@ struct rq {
#ifdef CONFIG_CPU_IDLE
/* Must be inspected within a rcu lock section */
struct cpuidle_state *idle_state;
+ int idle_state_idx;
#endif
};
@@ -957,6 +981,8 @@ DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+DECLARE_PER_CPU(struct sched_domain *, sd_ea);
+DECLARE_PER_CPU(struct sched_domain *, sd_scs);
struct sched_group_capacity {
atomic_t ref;
@@ -964,7 +990,8 @@ struct sched_group_capacity {
* CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
* for a single CPU.
*/
- unsigned int capacity;
+ unsigned long capacity;
+ unsigned long max_capacity; /* Max per-cpu capacity in group */
unsigned long next_update;
int imbalance; /* XXX unrelated to capacity but shared group state */
/*
@@ -981,6 +1008,7 @@ struct sched_group {
unsigned int group_weight;
struct sched_group_capacity *sgc;
+ const struct sched_group_energy const *sge;
/*
* The CPUs this group covers.
@@ -1092,7 +1120,7 @@ extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
extern void migrate_sync_cpu(int cpu, int new_cpu);
extern void update_cluster_topology(void);
-extern void set_task_last_wake(struct task_struct *p, u64 wallclock);
+extern void note_task_waking(struct task_struct *p, u64 wallclock);
extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
extern void init_clusters(void);
extern int __init set_sched_enable_hmp(char *str);
@@ -1109,6 +1137,8 @@ extern int update_preferred_cluster(struct related_thread_group *grp,
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
+extern void update_avg_burst(struct task_struct *p);
+extern void update_avg(u64 *avg, u64 sample);
enum sched_boost_policy {
SCHED_BOOST_NONE,
@@ -1186,6 +1216,11 @@ static inline int cpu_max_power_cost(int cpu)
return cpu_rq(cpu)->cluster->max_power_cost;
}
+static inline int cpu_min_power_cost(int cpu)
+{
+ return cpu_rq(cpu)->cluster->min_power_cost;
+}
+
static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
{
return div64_u64(cycles, period);
@@ -1383,6 +1418,12 @@ static inline u64 cpu_cravg_sync(int cpu, int sync)
return load;
}
+static inline bool is_short_burst_task(struct task_struct *p)
+{
+ return p->ravg.avg_burst < sysctl_sched_short_burst &&
+ p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
+}
+
extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void pre_big_task_count_change(const struct cpumask *cpus);
extern void post_big_task_count_change(const struct cpumask *cpus);
@@ -1478,7 +1519,7 @@ static inline void set_window_start(struct rq *rq) { }
static inline void migrate_sync_cpu(int cpu, int new_cpu) {}
static inline void init_clusters(void) {}
static inline void update_cluster_topology(void) { }
-static inline void set_task_last_wake(struct task_struct *p, u64 wallclock) { }
+static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
static inline void set_task_last_switch_out(struct task_struct *p,
u64 wallclock) { }
@@ -1647,6 +1688,8 @@ static inline int alloc_related_thread_groups(void) { return 0; }
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)
+static inline void update_avg_burst(struct task_struct *p) {}
+
#endif /* CONFIG_SCHED_HMP */
/*
@@ -1913,6 +1956,7 @@ static const u32 prio_to_wmult[40] = {
#endif
#define ENQUEUE_REPLENISH 0x08
#define ENQUEUE_RESTORE 0x10
+#define ENQUEUE_WAKEUP_NEW 0x20
#define DEQUEUE_SLEEP 0x01
#define DEQUEUE_SAVE 0x02
@@ -2004,6 +2048,7 @@ extern const struct sched_class idle_sched_class;
#ifdef CONFIG_SMP
+extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
@@ -2033,6 +2078,17 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
WARN_ON(!rcu_read_lock_held());
return rq->idle_state;
}
+
+static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
+{
+ rq->idle_state_idx = idle_state_idx;
+}
+
+static inline int idle_get_state_idx(struct rq *rq)
+{
+ WARN_ON(!rcu_read_lock_held());
+ return rq->idle_state_idx;
+}
#else
static inline void idle_set_state(struct rq *rq,
struct cpuidle_state *idle_state)
@@ -2043,6 +2099,15 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
return NULL;
}
+
+static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
+{
+}
+
+static inline int idle_get_state_idx(struct rq *rq)
+{
+ return -1;
+}
#endif
#ifdef CONFIG_SYSRQ_SCHED_DEBUG
@@ -2069,7 +2134,7 @@ unsigned long to_ratio(u64 period, u64 runtime);
extern void init_entity_runnable_average(struct sched_entity *se);
-static inline void add_nr_running(struct rq *rq, unsigned count)
+static inline void __add_nr_running(struct rq *rq, unsigned count)
{
unsigned prev_nr = rq->nr_running;
@@ -2098,12 +2163,49 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
}
}
-static inline void sub_nr_running(struct rq *rq, unsigned count)
+static inline void __sub_nr_running(struct rq *rq, unsigned count)
{
sched_update_nr_prod(cpu_of(rq), count, false);
rq->nr_running -= count;
}
+#ifdef CONFIG_CPU_QUIET
+#define NR_AVE_SCALE(x) ((x) << FSHIFT)
+static inline u64 do_nr_running_integral(struct rq *rq)
+{
+ s64 nr, deltax;
+ u64 nr_running_integral = rq->nr_running_integral;
+
+ deltax = rq->clock_task - rq->nr_last_stamp;
+ nr = NR_AVE_SCALE(rq->nr_running);
+
+ nr_running_integral += nr * deltax;
+
+ return nr_running_integral;
+}
+
+static inline void add_nr_running(struct rq *rq, unsigned count)
+{
+ write_seqcount_begin(&rq->ave_seqcnt);
+ rq->nr_running_integral = do_nr_running_integral(rq);
+ rq->nr_last_stamp = rq->clock_task;
+ __add_nr_running(rq, count);
+ write_seqcount_end(&rq->ave_seqcnt);
+}
+
+static inline void sub_nr_running(struct rq *rq, unsigned count)
+{
+ write_seqcount_begin(&rq->ave_seqcnt);
+ rq->nr_running_integral = do_nr_running_integral(rq);
+ rq->nr_last_stamp = rq->clock_task;
+ __sub_nr_running(rq, count);
+ write_seqcount_end(&rq->ave_seqcnt);
+}
+#else
+#define add_nr_running __add_nr_running
+#define sub_nr_running __sub_nr_running
+#endif
+
static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
@@ -2176,10 +2278,137 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
}
#endif
+#ifdef CONFIG_SMP
+static inline unsigned long capacity_of(int cpu)
+{
+ return cpu_rq(cpu)->cpu_capacity;
+}
+
+static inline unsigned long capacity_orig_of(int cpu)
+{
+ return cpu_rq(cpu)->cpu_capacity_orig;
+}
+
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int walt_ravg_window;
+extern unsigned int walt_disabled;
+
+/*
+ * cpu_util returns the amount of capacity of a CPU that is used by CFS
+ * tasks. The unit of the return value must be the one of capacity so we can
+ * compare the utilization with the capacity of the CPU that is available for
+ * CFS task (ie cpu_capacity).
+ *
+ * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+ * recent utilization of currently non-runnable tasks on a CPU. It represents
+ * the amount of utilization of a CPU in the range [0..capacity_orig] where
+ * capacity_orig is the cpu_capacity available at the highest frequency
+ * (arch_scale_freq_capacity()).
+ * The utilization of a CPU converges towards a sum equal to or less than the
+ * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+ * the running time on this CPU scaled by capacity_curr.
+ *
+ * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+ * higher than capacity_orig because of unfortunate rounding in
+ * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+ * the average stabilizes with the new running time. We need to check that the
+ * utilization stays within the range of [0..capacity_orig] and cap it if
+ * necessary. Without utilization capping, a group could be seen as overloaded
+ * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+ * available capacity. We allow utilization to overshoot capacity_curr (but not
+ * capacity_orig) as it is useful for predicting the capacity required after task
+ * migrations (scheduler-driven DVFS).
+ */
+static inline unsigned long __cpu_util(int cpu, int delta)
+{
+ unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+ delta += util;
+ if (delta < 0)
+ return 0;
+
+ return (delta >= capacity) ? capacity : delta;
+}
+
+static inline unsigned long cpu_util(int cpu)
+{
+ return __cpu_util(cpu, 0);
+}
+
+#endif
+
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+#define capacity_max SCHED_CAPACITY_SCALE
+extern unsigned int capacity_margin;
+extern struct static_key __sched_freq;
+
+static inline bool sched_freq(void)
+{
+ return static_key_false(&__sched_freq);
+}
+
+DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
+void update_cpu_capacity_request(int cpu, bool request);
+
+static inline void set_cfs_cpu_capacity(int cpu, bool request,
+ unsigned long capacity)
+{
+ struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+
+ if (scr->cfs != capacity) {
+ scr->cfs = capacity;
+ update_cpu_capacity_request(cpu, request);
+ }
+}
+
+static inline void set_rt_cpu_capacity(int cpu, bool request,
+ unsigned long capacity)
+{
+ if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) {
+ per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity;
+ update_cpu_capacity_request(cpu, request);
+ }
+}
+
+static inline void set_dl_cpu_capacity(int cpu, bool request,
+ unsigned long capacity)
+{
+ if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) {
+ per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity;
+ update_cpu_capacity_request(cpu, request);
+ }
+}
+#else
+#define sched_freq() false
+static inline void set_cfs_cpu_capacity(int cpu, bool request,
+ unsigned long capacity)
+{ }
+static inline void set_rt_cpu_capacity(int cpu, bool request,
+ unsigned long capacity)
+{ }
+static inline void set_dl_cpu_capacity(int cpu, bool request,
+ unsigned long capacity)
+{ }
+#endif
+
+#ifdef CONFIG_SCHED_HMP
+/*
+ * HMP and EAS are orthogonal. Hopefully the compiler just elides out all code
+ * with the energy_aware() check, so that we don't even pay the comparison
+ * penalty at runtime.
+ */
+#define energy_aware() false
+#else
+static inline bool energy_aware(void)
+{
+ return sched_feat(ENERGY_AWARE);
+}
+#endif
+
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
- sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
@@ -2268,6 +2497,9 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
+extern struct rq *lock_rq_of(struct task_struct *p, unsigned long *flags);
+extern void unlock_rq_of(struct rq *rq, struct task_struct *p, unsigned long *flags);
+
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT
@@ -2340,7 +2572,8 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- raw_spin_unlock(&busiest->lock);
+ if (this_rq != busiest)
+ raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
@@ -2534,16 +2767,3 @@ static inline u64 irq_time_read(int cpu)
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-
-static inline void account_reset_rq(struct rq *rq)
-{
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
- rq->prev_irq_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT
- rq->prev_steal_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
- rq->prev_steal_time_rq = 0;
-#endif
-}
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index ee2af8e0b5ce..3c2e21c5a5a0 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -1,13 +1,108 @@
#include <linux/cgroup.h>
#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/printk.h>
+#include <linux/rcupdate.h>
#include <linux/slab.h>
+#include <trace/events/sched.h>
+
#include "sched.h"
+#include "tune.h"
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+static bool schedtune_initialized = false;
+#endif
unsigned int sysctl_sched_cfs_boost __read_mostly;
+extern struct target_nrg schedtune_target_nrg;
+
+/* Performance Boost region (B) threshold params */
+static int perf_boost_idx;
+
+/* Performance Constraint region (C) threshold params */
+static int perf_constrain_idx;
+
+/**
+ * Performance-Energy (P-E) Space thresholds constants
+ */
+struct threshold_params {
+ int nrg_gain;
+ int cap_gain;
+};
+
+/*
+ * System specific P-E space thresholds constants
+ */
+static struct threshold_params
+threshold_gains[] = {
+ { 0, 5 }, /* < 10% */
+ { 1, 5 }, /* < 20% */
+ { 2, 5 }, /* < 30% */
+ { 3, 5 }, /* < 40% */
+ { 4, 5 }, /* < 50% */
+ { 5, 4 }, /* < 60% */
+ { 5, 3 }, /* < 70% */
+ { 5, 2 }, /* < 80% */
+ { 5, 1 }, /* < 90% */
+ { 5, 0 } /* <= 100% */
+};
+
+static int
+__schedtune_accept_deltas(int nrg_delta, int cap_delta,
+ int perf_boost_idx, int perf_constrain_idx)
+{
+ int payoff = -INT_MAX;
+ int gain_idx = -1;
+
+ /* Performance Boost (B) region */
+ if (nrg_delta >= 0 && cap_delta > 0)
+ gain_idx = perf_boost_idx;
+ /* Performance Constraint (C) region */
+ else if (nrg_delta < 0 && cap_delta <= 0)
+ gain_idx = perf_constrain_idx;
+
+ /* Default: reject schedule candidate */
+ if (gain_idx == -1)
+ return payoff;
+
+ /*
+ * Evaluate "Performance Boost" vs "Energy Increase"
+ *
+ * - Performance Boost (B) region
+ *
+ * Condition: nrg_delta >= 0 && cap_delta > 0
+ * Payoff criteria:
+ * cap_gain / nrg_gain < cap_delta / nrg_delta =
+ * cap_gain * nrg_delta < cap_delta * nrg_gain
+ * Note that since both nrg_gain and nrg_delta are positive, the
+ * inequality does not change. Thus:
+ *
+ * payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
+ *
+ * - Performance Constraint (C) region
+ *
+ * Condition: nrg_delta < 0 && cap_delta <= 0
+ * Payoff criteria:
+ * cap_gain / nrg_gain > cap_delta / nrg_delta =
+ * cap_gain * nrg_delta < cap_delta * nrg_gain
+ * Note that since nrg_gain > 0 while nrg_delta < 0, the
+ * inequality changes. Thus:
+ *
+ * payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
+ *
+ * This means that, in case of same positive defined {cap,nrg}_gain
+ * for both the B and C regions, we can use the same payoff formula
+ * where a positive value represents the accept condition.
+ */
+ payoff = cap_delta * threshold_gains[gain_idx].nrg_gain;
+ payoff -= nrg_delta * threshold_gains[gain_idx].cap_gain;
+
+ return payoff;
+}
+
#ifdef CONFIG_CGROUP_SCHEDTUNE
/*
@@ -52,6 +147,15 @@ struct schedtune {
bool colocate_update_disabled;
#endif
+ /* Performance Boost (B) region threshold params */
+ int perf_boost_idx;
+
+ /* Performance Constraint (C) region threshold params */
+ int perf_constrain_idx;
+
+ /* Hint to bias scheduling of tasks on that SchedTune CGroup
+ * towards idle CPUs */
+ int prefer_idle;
};
static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
@@ -88,8 +192,42 @@ root_schedtune = {
.colocate = false,
.colocate_update_disabled = false,
#endif
+ .perf_boost_idx = 0,
+ .perf_constrain_idx = 0,
+ .prefer_idle = 0,
};
+int
+schedtune_accept_deltas(int nrg_delta, int cap_delta,
+ struct task_struct *task)
+{
+ struct schedtune *ct;
+ int perf_boost_idx;
+ int perf_constrain_idx;
+
+ /* Optimal (O) region */
+ if (nrg_delta < 0 && cap_delta > 0) {
+ trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
+ return INT_MAX;
+ }
+
+ /* Suboptimal (S) region */
+ if (nrg_delta > 0 && cap_delta < 0) {
+ trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
+ return -INT_MAX;
+ }
+
+ /* Get task specific perf Boost/Constraints indexes */
+ rcu_read_lock();
+ ct = task_schedtune(task);
+ perf_boost_idx = ct->perf_boost_idx;
+ perf_constrain_idx = ct->perf_constrain_idx;
+ rcu_read_unlock();
+
+ return __schedtune_accept_deltas(nrg_delta, cap_delta,
+ perf_boost_idx, perf_constrain_idx);
+}
+
/*
* Maximum number of boost groups to support
* When per-task boosting is used we still allow only limited number of
@@ -119,13 +257,16 @@ static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
*/
struct boost_groups {
/* Maximum boost value for all RUNNABLE tasks on a CPU */
- unsigned boost_max;
+ bool idle;
+ int boost_max;
struct {
/* The boost for tasks on that boost group */
- unsigned boost;
+ int boost;
/* Count of RUNNABLE tasks on that boost group */
unsigned tasks;
} group[BOOSTGROUPS_COUNT];
+ /* CPU's boost group locking */
+ raw_spinlock_t lock;
};
/* Boost groups affecting each CPU in the system */
@@ -246,7 +387,342 @@ static inline void init_sched_boost(struct schedtune *st) { }
#endif /* CONFIG_SCHED_HMP */
+static void
+schedtune_cpu_update(int cpu)
+{
+ struct boost_groups *bg;
+ int boost_max;
+ int idx;
+
+ bg = &per_cpu(cpu_boost_groups, cpu);
+
+ /* The root boost group is always active */
+ boost_max = bg->group[0].boost;
+ for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
+ /*
+ * A boost group affects a CPU only if it has
+ * RUNNABLE tasks on that CPU
+ */
+ if (bg->group[idx].tasks == 0)
+ continue;
+
+ boost_max = max(boost_max, bg->group[idx].boost);
+ }
+ /* Ensures boost_max is non-negative when all cgroup boost values
+ * are negative. Avoids under-accounting of cpu capacity which may cause
+ * task stacking and frequency spikes.*/
+ boost_max = max(boost_max, 0);
+ bg->boost_max = boost_max;
+}
+
+static int
+schedtune_boostgroup_update(int idx, int boost)
+{
+ struct boost_groups *bg;
+ int cur_boost_max;
+ int old_boost;
+ int cpu;
+
+ /* Update per CPU boost groups */
+ for_each_possible_cpu(cpu) {
+ bg = &per_cpu(cpu_boost_groups, cpu);
+
+ /*
+ * Keep track of current boost values to compute the per CPU
+ * maximum only when it has been affected by the new value of
+ * the updated boost group
+ */
+ cur_boost_max = bg->boost_max;
+ old_boost = bg->group[idx].boost;
+
+ /* Update the boost value of this boost group */
+ bg->group[idx].boost = boost;
+
+ /* Check if this update increase current max */
+ if (boost > cur_boost_max && bg->group[idx].tasks) {
+ bg->boost_max = boost;
+ trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
+ continue;
+ }
+
+ /* Check if this update has decreased current max */
+ if (cur_boost_max == old_boost && old_boost > boost) {
+ schedtune_cpu_update(cpu);
+ trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
+ continue;
+ }
+
+ trace_sched_tune_boostgroup_update(cpu, 0, bg->boost_max);
+ }
+
+ return 0;
+}
+
+#define ENQUEUE_TASK 1
+#define DEQUEUE_TASK -1
+
+static inline void
+schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
+{
+ struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+ int tasks = bg->group[idx].tasks + task_count;
+
+ /* Update boosted tasks count while avoiding to make it negative */
+ bg->group[idx].tasks = max(0, tasks);
+
+ trace_sched_tune_tasks_update(p, cpu, tasks, idx,
+ bg->group[idx].boost, bg->boost_max);
+
+ /* Boost group activation or deactivation on that RQ */
+ if (tasks == 1 || tasks == 0)
+ schedtune_cpu_update(cpu);
+}
+
+/*
+ * NOTE: This function must be called while holding the lock on the CPU RQ
+ */
+void schedtune_enqueue_task(struct task_struct *p, int cpu)
+{
+ struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+ unsigned long irq_flags;
+ struct schedtune *st;
+ int idx;
+
+ if (!unlikely(schedtune_initialized))
+ return;
+
+ /*
+ * When a task is marked PF_EXITING by do_exit() it's going to be
+ * dequeued and enqueued multiple times in the exit path.
+ * Thus we avoid any further update, since we do not want to change
+ * CPU boosting while the task is exiting.
+ */
+ if (p->flags & PF_EXITING)
+ return;
+
+ /*
+ * Boost group accounting is protected by a per-cpu lock and requires
+ * interrupt to be disabled to avoid race conditions for example on
+ * do_exit()::cgroup_exit() and task migration.
+ */
+ raw_spin_lock_irqsave(&bg->lock, irq_flags);
+ rcu_read_lock();
+
+ st = task_schedtune(p);
+ idx = st->idx;
+
+ schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);
+
+ rcu_read_unlock();
+ raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
+}
+
+int schedtune_allow_attach(struct cgroup_taskset *tset)
+{
+ /* We always allow tasks to be moved between existing CGroups */
+ return 0;
+}
+
+int schedtune_can_attach(struct cgroup_taskset *tset)
+{
+ struct task_struct *task;
+ struct cgroup_subsys_state *css;
+ struct boost_groups *bg;
+ unsigned long irq_flags;
+ unsigned int cpu;
+ struct rq *rq;
+ int src_bg; /* Source boost group index */
+ int dst_bg; /* Destination boost group index */
+ int tasks;
+
+ if (!unlikely(schedtune_initialized))
+ return 0;
+
+
+ cgroup_taskset_for_each(task, css, tset) {
+
+ /*
+ * Lock the CPU's RQ the task is enqueued to avoid race
+ * conditions with migration code while the task is being
+ * accounted
+ */
+ rq = lock_rq_of(task, &irq_flags);
+
+ if (!task->on_rq) {
+ unlock_rq_of(rq, task, &irq_flags);
+ continue;
+ }
+
+ /*
+ * Boost group accounting is protected by a per-cpu lock and requires
+ * interrupt to be disabled to avoid race conditions on...
+ */
+ cpu = cpu_of(rq);
+ bg = &per_cpu(cpu_boost_groups, cpu);
+ raw_spin_lock(&bg->lock);
+
+ dst_bg = css_st(css)->idx;
+ src_bg = task_schedtune(task)->idx;
+
+ /*
+ * Current task is not changing boostgroup, which can
+ * happen when the new hierarchy is in use.
+ */
+ if (unlikely(dst_bg == src_bg)) {
+ raw_spin_unlock(&bg->lock);
+ unlock_rq_of(rq, task, &irq_flags);
+ continue;
+ }
+
+ /*
+ * This is the case of a RUNNABLE task which is switching its
+ * current boost group.
+ */
+
+ /* Move task from src to dst boost group */
+ tasks = bg->group[src_bg].tasks - 1;
+ bg->group[src_bg].tasks = max(0, tasks);
+ bg->group[dst_bg].tasks += 1;
+
+ raw_spin_unlock(&bg->lock);
+ unlock_rq_of(rq, task, &irq_flags);
+
+ /* Update CPU boost group */
+ if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1)
+ schedtune_cpu_update(task_cpu(task));
+
+ }
+
+ return 0;
+}
+
+void schedtune_cancel_attach(struct cgroup_taskset *tset)
+{
+ /* This can happen only if SchedTune controller is mounted with
+ * other hierarchies and one of them fails. Since usually SchedTune is
+ * mounted on its own hierarchy, for the time being we do not implement
+ * a proper rollback mechanism */
+ WARN(1, "SchedTune cancel attach not implemented");
+}
+
+/*
+ * NOTE: This function must be called while holding the lock on the CPU RQ
+ */
+void schedtune_dequeue_task(struct task_struct *p, int cpu)
+{
+ struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+ unsigned long irq_flags;
+ struct schedtune *st;
+ int idx;
+
+ if (!unlikely(schedtune_initialized))
+ return;
+
+ /*
+ * When a task is marked PF_EXITING by do_exit() it's going to be
+ * dequeued and enqueued multiple times in the exit path.
+ * Thus we avoid any further update, since we do not want to change
+ * CPU boosting while the task is exiting.
+ * The last dequeue is already enforced by the do_exit() code path
+ * via schedtune_exit_task().
+ */
+ if (p->flags & PF_EXITING)
+ return;
+
+ /*
+ * Boost group accounting is protected by a per-cpu lock and requires
+ * interrupt to be disabled to avoid race conditions on...
+ */
+ raw_spin_lock_irqsave(&bg->lock, irq_flags);
+ rcu_read_lock();
+
+ st = task_schedtune(p);
+ idx = st->idx;
+
+ schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);
+
+ rcu_read_unlock();
+ raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
+}
+
+void schedtune_exit_task(struct task_struct *tsk)
+{
+ struct schedtune *st;
+ unsigned long irq_flags;
+ unsigned int cpu;
+ struct rq *rq;
+ int idx;
+
+ if (!unlikely(schedtune_initialized))
+ return;
+
+ rq = lock_rq_of(tsk, &irq_flags);
+ rcu_read_lock();
+
+ cpu = cpu_of(rq);
+ st = task_schedtune(tsk);
+ idx = st->idx;
+ schedtune_tasks_update(tsk, cpu, idx, DEQUEUE_TASK);
+
+ rcu_read_unlock();
+ unlock_rq_of(rq, tsk, &irq_flags);
+}
+
+int schedtune_cpu_boost(int cpu)
+{
+ struct boost_groups *bg;
+
+ bg = &per_cpu(cpu_boost_groups, cpu);
+ return bg->boost_max;
+}
+
+int schedtune_task_boost(struct task_struct *p)
+{
+ struct schedtune *st;
+ int task_boost;
+
+ /* Get task boost value */
+ rcu_read_lock();
+ st = task_schedtune(p);
+ task_boost = st->boost;
+ rcu_read_unlock();
+
+ return task_boost;
+}
+
+int schedtune_prefer_idle(struct task_struct *p)
+{
+ struct schedtune *st;
+ int prefer_idle;
+
+ /* Get prefer_idle value */
+ rcu_read_lock();
+ st = task_schedtune(p);
+ prefer_idle = st->prefer_idle;
+ rcu_read_unlock();
+
+ return prefer_idle;
+}
+
static u64
+prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct schedtune *st = css_st(css);
+
+ return st->prefer_idle;
+}
+
+static int
+prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 prefer_idle)
+{
+ struct schedtune *st = css_st(css);
+ st->prefer_idle = prefer_idle;
+
+ return 0;
+}
+
+static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct schedtune *st = css_st(css);
@@ -256,16 +732,37 @@ boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 boost)
+ s64 boost)
{
struct schedtune *st = css_st(css);
+ unsigned threshold_idx;
+ int boost_pct;
- if (boost < 0 || boost > 100)
+ if (boost < -100 || boost > 100)
return -EINVAL;
+ boost_pct = boost;
+
+ /*
+ * Update threshold params for Performance Boost (B)
+ * and Performance Constraint (C) regions.
+ * The current implementation uses the same cuts for both
+ * B and C regions.
+ */
+ threshold_idx = clamp(boost_pct, 0, 99) / 10;
+ st->perf_boost_idx = threshold_idx;
+ st->perf_constrain_idx = threshold_idx;
st->boost = boost;
- if (css == &root_schedtune.css)
+ if (css == &root_schedtune.css) {
sysctl_sched_cfs_boost = boost;
+ perf_boost_idx = threshold_idx;
+ perf_constrain_idx = threshold_idx;
+ }
+
+ /* Update CPU boost */
+ schedtune_boostgroup_update(st->idx, st->boost);
+
+ trace_sched_tune_config(st->boost);
return 0;
}
@@ -289,8 +786,13 @@ static void schedtune_attach(struct cgroup_taskset *tset)
static struct cftype files[] = {
{
.name = "boost",
- .read_u64 = boost_read,
- .write_u64 = boost_write,
+ .read_s64 = boost_read,
+ .write_s64 = boost_write,
+ },
+ {
+ .name = "prefer_idle",
+ .read_u64 = prefer_idle_read,
+ .write_u64 = prefer_idle_write,
},
#ifdef CONFIG_SCHED_HMP
{
@@ -315,26 +817,19 @@ static struct cftype files[] = {
static int
schedtune_boostgroup_init(struct schedtune *st)
{
- /* Keep track of allocated boost groups */
- allocated_group[st->idx] = st;
-
- return 0;
-}
-
-static int
-schedtune_init(void)
-{
struct boost_groups *bg;
int cpu;
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = st;
+
/* Initialize the per CPU boost groups */
for_each_possible_cpu(cpu) {
bg = &per_cpu(cpu_boost_groups, cpu);
- memset(bg, 0, sizeof(struct boost_groups));
+ bg->group[st->idx].boost = 0;
+ bg->group[st->idx].tasks = 0;
}
- pr_info(" schedtune configured to support %d boost groups\n",
- BOOSTGROUPS_COUNT);
return 0;
}
@@ -344,10 +839,8 @@ schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
struct schedtune *st;
int idx;
- if (!parent_css) {
- schedtune_init();
+ if (!parent_css)
return &root_schedtune.css;
- }
/* Allow only single level hierachies */
if (parent_css != &root_schedtune.css) {
@@ -386,6 +879,9 @@ out:
static void
schedtune_boostgroup_release(struct schedtune *st)
{
+ /* Reset this boost group */
+ schedtune_boostgroup_update(st->idx, 0);
+
/* Keep track of allocated boost groups */
allocated_group[st->idx] = NULL;
}
@@ -402,12 +898,55 @@ schedtune_css_free(struct cgroup_subsys_state *css)
struct cgroup_subsys schedtune_cgrp_subsys = {
.css_alloc = schedtune_css_alloc,
.css_free = schedtune_css_free,
+ .allow_attach = schedtune_allow_attach,
+ .can_attach = schedtune_can_attach,
+ .cancel_attach = schedtune_cancel_attach,
.legacy_cftypes = files,
.early_init = 1,
.allow_attach = subsys_cgroup_allow_attach,
.attach = schedtune_attach,
};
+static inline void
+schedtune_init_cgroups(void)
+{
+ struct boost_groups *bg;
+ int cpu;
+
+ /* Initialize the per CPU boost groups */
+ for_each_possible_cpu(cpu) {
+ bg = &per_cpu(cpu_boost_groups, cpu);
+ memset(bg, 0, sizeof(struct boost_groups));
+ }
+
+ pr_info("schedtune: configured to support %d boost groups\n",
+ BOOSTGROUPS_COUNT);
+
+ schedtune_initialized = true;
+}
+
+#else /* CONFIG_CGROUP_SCHEDTUNE */
+
+int
+schedtune_accept_deltas(int nrg_delta, int cap_delta,
+ struct task_struct *task)
+{
+ /* Optimal (O) region */
+ if (nrg_delta < 0 && cap_delta > 0) {
+ trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
+ return INT_MAX;
+ }
+
+ /* Suboptimal (S) region */
+ if (nrg_delta > 0 && cap_delta < 0) {
+ trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
+ return -INT_MAX;
+ }
+
+ return __schedtune_accept_deltas(nrg_delta, cap_delta,
+ perf_boost_idx, perf_constrain_idx);
+}
+
#endif /* CONFIG_CGROUP_SCHEDTUNE */
int
@@ -416,10 +955,183 @@ sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ unsigned threshold_idx;
+ int boost_pct;
if (ret || !write)
return ret;
+ if (sysctl_sched_cfs_boost < -100 || sysctl_sched_cfs_boost > 100)
+ return -EINVAL;
+ boost_pct = sysctl_sched_cfs_boost;
+
+ /*
+ * Update threshold params for Performance Boost (B)
+ * and Performance Constraint (C) regions.
+ * The current implementation uses the same cuts for both
+ * B and C regions.
+ */
+ threshold_idx = clamp(boost_pct, 0, 99) / 10;
+ perf_boost_idx = threshold_idx;
+ perf_constrain_idx = threshold_idx;
+
return 0;
}
+#ifdef CONFIG_SCHED_DEBUG
+static void
+schedtune_test_nrg(unsigned long delta_pwr)
+{
+ unsigned long test_delta_pwr;
+ unsigned long test_norm_pwr;
+ int idx;
+
+ /*
+ * Check normalization constants using some constant system
+ * energy values
+ */
+ pr_info("schedtune: verify normalization constants...\n");
+ for (idx = 0; idx < 6; ++idx) {
+ test_delta_pwr = delta_pwr >> idx;
+
+ /* Normalize on max energy for target platform */
+ test_norm_pwr = reciprocal_divide(
+ test_delta_pwr << SCHED_LOAD_SHIFT,
+ schedtune_target_nrg.rdiv);
+
+ pr_info("schedtune: max_pwr/2^%d: %4lu => norm_pwr: %5lu\n",
+ idx, test_delta_pwr, test_norm_pwr);
+ }
+}
+#else
+#define schedtune_test_nrg(delta_pwr)
+#endif
+
+/*
+ * Compute the min/max power consumption of a cluster and all its CPUs
+ */
+static void
+schedtune_add_cluster_nrg(
+ struct sched_domain *sd,
+ struct sched_group *sg,
+ struct target_nrg *ste)
+{
+ struct sched_domain *sd2;
+ struct sched_group *sg2;
+
+ struct cpumask *cluster_cpus;
+ char str[32];
+
+ unsigned long min_pwr;
+ unsigned long max_pwr;
+ int cpu;
+
+ /* Get Cluster energy using EM data for the first CPU */
+ cluster_cpus = sched_group_cpus(sg);
+ snprintf(str, 32, "CLUSTER[%*pbl]",
+ cpumask_pr_args(cluster_cpus));
+
+ min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;
+ max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;
+ pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+ str, min_pwr, max_pwr);
+
+ /*
+ * Keep track of this cluster's energy in the computation of the
+ * overall system energy
+ */
+ ste->min_power += min_pwr;
+ ste->max_power += max_pwr;
+
+ /* Get CPU energy using EM data for each CPU in the group */
+ for_each_cpu(cpu, cluster_cpus) {
+ /* Get a SD view for the specific CPU */
+ for_each_domain(cpu, sd2) {
+ /* Get the CPU group */
+ sg2 = sd2->groups;
+ min_pwr = sg2->sge->idle_states[sg2->sge->nr_idle_states - 1].power;
+ max_pwr = sg2->sge->cap_states[sg2->sge->nr_cap_states - 1].power;
+
+ ste->min_power += min_pwr;
+ ste->max_power += max_pwr;
+
+ snprintf(str, 32, "CPU[%d]", cpu);
+ pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+ str, min_pwr, max_pwr);
+
+ /*
+ * Assume we have EM data only at the CPU and
+ * the upper CLUSTER level
+ */
+ BUG_ON(!cpumask_equal(
+ sched_group_cpus(sg),
+ sched_group_cpus(sd2->parent->groups)
+ ));
+ break;
+ }
+ }
+}
+
+/*
+ * Initialize the constants required to compute normalized energy.
+ * The values of these constants depends on the EM data for the specific
+ * target system and topology.
+ * Thus, this function is expected to be called by the code
+ * that bind the EM to the topology information.
+ */
+static int
+schedtune_init(void)
+{
+ struct target_nrg *ste = &schedtune_target_nrg;
+ unsigned long delta_pwr = 0;
+ struct sched_domain *sd;
+ struct sched_group *sg;
+
+ pr_info("schedtune: init normalization constants...\n");
+ ste->max_power = 0;
+ ste->min_power = 0;
+
+ rcu_read_lock();
+
+ /*
+ * When EAS is in use, we always have a pointer to the highest SD
+ * which provides EM data.
+ */
+ sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));
+ if (!sd) {
+ if (energy_aware())
+ pr_warn("schedtune: no energy model data\n");
+ goto nodata;
+ }
+
+ sg = sd->groups;
+ do {
+ schedtune_add_cluster_nrg(sd, sg, ste);
+ } while (sg = sg->next, sg != sd->groups);
+
+ rcu_read_unlock();
+
+ pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+ "SYSTEM", ste->min_power, ste->max_power);
+
+ /* Compute normalization constants */
+ delta_pwr = ste->max_power - ste->min_power;
+ ste->rdiv = reciprocal_value(delta_pwr);
+ pr_info("schedtune: using normalization constants mul: %u sh1: %u sh2: %u\n",
+ ste->rdiv.m, ste->rdiv.sh1, ste->rdiv.sh2);
+
+ schedtune_test_nrg(delta_pwr);
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ schedtune_init_cgroups();
+#else
+ pr_info("schedtune: configured to support global boosting only\n");
+#endif
+
+ return 0;
+
+nodata:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+postcore_initcall(schedtune_init);
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
new file mode 100644
index 000000000000..4f6441771e4c
--- /dev/null
+++ b/kernel/sched/tune.h
@@ -0,0 +1,55 @@
+
+#ifdef CONFIG_SCHED_TUNE
+
+#include <linux/reciprocal_div.h>
+
+/*
+ * System energy normalization constants
+ */
+struct target_nrg {
+ unsigned long min_power;
+ unsigned long max_power;
+ struct reciprocal_value rdiv;
+};
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+int schedtune_cpu_boost(int cpu);
+int schedtune_task_boost(struct task_struct *tsk);
+
+int schedtune_prefer_idle(struct task_struct *tsk);
+
+void schedtune_exit_task(struct task_struct *tsk);
+
+void schedtune_enqueue_task(struct task_struct *p, int cpu);
+void schedtune_dequeue_task(struct task_struct *p, int cpu);
+
+#else /* CONFIG_CGROUP_SCHEDTUNE */
+
+#define schedtune_cpu_boost(cpu) get_sysctl_sched_cfs_boost()
+#define schedtune_task_boost(tsk) get_sysctl_sched_cfs_boost()
+
+#define schedtune_exit_task(task) do { } while (0)
+
+#define schedtune_enqueue_task(task, cpu) do { } while (0)
+#define schedtune_dequeue_task(task, cpu) do { } while (0)
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+int schedtune_normalize_energy(int energy);
+int schedtune_accept_deltas(int nrg_delta, int cap_delta,
+ struct task_struct *task);
+
+#else /* CONFIG_SCHED_TUNE */
+
+#define schedtune_cpu_boost(cpu) 0
+#define schedtune_task_boost(tsk) 0
+
+#define schedtune_exit_task(task) do { } while (0)
+
+#define schedtune_enqueue_task(task, cpu) do { } while (0)
+#define schedtune_dequeue_task(task, cpu) do { } while (0)
+
+#define schedtune_accept_deltas(nrg_delta, cap_delta, task) nrg_delta
+
+#endif /* CONFIG_SCHED_TUNE */
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
new file mode 100644
index 000000000000..07b7f84b37e2
--- /dev/null
+++ b/kernel/sched/walt.c
@@ -0,0 +1,1171 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Window Assisted Load Tracking (WALT) implementation credits:
+ * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
+ * Pavan Kumar Kondeti, Olav Haugan
+ *
+ * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
+ * and Todd Kjos
+ */
+
+#include <linux/syscore_ops.h>
+#include <linux/cpufreq.h>
+#include <trace/events/sched.h>
+#include <clocksource/arm_arch_timer.h>
+#include "sched.h"
+#include "walt.h"
+
+#define WINDOW_STATS_RECENT 0
+#define WINDOW_STATS_MAX 1
+#define WINDOW_STATS_MAX_RECENT_AVG 2
+#define WINDOW_STATS_AVG 3
+#define WINDOW_STATS_INVALID_POLICY 4
+
+#define EXITING_TASK_MARKER 0xdeaddead
+
+static __read_mostly unsigned int walt_ravg_hist_size = 5;
+static __read_mostly unsigned int walt_window_stats_policy =
+ WINDOW_STATS_MAX_RECENT_AVG;
+static __read_mostly unsigned int walt_account_wait_time = 1;
+static __read_mostly unsigned int walt_freq_account_wait_time = 0;
+static __read_mostly unsigned int walt_io_is_busy = 0;
+
+unsigned int sysctl_sched_walt_init_task_load_pct = 15;
+
+/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
+unsigned int __read_mostly walt_disabled = 0;
+
+static unsigned int max_possible_efficiency = 1024;
+static unsigned int min_possible_efficiency = 1024;
+
+/*
+ * Maximum possible frequency across all cpus. Task demand and cpu
+ * capacity (cpu_power) metrics are scaled in reference to it.
+ */
+static unsigned int max_possible_freq = 1;
+
+/*
+ * Minimum possible max_freq across all cpus. This will be same as
+ * max_possible_freq on homogeneous systems and could be different from
+ * max_possible_freq on heterogenous systems. min_max_freq is used to derive
+ * capacity (cpu_power) of cpus.
+ */
+static unsigned int min_max_freq = 1;
+
+static unsigned int max_capacity = 1024;
+static unsigned int min_capacity = 1024;
+static unsigned int max_load_scale_factor = 1024;
+static unsigned int max_possible_capacity = 1024;
+
+/* Mask of all CPUs that have max_possible_capacity */
+static cpumask_t mpc_mask = CPU_MASK_ALL;
+
+/* Window size (in ns) */
+__read_mostly unsigned int walt_ravg_window = 20000000;
+
+/* Min window size (in ns) = 10ms */
+#define MIN_SCHED_RAVG_WINDOW 10000000
+
+/* Max window size (in ns) = 1s */
+#define MAX_SCHED_RAVG_WINDOW 1000000000
+
+static unsigned int sync_cpu;
+static ktime_t ktime_last;
+static bool walt_ktime_suspended;
+
+static unsigned int task_load(struct task_struct *p)
+{
+ return p->ravg.demand;
+}
+
+void
+walt_inc_cumulative_runnable_avg(struct rq *rq,
+ struct task_struct *p)
+{
+ rq->cumulative_runnable_avg += p->ravg.demand;
+}
+
+void
+walt_dec_cumulative_runnable_avg(struct rq *rq,
+ struct task_struct *p)
+{
+ rq->cumulative_runnable_avg -= p->ravg.demand;
+ BUG_ON((s64)rq->cumulative_runnable_avg < 0);
+}
+
+static void
+fixup_cumulative_runnable_avg(struct rq *rq,
+ struct task_struct *p, s64 task_load_delta)
+{
+ rq->cumulative_runnable_avg += task_load_delta;
+ if ((s64)rq->cumulative_runnable_avg < 0)
+ panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
+ task_load_delta, task_load(p));
+}
+
+u64 walt_ktime_clock(void)
+{
+ if (unlikely(walt_ktime_suspended))
+ return ktime_to_ns(ktime_last);
+ return ktime_get_ns();
+}
+
+static void walt_resume(void)
+{
+ walt_ktime_suspended = false;
+}
+
+static int walt_suspend(void)
+{
+ ktime_last = ktime_get();
+ walt_ktime_suspended = true;
+ return 0;
+}
+
+static struct syscore_ops walt_syscore_ops = {
+ .resume = walt_resume,
+ .suspend = walt_suspend
+};
+
+static int __init walt_init_ops(void)
+{
+ register_syscore_ops(&walt_syscore_ops);
+ return 0;
+}
+late_initcall(walt_init_ops);
+
+void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
+ struct task_struct *p)
+{
+ cfs_rq->cumulative_runnable_avg += p->ravg.demand;
+}
+
+void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
+ struct task_struct *p)
+{
+ cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
+}
+
+static int exiting_task(struct task_struct *p)
+{
+ if (p->flags & PF_EXITING) {
+ if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
+ p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int __init set_walt_ravg_window(char *str)
+{
+ get_option(&str, &walt_ravg_window);
+
+ walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
+ walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
+ return 0;
+}
+
+early_param("walt_ravg_window", set_walt_ravg_window);
+
+static void
+update_window_start(struct rq *rq, u64 wallclock)
+{
+ s64 delta;
+ int nr_windows;
+
+ delta = wallclock - rq->window_start;
+ /* If the MPM global timer is cleared, set delta as 0 to avoid kernel BUG happening */
+ if (delta < 0) {
+ if (arch_timer_read_counter() == 0)
+ delta = 0;
+ else
+ BUG_ON(1);
+ }
+
+ if (delta < walt_ravg_window)
+ return;
+
+ nr_windows = div64_u64(delta, walt_ravg_window);
+ rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
+}
+
+static u64 scale_exec_time(u64 delta, struct rq *rq)
+{
+ unsigned int cur_freq = rq->cur_freq;
+ int sf;
+
+ if (unlikely(cur_freq > max_possible_freq))
+ cur_freq = rq->max_possible_freq;
+
+ /* round up div64 */
+ delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
+ max_possible_freq);
+
+ sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
+
+ delta *= sf;
+ delta >>= 10;
+
+ return delta;
+}
+
+static int cpu_is_waiting_on_io(struct rq *rq)
+{
+ if (!walt_io_is_busy)
+ return 0;
+
+ return atomic_read(&rq->nr_iowait);
+}
+
+void walt_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags, nr_windows;
+ u64 cur_jiffies_ts;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+
+ /*
+ * cputime (wallclock) uses sched_clock so use the same here for
+ * consistency.
+ */
+ delta += sched_clock() - wallclock;
+ cur_jiffies_ts = get_jiffies_64();
+
+ if (is_idle_task(curr))
+ walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
+ delta);
+
+ nr_windows = cur_jiffies_ts - rq->irqload_ts;
+
+ if (nr_windows) {
+ if (nr_windows < 10) {
+ /* Decay CPU's irqload by 3/4 for each window. */
+ rq->avg_irqload *= (3 * nr_windows);
+ rq->avg_irqload = div64_u64(rq->avg_irqload,
+ 4 * nr_windows);
+ } else {
+ rq->avg_irqload = 0;
+ }
+ rq->avg_irqload += rq->cur_irqload;
+ rq->cur_irqload = 0;
+ }
+
+ rq->cur_irqload += delta;
+ rq->irqload_ts = cur_jiffies_ts;
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+
+#define WALT_HIGH_IRQ_TIMEOUT 3
+
+u64 walt_irqload(int cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ s64 delta;
+ delta = get_jiffies_64() - rq->irqload_ts;
+
+ /*
+ * Current context can be preempted by irq and rq->irqload_ts can be
+ * updated by irq context so that delta can be negative.
+ * But this is okay and we can safely return as this means there
+ * was recent irq occurrence.
+ */
+
+ if (delta < WALT_HIGH_IRQ_TIMEOUT)
+ return rq->avg_irqload;
+ else
+ return 0;
+}
+
+int walt_cpu_high_irqload(int cpu) {
+ return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
+}
+
+static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
+ u64 irqtime, int event)
+{
+ if (is_idle_task(p)) {
+ /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
+ if (event == PICK_NEXT_TASK)
+ return 0;
+
+ /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
+ return irqtime || cpu_is_waiting_on_io(rq);
+ }
+
+ if (event == TASK_WAKE)
+ return 0;
+
+ if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
+ event == TASK_UPDATE)
+ return 1;
+
+ /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
+ return walt_freq_account_wait_time;
+}
+
+/*
+ * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
+ */
+static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock, u64 irqtime)
+{
+ int new_window, nr_full_windows = 0;
+ int p_is_curr_task = (p == rq->curr);
+ u64 mark_start = p->ravg.mark_start;
+ u64 window_start = rq->window_start;
+ u32 window_size = walt_ravg_window;
+ u64 delta;
+
+ new_window = mark_start < window_start;
+ if (new_window) {
+ nr_full_windows = div64_u64((window_start - mark_start),
+ window_size);
+ if (p->ravg.active_windows < USHRT_MAX)
+ p->ravg.active_windows++;
+ }
+
+ /* Handle per-task window rollover. We don't care about the idle
+ * task or exiting tasks. */
+ if (new_window && !is_idle_task(p) && !exiting_task(p)) {
+ u32 curr_window = 0;
+
+ if (!nr_full_windows)
+ curr_window = p->ravg.curr_window;
+
+ p->ravg.prev_window = curr_window;
+ p->ravg.curr_window = 0;
+ }
+
+ if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
+ /* account_busy_for_cpu_time() = 0, so no update to the
+ * task's current window needs to be made. This could be
+ * for example
+ *
+ * - a wakeup event on a task within the current
+ * window (!new_window below, no action required),
+ * - switching to a new task from idle (PICK_NEXT_TASK)
+ * in a new window where irqtime is 0 and we aren't
+ * waiting on IO */
+
+ if (!new_window)
+ return;
+
+ /* A new window has started. The RQ demand must be rolled
+ * over if p is the current task. */
+ if (p_is_curr_task) {
+ u64 prev_sum = 0;
+
+ /* p is either idle task or an exiting task */
+ if (!nr_full_windows) {
+ prev_sum = rq->curr_runnable_sum;
+ }
+
+ rq->prev_runnable_sum = prev_sum;
+ rq->curr_runnable_sum = 0;
+ }
+
+ return;
+ }
+
+ if (!new_window) {
+ /* account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. No rollover
+ * since we didn't start a new window. An example of this is
+ * when a task starts execution and then sleeps within the
+ * same window. */
+
+ if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
+ delta = wallclock - mark_start;
+ else
+ delta = irqtime;
+ delta = scale_exec_time(delta, rq);
+ rq->curr_runnable_sum += delta;
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.curr_window += delta;
+
+ return;
+ }
+
+ if (!p_is_curr_task) {
+ /* account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. A new window
+ * has also started, but p is not the current task, so the
+ * window is not rolled over - just split up and account
+ * as necessary into curr and prev. The window is only
+ * rolled over when a new window is processed for the current
+ * task.
+ *
+ * Irqtime can't be accounted by a task that isn't the
+ * currently running task. */
+
+ if (!nr_full_windows) {
+ /* A full window hasn't elapsed, account partial
+ * contribution to previous completed window. */
+ delta = scale_exec_time(window_start - mark_start, rq);
+ if (!exiting_task(p))
+ p->ravg.prev_window += delta;
+ } else {
+ /* Since at least one full window has elapsed,
+ * the contribution to the previous window is the
+ * full window (window_size). */
+ delta = scale_exec_time(window_size, rq);
+ if (!exiting_task(p))
+ p->ravg.prev_window = delta;
+ }
+ rq->prev_runnable_sum += delta;
+
+ /* Account piece of busy time in the current window. */
+ delta = scale_exec_time(wallclock - window_start, rq);
+ rq->curr_runnable_sum += delta;
+ if (!exiting_task(p))
+ p->ravg.curr_window = delta;
+
+ return;
+ }
+
+ if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
+ /* account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. A new window
+ * has started and p is the current task so rollover is
+ * needed. If any of these three above conditions are true
+ * then this busy time can't be accounted as irqtime.
+ *
+ * Busy time for the idle task or exiting tasks need not
+ * be accounted.
+ *
+ * An example of this would be a task that starts execution
+ * and then sleeps once a new window has begun. */
+
+ if (!nr_full_windows) {
+ /* A full window hasn't elapsed, account partial
+ * contribution to previous completed window. */
+ delta = scale_exec_time(window_start - mark_start, rq);
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.prev_window += delta;
+
+ delta += rq->curr_runnable_sum;
+ } else {
+ /* Since at least one full window has elapsed,
+ * the contribution to the previous window is the
+ * full window (window_size). */
+ delta = scale_exec_time(window_size, rq);
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.prev_window = delta;
+
+ }
+ /*
+ * Rollover for normal runnable sum is done here by overwriting
+ * the values in prev_runnable_sum and curr_runnable_sum.
+ * Rollover for new task runnable sum has completed by previous
+ * if-else statement.
+ */
+ rq->prev_runnable_sum = delta;
+
+ /* Account piece of busy time in the current window. */
+ delta = scale_exec_time(wallclock - window_start, rq);
+ rq->curr_runnable_sum = delta;
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.curr_window = delta;
+
+ return;
+ }
+
+ if (irqtime) {
+ /* account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. A new window
+ * has started and p is the current task so rollover is
+ * needed. The current task must be the idle task because
+ * irqtime is not accounted for any other task.
+ *
+ * Irqtime will be accounted each time we process IRQ activity
+ * after a period of idleness, so we know the IRQ busy time
+ * started at wallclock - irqtime. */
+
+ BUG_ON(!is_idle_task(p));
+ mark_start = wallclock - irqtime;
+
+ /* Roll window over. If IRQ busy time was just in the current
+ * window then that is all that need be accounted. */
+ rq->prev_runnable_sum = rq->curr_runnable_sum;
+ if (mark_start > window_start) {
+ rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
+ return;
+ }
+
+ /* The IRQ busy time spanned multiple windows. Process the
+ * busy time preceding the current window start first. */
+ delta = window_start - mark_start;
+ if (delta > window_size)
+ delta = window_size;
+ delta = scale_exec_time(delta, rq);
+ rq->prev_runnable_sum += delta;
+
+ /* Process the remaining IRQ busy time in the current window. */
+ delta = wallclock - window_start;
+ rq->curr_runnable_sum = scale_exec_time(delta, rq);
+
+ return;
+ }
+
+ BUG();
+}
+
+/*
+ * Decide whether the time segment ending at @event counts as busy time
+ * for @p's demand accounting (p->ravg.sum). Returns 1 for genuine busy
+ * time, 0 for sleep/wait time that must not inflate demand.
+ */
+static int account_busy_for_task_demand(struct task_struct *p, int event)
+{
+ /* No need to bother updating task demand for exiting tasks
+ * or the idle task. */
+ if (exiting_task(p) || is_idle_task(p))
+ return 0;
+
+ /* When a task is waking up it is completing a segment of non-busy
+ * time. Likewise, if wait time is not treated as busy time, then
+ * when a task begins to run or is migrated, it is not running and
+ * is completing a segment of non-busy time. */
+ if (event == TASK_WAKE || (!walt_account_wait_time &&
+ (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Called when new window is starting for a task, to record cpu usage over
+ * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
+ * when, say, a real-time task runs without preemption for several windows at a
+ * stretch.
+ */
+static void update_history(struct rq *rq, struct task_struct *p,
+ u32 runtime, int samples, int event)
+{
+ u32 *hist = &p->ravg.sum_history[0];
+ int ridx, widx;
+ u32 max = 0, avg, demand;
+ u64 sum = 0;
+
+ /* Ignore windows where task had no activity */
+ if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
+ goto done;
+
+ /* Push new 'runtime' value onto stack */
+ /* First shift the existing entries towards the oldest slots to make
+ * room for 'samples' new entries, tracking sum and max as we go. */
+ widx = walt_ravg_hist_size - 1;
+ ridx = widx - samples;
+ for (; ridx >= 0; --widx, --ridx) {
+ hist[widx] = hist[ridx];
+ sum += hist[widx];
+ if (hist[widx] > max)
+ max = hist[widx];
+ }
+
+ /* Then fill the freed most-recent slots with the new sample value. */
+ for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
+ hist[widx] = runtime;
+ sum += hist[widx];
+ if (hist[widx] > max)
+ max = hist[widx];
+ }
+
+ p->ravg.sum = 0;
+
+ /* Derive the task's demand from its history per the configured
+ * policy: most recent window, max over history, plain average, or
+ * (default) max(average, most recent). */
+ if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
+ demand = runtime;
+ } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
+ demand = max;
+ } else {
+ avg = div64_u64(sum, walt_ravg_hist_size);
+ if (walt_window_stats_policy == WINDOW_STATS_AVG)
+ demand = avg;
+ else
+ demand = max(avg, runtime);
+ }
+
+ /*
+ * A throttled deadline sched class task gets dequeued without
+ * changing p->on_rq. Since the dequeue decrements hmp stats
+ * avoid decrementing it here again.
+ */
+ if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+ !p->dl.dl_throttled))
+ fixup_cumulative_runnable_avg(rq, p, demand);
+
+ p->ravg.demand = demand;
+
+done:
+ trace_walt_update_history(rq, p, runtime, samples, event);
+ return;
+}
+
+/*
+ * Accumulate @delta ns of busy time (frequency/efficiency scaled via
+ * scale_exec_time()) into p->ravg.sum, saturating at one full window.
+ */
+static void add_to_task_demand(struct rq *rq, struct task_struct *p,
+ u64 delta)
+{
+ delta = scale_exec_time(delta, rq);
+ p->ravg.sum += delta;
+ if (unlikely(p->ravg.sum > walt_ravg_window))
+ p->ravg.sum = walt_ravg_window;
+}
+
+/*
+ * Account cpu demand of task and/or update task's cpu demand history
+ *
+ * ms = p->ravg.mark_start;
+ * wc = wallclock
+ * ws = rq->window_start
+ *
+ * Three possibilities:
+ *
+ * a) Task event is contained within one window.
+ * window_start < mark_start < wallclock
+ *
+ * ws ms wc
+ * | | |
+ * V V V
+ * |---------------|
+ *
+ * In this case, p->ravg.sum is updated *iff* event is appropriate
+ * (ex: event == PUT_PREV_TASK)
+ *
+ * b) Task event spans two windows.
+ * mark_start < window_start < wallclock
+ *
+ * ms ws wc
+ * | | |
+ * V V V
+ * -----|-------------------
+ *
+ * In this case, p->ravg.sum is updated with (ws - ms) *iff* event
+ * is appropriate, then a new window sample is recorded followed
+ * by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
+ *
+ * c) Task event spans more than two windows.
+ *
+ * ms ws_tmp ws wc
+ * | | | |
+ * V V V V
+ * ---|-------|-------|-------|-------|------
+ * | |
+ * |<------ nr_full_windows ------>|
+ *
+ * In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
+ * event is appropriate, window sample of p->ravg.sum is recorded,
+ * 'nr_full_window' samples of window_size is also recorded *iff*
+ * event is appropriate and finally p->ravg.sum is set to (wc - ws)
+ * *iff* event is appropriate.
+ *
+ * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
+ * depends on it!
+ */
+static void update_task_demand(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock)
+{
+ u64 mark_start = p->ravg.mark_start;
+ u64 delta, window_start = rq->window_start;
+ int new_window, nr_full_windows;
+ u32 window_size = walt_ravg_window;
+
+ /* !new_window corresponds to case (a) in the diagram above. */
+ new_window = mark_start < window_start;
+ if (!account_busy_for_task_demand(p, event)) {
+ if (new_window)
+ /* If the time accounted isn't being accounted as
+ * busy time, and a new window started, only the
+ * previous window need be closed out with the
+ * pre-existing demand. Multiple windows may have
+ * elapsed, but since empty windows are dropped,
+ * it is not necessary to account those. */
+ update_history(rq, p, p->ravg.sum, 1, event);
+ return;
+ }
+
+ if (!new_window) {
+ /* The simple case - busy time contained within the existing
+ * window. */
+ add_to_task_demand(rq, p, wallclock - mark_start);
+ return;
+ }
+
+ /* Busy time spans at least two windows. Temporarily rewind
+ * window_start to first window boundary after mark_start. */
+ delta = window_start - mark_start;
+ nr_full_windows = div64_u64(delta, window_size);
+ window_start -= (u64)nr_full_windows * (u64)window_size;
+
+ /* Process (window_start - mark_start) first */
+ add_to_task_demand(rq, p, window_start - mark_start);
+
+ /* Push new sample(s) into task's demand history */
+ update_history(rq, p, p->ravg.sum, 1, event);
+ if (nr_full_windows)
+ update_history(rq, p, scale_exec_time(window_size, rq),
+ nr_full_windows, event);
+
+ /* Roll window_start back to current to process any remainder
+ * in current window. */
+ window_start += (u64)nr_full_windows * (u64)window_size;
+
+ /* Process (wallclock - window_start) next */
+ mark_start = window_start;
+ add_to_task_demand(rq, p, wallclock - mark_start);
+}
+
+/* Reflect task activity on its demand and cpu's busy time statistics */
+/*
+ * Main WALT accounting entry point. Rolls the rq window forward to
+ * @wallclock, then (once the task has a valid mark_start) updates the
+ * task's demand history and the cpu busy-time sums, and finally
+ * advances p->ravg.mark_start to @wallclock. Must run under rq->lock.
+ */
+void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock, u64 irqtime)
+{
+ if (walt_disabled || !rq->window_start)
+ return;
+
+ lockdep_assert_held(&rq->lock);
+
+ update_window_start(rq, wallclock);
+
+ /* mark_start == 0 means stats were never initialized; only start
+ * the accounting clock below. */
+ if (!p->ravg.mark_start)
+ goto done;
+
+ update_task_demand(p, rq, event, wallclock);
+ update_cpu_busy_time(p, rq, event, wallclock, irqtime);
+
+done:
+ trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);
+
+ p->ravg.mark_start = wallclock;
+}
+
+/*
+ * Weak default: treat all cpus as equally efficient unless the
+ * architecture provides its own implementation.
+ */
+unsigned long __weak arch_get_cpu_efficiency(int cpu)
+{
+ return SCHED_LOAD_SCALE;
+}
+
+/*
+ * Record each possible cpu's efficiency in its rq and cache the
+ * system-wide max/min in {max,min}_possible_efficiency. The non-zero
+ * guards leave the pre-existing defaults intact if the architecture
+ * reports 0 for every cpu.
+ */
+void walt_init_cpu_efficiency(void)
+{
+ int i, efficiency;
+ unsigned int max = 0, min = UINT_MAX;
+
+ for_each_possible_cpu(i) {
+ efficiency = arch_get_cpu_efficiency(i);
+ cpu_rq(i)->efficiency = efficiency;
+
+ if (efficiency > max)
+ max = efficiency;
+ if (efficiency < min)
+ min = efficiency;
+ }
+
+ if (max)
+ max_possible_efficiency = max;
+
+ if (min)
+ min_possible_efficiency = min;
+}
+
+/*
+ * Zero all WALT stats for @p. If the task is already marked as exiting
+ * (EXITING_TASK_MARKER stashed in sum_history[0]), preserve that marker
+ * so exiting_task() keeps returning true afterwards.
+ */
+static void reset_task_stats(struct task_struct *p)
+{
+ u32 sum = 0;
+
+ if (exiting_task(p))
+ sum = EXITING_TASK_MARKER;
+
+ memset(&p->ravg, 0, sizeof(struct ravg));
+ /* Retain EXITING_TASK marker */
+ p->ravg.sum_history[0] = sum;
+}
+
+/*
+ * Start WALT accounting for a newly runnable task: stamp mark_start
+ * with the current walt time. If the rq's window machinery isn't up
+ * yet (window_start == 0), just reset the task's stats instead.
+ *
+ * NOTE(review): no lock is taken here; presumably the caller holds
+ * the task's rq lock — confirm at the call sites.
+ */
+void walt_mark_task_starting(struct task_struct *p)
+{
+ u64 wallclock;
+ struct rq *rq = task_rq(p);
+
+ if (!rq->window_start) {
+ reset_task_stats(p);
+ return;
+ }
+
+ wallclock = walt_ktime_clock();
+ p->ravg.mark_start = wallclock;
+}
+
+/*
+ * One-time initialization of rq->window_start. The sync cpu sets its
+ * own window_start from the walt clock; every other cpu copies the
+ * sync cpu's value so all windows are aligned system-wide.
+ *
+ * Locking: entered with rq->lock held. On the non-sync path we drop
+ * rq->lock, take both locks via double_rq_lock(), then release only
+ * sync_rq->lock — so rq->lock is held again on return, matching the
+ * caller's expectation.
+ */
+void walt_set_window_start(struct rq *rq)
+{
+ int cpu = cpu_of(rq);
+ struct rq *sync_rq = cpu_rq(sync_cpu);
+
+ if (rq->window_start)
+ return;
+
+ if (cpu == sync_cpu) {
+ rq->window_start = walt_ktime_clock();
+ } else {
+ raw_spin_unlock(&rq->lock);
+ double_rq_lock(rq, sync_rq);
+ rq->window_start = cpu_rq(sync_cpu)->window_start;
+ rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ raw_spin_unlock(&sync_rq->lock);
+ }
+
+ rq->curr->ravg.mark_start = rq->window_start;
+}
+
+/*
+ * If @cpu (e.g. one going offline) is the current window-sync cpu,
+ * hand the sync role over to the cpu executing this call.
+ */
+void walt_migrate_sync_cpu(int cpu)
+{
+ if (cpu == sync_cpu)
+ sync_cpu = smp_processor_id();
+}
+
+/*
+ * On task migration, move @p's contribution to the current and
+ * previous window busy sums from the source rq to @new_cpu's rq,
+ * after bringing both rqs' and the task's accounting up to date at a
+ * common wallclock. Tasks that are fully dequeued (and not waking)
+ * or exiting carry no window contribution and are skipped.
+ *
+ * Locking: for TASK_WAKING this takes both rq locks itself;
+ * NOTE(review): otherwise it presumably relies on the caller holding
+ * the relevant locks — confirm against the migration paths.
+ */
+void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
+{
+ struct rq *src_rq = task_rq(p);
+ struct rq *dest_rq = cpu_rq(new_cpu);
+ u64 wallclock;
+
+ if (!p->on_rq && p->state != TASK_WAKING)
+ return;
+
+ if (exiting_task(p)) {
+ return;
+ }
+
+ if (p->state == TASK_WAKING)
+ double_rq_lock(src_rq, dest_rq);
+
+ wallclock = walt_ktime_clock();
+
+ /* Sync both rqs' window accounting to 'wallclock' before moving
+ * the task's window contributions between them. */
+ walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
+ TASK_UPDATE, wallclock, 0);
+ walt_update_task_ravg(dest_rq->curr, dest_rq,
+ TASK_UPDATE, wallclock, 0);
+
+ walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
+
+ if (p->ravg.curr_window) {
+ src_rq->curr_runnable_sum -= p->ravg.curr_window;
+ dest_rq->curr_runnable_sum += p->ravg.curr_window;
+ }
+
+ if (p->ravg.prev_window) {
+ src_rq->prev_runnable_sum -= p->ravg.prev_window;
+ dest_rq->prev_runnable_sum += p->ravg.prev_window;
+ }
+
+ /* The sums should never go negative; clamp and warn if the
+ * bookkeeping got out of sync. */
+ if ((s64)src_rq->prev_runnable_sum < 0) {
+ src_rq->prev_runnable_sum = 0;
+ WARN_ON(1);
+ }
+ if ((s64)src_rq->curr_runnable_sum < 0) {
+ src_rq->curr_runnable_sum = 0;
+ WARN_ON(1);
+ }
+
+ trace_walt_migration_update_sum(src_rq, p);
+ trace_walt_migration_update_sum(dest_rq, p);
+
+ if (p->state == TASK_WAKING)
+ double_rq_unlock(src_rq, dest_rq);
+}
+
+/* Keep track of max/min capacity possible across CPUs "currently" */
+/*
+ * Recompute the global max_capacity/min_capacity over the rq->capacity
+ * of all *online* cpus. Callers must provide the required locking
+ * (see update_min_max_capacity()).
+ */
+static void __update_min_max_capacity(void)
+{
+ int i;
+ int max = 0, min = INT_MAX;
+
+ for_each_online_cpu(i) {
+ if (cpu_rq(i)->capacity > max)
+ max = cpu_rq(i)->capacity;
+ if (cpu_rq(i)->capacity < min)
+ min = cpu_rq(i)->capacity;
+ }
+
+ max_capacity = max;
+ min_capacity = min;
+}
+
+/*
+ * Locked wrapper around __update_min_max_capacity(): takes every
+ * possible cpu's rq lock with irqs disabled so the min/max snapshot
+ * is consistent across cpus.
+ */
+static void update_min_max_capacity(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for_each_possible_cpu(i)
+ raw_spin_lock(&cpu_rq(i)->lock);
+
+ __update_min_max_capacity();
+
+ for_each_possible_cpu(i)
+ raw_spin_unlock(&cpu_rq(i)->lock);
+ local_irq_restore(flags);
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
+ * least efficient cpu gets capacity of 1024
+ */
+static unsigned long capacity_scale_cpu_efficiency(int cpu)
+{
+ /* min_possible_efficiency is established in walt_init_cpu_efficiency() */
+ return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
+ * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
+ */
+static unsigned long capacity_scale_cpu_freq(int cpu)
+{
+ /* min_max_freq is maintained by cpufreq_notifier_policy() */
+ return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
+ * that "most" efficient cpu gets a load_scale_factor of 1
+ */
+static unsigned long load_scale_cpu_efficiency(int cpu)
+{
+ /* Round up so the most efficient cpu still yields a factor of 1024 */
+ return DIV_ROUND_UP(1024 * max_possible_efficiency,
+ cpu_rq(cpu)->efficiency);
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to cpu with best max_freq
+ * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
+ * of 1.
+ */
+static unsigned long load_scale_cpu_freq(int cpu)
+{
+ /* max_possible_freq is maintained by cpufreq_notifier_policy() */
+ return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
+}
+
+/*
+ * Combined capacity of @cpu relative to the least capable cpu:
+ * product of the efficiency and max-frequency scale factors. Each
+ * factor is in units of 1024, so shift right by 10 after each multiply
+ * to keep the result in the same 1024-based scale.
+ */
+static int compute_capacity(int cpu)
+{
+ int capacity = 1024;
+
+ capacity *= capacity_scale_cpu_efficiency(cpu);
+ capacity >>= 10;
+
+ capacity *= capacity_scale_cpu_freq(cpu);
+ capacity >>= 10;
+
+ return capacity;
+}
+
+/*
+ * Combined load scale factor of @cpu relative to the most capable cpu.
+ * As in compute_capacity(), each sub-factor is 1024-based, hence the
+ * >> 10 after every multiplication.
+ */
+static int compute_load_scale_factor(int cpu)
+{
+ int load_scale = 1024;
+
+ /*
+ * load_scale_factor accounts for the fact that task load
+ * is in reference to "best" performing cpu. Task's load will need to be
+ * scaled (up) by a factor to determine suitability to be placed on a
+ * (little) cpu.
+ */
+ load_scale *= load_scale_cpu_efficiency(cpu);
+ load_scale >>= 10;
+
+ load_scale *= load_scale_cpu_freq(cpu);
+ load_scale >>= 10;
+
+ return load_scale;
+}
+
+/*
+ * cpufreq policy-change callback. Refreshes per-rq frequency limits
+ * and the freq-domain cpumask for all cpus covered by @policy, then,
+ * when a cpu's max_freq (or a global bound) actually changed,
+ * recomputes capacity and load_scale_factor — for the whole system if
+ * min_max_freq/max_possible_freq moved, otherwise just for the
+ * affected cluster.
+ */
+static int cpufreq_notifier_policy(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+ int i, update_max = 0;
+ u64 highest_mpc = 0, highest_mplsf = 0;
+ const struct cpumask *cpus = policy->related_cpus;
+ unsigned int orig_min_max_freq = min_max_freq;
+ unsigned int orig_max_possible_freq = max_possible_freq;
+ /* Initialized to policy->max in case policy->related_cpus is empty! */
+ unsigned int orig_max_freq = policy->max;
+
+ if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
+ val != CPUFREQ_CREATE_POLICY)
+ return 0;
+
+ if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
+ update_min_max_capacity();
+ return 0;
+ }
+
+ for_each_cpu(i, policy->related_cpus) {
+ cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
+ policy->related_cpus);
+ orig_max_freq = cpu_rq(i)->max_freq;
+ cpu_rq(i)->min_freq = policy->min;
+ cpu_rq(i)->max_freq = policy->max;
+ cpu_rq(i)->cur_freq = policy->cur;
+ cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
+ }
+
+ /* Track global frequency bounds; min_max_freq == 1 is the initial
+ * sentinel meaning "not yet known". */
+ max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
+ if (min_max_freq == 1)
+ min_max_freq = UINT_MAX;
+ min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
+ BUG_ON(!min_max_freq);
+ BUG_ON(!policy->max);
+
+ /* Changes to policy other than max_freq don't require any updates */
+ if (orig_max_freq == policy->max)
+ return 0;
+
+ /*
+ * A changed min_max_freq or max_possible_freq (possible during bootup)
+ * needs to trigger re-computation of load_scale_factor and capacity for
+ * all possible cpus (even those offline). It also needs to trigger
+ * re-computation of nr_big_task count on all online cpus.
+ *
+ * A changed rq->max_freq otoh needs to trigger re-computation of
+ * load_scale_factor and capacity for just the cluster of cpus involved.
+ * Since small task definition depends on max_load_scale_factor, a
+ * changed load_scale_factor of one cluster could influence
+ * classification of tasks in another cluster. Hence a changed
+ * rq->max_freq will need to trigger re-computation of nr_big_task
+ * count on all online cpus.
+ *
+ * While it should be sufficient for nr_big_tasks to be
+ * re-computed for only online cpus, we have inadequate context
+ * information here (in policy notifier) with regard to hotplug-safety
+ * context in which notification is issued. As a result, we can't use
+ * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
+ * fixed up to issue notification always in hotplug-safe context,
+ * re-compute nr_big_task for all possible cpus.
+ */
+
+ if (orig_min_max_freq != min_max_freq ||
+ orig_max_possible_freq != max_possible_freq) {
+ cpus = cpu_possible_mask;
+ update_max = 1;
+ }
+
+ /*
+ * Changed load_scale_factor can trigger reclassification of tasks as
+ * big or small. Make this change "atomic" so that tasks are accounted
+ * properly due to changed load_scale_factor
+ */
+ for_each_cpu(i, cpus) {
+ struct rq *rq = cpu_rq(i);
+
+ rq->capacity = compute_capacity(i);
+ rq->load_scale_factor = compute_load_scale_factor(i);
+
+ if (update_max) {
+ u64 mpc, mplsf;
+
+ /* mpc/mplsf: capacity and load scale factor at the
+ * cpu's maximum possible (not just current policy)
+ * frequency. */
+ mpc = div_u64(((u64) rq->capacity) *
+ rq->max_possible_freq, rq->max_freq);
+ rq->max_possible_capacity = (int) mpc;
+
+ mplsf = div_u64(((u64) rq->load_scale_factor) *
+ rq->max_possible_freq, rq->max_freq);
+
+ /* mpc_mask collects all cpus sharing the highest
+ * max-possible capacity. */
+ if (mpc > highest_mpc) {
+ highest_mpc = mpc;
+ cpumask_clear(&mpc_mask);
+ cpumask_set_cpu(i, &mpc_mask);
+ } else if (mpc == highest_mpc) {
+ cpumask_set_cpu(i, &mpc_mask);
+ }
+
+ if (mplsf > highest_mplsf)
+ highest_mplsf = mplsf;
+ }
+ }
+
+ if (update_max) {
+ max_possible_capacity = highest_mpc;
+ max_load_scale_factor = highest_mplsf;
+ }
+
+ __update_min_max_capacity();
+
+ return 0;
+}
+
+/*
+ * cpufreq frequency-transition callback. On POSTCHANGE, for every cpu
+ * in the same frequency domain: fold the time elapsed at the old
+ * frequency into its WALT window accounting (under rq->lock), then
+ * record the new cur_freq.
+ */
+static int cpufreq_notifier_trans(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
+ unsigned int cpu = freq->cpu, new_freq = freq->new;
+ unsigned long flags;
+ int i;
+
+ if (val != CPUFREQ_POSTCHANGE)
+ return 0;
+
+ BUG_ON(!new_freq);
+
+ /* Nothing to do if the frequency did not actually change. */
+ if (cpu_rq(cpu)->cur_freq == new_freq)
+ return 0;
+
+ for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
+ struct rq *rq = cpu_rq(i);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
+ walt_ktime_clock(), 0);
+ rq->cur_freq = new_freq;
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+ return 0;
+}
+
+/* cpufreq hooks registered by register_sched_callback() below. */
+static struct notifier_block notifier_policy_block = {
+ .notifier_call = cpufreq_notifier_policy
+};
+
+static struct notifier_block notifier_trans_block = {
+ .notifier_call = cpufreq_notifier_trans
+};
+
+/*
+ * Register the cpufreq policy and transition notifiers used by WALT.
+ *
+ * NOTE(review): 'ret' is computed but the function unconditionally
+ * returns 0, so a failed registration is silently ignored — confirm
+ * this best-effort behavior is intentional.
+ */
+static int register_sched_callback(void)
+{
+ int ret;
+
+ ret = cpufreq_register_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ if (!ret)
+ ret = cpufreq_register_notifier(&notifier_trans_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return 0;
+}
+
+/*
+ * cpufreq callbacks can be registered at core_initcall or later time.
+ * Any registration done prior to that is "forgotten" by cpufreq. See
+ * initialization of variable init_cpufreq_transition_notifier_list_called
+ * for further information.
+ */
+core_initcall(register_sched_callback);
+
+/*
+ * Seed a new task's demand and window history with an initial load:
+ * by default sysctl_sched_walt_init_task_load_pct percent of a window,
+ * overridden by the forking task's (current's) init_load_pct when set.
+ * The child's own init_load_pct is cleared so it does not inherit
+ * further.
+ */
+void walt_init_new_task_load(struct task_struct *p)
+{
+ int i;
+ u32 init_load_windows =
+ div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
+ (u64)walt_ravg_window, 100);
+ u32 init_load_pct = current->init_load_pct;
+
+ p->init_load_pct = 0;
+ memset(&p->ravg, 0, sizeof(struct ravg));
+
+ if (init_load_pct) {
+ init_load_windows = div64_u64((u64)init_load_pct *
+ (u64)walt_ravg_window, 100);
+ }
+
+ p->ravg.demand = init_load_windows;
+ for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+ p->ravg.sum_history[i] = init_load_windows;
+}
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
new file mode 100644
index 000000000000..e181c87a928d
--- /dev/null
+++ b/kernel/sched/walt.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WALT_H
+#define __WALT_H
+
+#ifdef CONFIG_SCHED_WALT
+
+void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime);
+void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
+void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
+void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+ struct task_struct *p);
+void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+ struct task_struct *p);
+void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
+void walt_init_new_task_load(struct task_struct *p);
+void walt_mark_task_starting(struct task_struct *p);
+void walt_set_window_start(struct rq *rq);
+void walt_migrate_sync_cpu(int cpu);
+void walt_init_cpu_efficiency(void);
+u64 walt_ktime_clock(void);
+void walt_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
+ u64 wallclock);
+
+u64 walt_irqload(int cpu);
+int walt_cpu_high_irqload(int cpu);
+
+#else /* CONFIG_SCHED_WALT */
+
+static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock, u64 irqtime) { }
+static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
+static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
+static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+ struct task_struct *p) { }
+static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+ struct task_struct *p) { }
+static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void walt_init_new_task_load(struct task_struct *p) { }
+static inline void walt_mark_task_starting(struct task_struct *p) { }
+static inline void walt_set_window_start(struct rq *rq) { }
+static inline void walt_migrate_sync_cpu(int cpu) { }
+static inline void walt_init_cpu_efficiency(void) { }
+static inline u64 walt_ktime_clock(void) { return 0; }
+
+#endif /* CONFIG_SCHED_WALT */
+
+extern unsigned int walt_disabled;
+
+#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b7cbd7940f7b..12ea4f09c04b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -507,6 +507,20 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
.extra2 = &three,
},
+ {
+ .procname = "sched_short_burst_ns",
+ .data = &sysctl_sched_short_burst,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sched_short_sleep_ns",
+ .data = &sysctl_sched_short_sleep,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_DEBUG
{
@@ -528,6 +542,34 @@ static struct ctl_table kern_table[] = {
.extra2 = &max_sched_granularity_ns,
},
{
+ .procname = "sched_is_big_little",
+ .data = &sysctl_sched_is_big_little,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sched_sync_hint_enable",
+ .data = &sysctl_sched_sync_hint_enable,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sched_initial_task_util",
+ .data = &sysctl_sched_initial_task_util,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sched_cstate_aware",
+ .data = &sysctl_sched_cstate_aware,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "sched_wakeup_granularity_ns",
.data = &sysctl_sched_wakeup_granularity,
.maxlen = sizeof(unsigned int),
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1347882d131e..b98810d2f3b4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
/* cs is a watchdog. */
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+ }
+ spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+ struct clocksource *cs, *old_wd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&watchdog_lock, flags);
+ /* save current watchdog */
+ old_wd = watchdog;
+ if (fallback)
+ watchdog = NULL;
+
+ list_for_each_entry(cs, &clocksource_list, list) {
+ /* cs is a clocksource to be watched. */
+ if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
+ continue;
+
+ /* Skip current if we were requested for a fallback. */
+ if (fallback && cs == old_wd)
+ continue;
+
/* Pick the best watchdog. */
- if (!watchdog || cs->rating > watchdog->rating) {
+ if (!watchdog || cs->rating > watchdog->rating)
watchdog = cs;
- /* Reset watchdog cycles */
- clocksource_reset_watchdog();
- }
}
+ /* If we failed to find a fallback restore the old one. */
+ if (!watchdog)
+ watchdog = old_wd;
+
+ /* If we changed the watchdog we need to reset cycles. */
+ if (watchdog != old_wd)
+ clocksource_reset_watchdog();
+
/* Check if the watchdog timer needs to be started. */
clocksource_start_watchdog();
spin_unlock_irqrestore(&watchdog_lock, flags);
@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}
+static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs);
clocksource_select();
+ clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
mutex_lock(&clocksource_mutex);
__clocksource_change_rating(cs, rating);
clocksource_select();
+ clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating);
*/
static int clocksource_unbind(struct clocksource *cs)
{
- /*
- * I really can't convince myself to support this on hardware
- * designed by lobotomized monkeys.
- */
- if (clocksource_is_watchdog(cs))
- return -EBUSY;
+ if (clocksource_is_watchdog(cs)) {
+ /* Select and try to install a replacement watchdog. */
+ clocksource_select_watchdog(true);
+ if (clocksource_is_watchdog(cs))
+ return -EBUSY;
+ }
if (cs == curr_clocksource) {
/* Select and try to install a replacement clock source */
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index c3914e8f87b0..271d83e30d19 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -95,6 +95,9 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
};
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+ /* Make sure we catch unsupported clockids */
+ [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
+
[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
@@ -103,7 +106,9 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
- return hrtimer_clock_to_base_table[clock_id];
+ int base = hrtimer_clock_to_base_table[clock_id];
+ BUG_ON(base == HRTIMER_MAX_CLOCK_BASES);
+ return base;
}
/*
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 149cc8086aea..ab861771e37f 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -674,8 +674,24 @@ int ntp_validate_timex(struct timex *txc)
return -EINVAL;
}
- if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
- return -EPERM;
+ if (txc->modes & ADJ_SETOFFSET) {
+ /* In order to inject time, you gotta be super-user! */
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ if (txc->modes & ADJ_NANO) {
+ struct timespec ts;
+
+ ts.tv_sec = txc->time.tv_sec;
+ ts.tv_nsec = txc->time.tv_usec;
+ if (!timespec_inject_offset_valid(&ts))
+ return -EINVAL;
+
+ } else {
+ if (!timeval_inject_offset_valid(&txc->time))
+ return -EINVAL;
+ }
+ }
/*
* Check for potential multiplication overflows that can
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index f5e86d282d52..80016b329d94 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -808,6 +808,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
timer->it.cpu.expires = 0;
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
&itp->it_value);
+ return;
} else {
cpu_timer_sample_group(timer->it_clock, p, &now);
unlock_task_sighand(p, &flags);
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0637823aa5a6..699aff70bbfa 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -71,6 +71,10 @@ struct clock_data {
static struct hrtimer sched_clock_timer;
static int irqtime = -1;
static int initialized;
+static u64 suspend_ns;
+static u64 suspend_cycles;
+static u64 resume_cycles;
+
core_param(irqtime, irqtime, int, 0400);
@@ -282,6 +286,11 @@ static int sched_clock_suspend(void)
struct clock_read_data *rd = &cd.read_data[0];
update_sched_clock();
+
+ suspend_ns = rd->epoch_ns;
+ suspend_cycles = rd->epoch_cyc;
+ pr_info("suspend ns:%17llu suspend cycles:%17llu\n",
+ rd->epoch_ns, rd->epoch_cyc);
hrtimer_cancel(&sched_clock_timer);
rd->read_sched_clock = suspended_sched_clock_read;
@@ -293,6 +302,8 @@ static void sched_clock_resume(void)
struct clock_read_data *rd = &cd.read_data[0];
rd->epoch_cyc = cd.actual_read_sched_clock();
+ resume_cycles = rd->epoch_cyc;
+ pr_info("resume cycles:%17llu\n", rd->epoch_cyc);
hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
rd->read_sched_clock = cd.actual_read_sched_clock;
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 99188ee5d9d0..4ff237dbc006 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -383,7 +383,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
- now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+ now = ktime_to_ns(tkr->base);
+
+ now += clocksource_delta(tkr->read(tkr->clock),
+ tkr->cycle_last, tkr->mask);
} while (read_seqcount_retry(&tkf->seq, seq));
return now;
@@ -958,7 +961,7 @@ int timekeeping_inject_offset(struct timespec *ts)
struct timespec64 ts64, tmp;
int ret = 0;
- if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ if (!timespec_inject_offset_valid(ts))
return -EINVAL;
ts64 = timespec_to_timespec64(*ts);
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index f6bd65236712..107310a6f36f 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -23,7 +23,9 @@
#include "timekeeping_internal.h"
-static unsigned int sleep_time_bin[32] = {0};
+#define NUM_BINS 32
+
+static unsigned int sleep_time_bin[NUM_BINS] = {0};
static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
{
@@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);
void tk_debug_account_sleep_time(struct timespec64 *t)
{
- sleep_time_bin[fls(t->tv_sec)]++;
+ /* Cap bin index so we don't overflow the array */
+ int bin = min(fls(t->tv_sec), NUM_BINS-1);
+
+ sleep_time_bin[bin]++;
}
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index e00ff00e861c..e37dbf53e226 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -367,7 +367,9 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
buf_len = sgl->length;
p2 = sg_virt(sgl);
- for (i = a->nlimbs - 1; i >= 0; i--) {
+ for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+ lzeros %= BYTES_PER_MPI_LIMB;
+ i >= 0; i--) {
alimb = a->d[i];
p = (u8 *)&alimb2;
#if BYTES_PER_MPI_LIMB == 4
@@ -388,17 +390,12 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
#error please implement for this limb size.
#endif
if (lzeros > 0) {
- if (lzeros >= sizeof(alimb)) {
- p -= sizeof(alimb);
- continue;
- } else {
- mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
- mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
- + lzeros;
- *limb1 = *limb2;
- p -= lzeros;
- y = lzeros;
- }
+ mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+ mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+ + lzeros;
+ *limb1 = *limb2;
+ p -= lzeros;
+ y = lzeros;
lzeros -= sizeof(alimb);
}
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 2e03567e1e6b..06ebe4efa1f2 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -56,8 +56,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
unsigned long c, data;
/* Fall back to byte-at-a-time if we get a page fault */
- if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
- break;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
*(unsigned long *)(dst+res) = c;
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
@@ -72,8 +72,7 @@ byte_at_a_time:
while (max) {
char c;
- if (unlikely(__get_user(c,src+res)))
- return -EFAULT;
+ unsafe_get_user(c,src+res, efault);
dst[res] = c;
if (!c)
return res;
@@ -92,6 +91,7 @@ byte_at_a_time:
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's an EFAULT.
*/
+efault:
return -EFAULT;
}
@@ -124,7 +124,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
src_addr = (unsigned long)src;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
- return do_strncpy_from_user(dst, src, count, max);
+ long retval;
+
+ user_access_begin();
+ retval = do_strncpy_from_user(dst, src, count, max);
+ user_access_end();
+ return retval;
}
return -EFAULT;
}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 3a5f2b366d84..8e105ed4df12 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
src -= align;
max += align;
- if (unlikely(__get_user(c,(unsigned long __user *)src)))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)src, efault);
c |= aligned_byte_mask(align);
for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
if (unlikely(max <= sizeof(unsigned long)))
break;
max -= sizeof(unsigned long);
- if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
}
res -= align;
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's 0.
*/
+efault:
return 0;
}
@@ -112,7 +111,12 @@ long strnlen_user(const char __user *str, long count)
src_addr = (unsigned long)str;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
- return do_strnlen_user(str, count, max);
+ long retval;
+
+ user_access_begin();
+ retval = do_strnlen_user(str, count, max);
+ user_access_end();
+ return retval;
}
return 0;
}
@@ -141,7 +145,12 @@ long strlen_user(const char __user *str)
src_addr = (unsigned long)str;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
- return do_strnlen_user(str, ~0ul, max);
+ long retval;
+
+ user_access_begin();
+ retval = do_strnlen_user(str, ~0ul, max);
+ user_access_end();
+ return retval;
}
return 0;
}
diff --git a/mm/Makefile b/mm/Makefile
index cb1c1c332ea2..4b1a69abce7a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,6 +5,9 @@
KASAN_SANITIZE_slab_common.o := n
KASAN_SANITIZE_slub.o := n
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -83,3 +86,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_PROCESS_RECLAIM) += process_reclaim.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index cbe6f0b96f29..9ef80bf441b3 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -825,6 +825,20 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
}
EXPORT_SYMBOL(bdi_register_dev);
+int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
+{
+ int rc;
+
+ rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
+ MINOR(owner->devt));
+ if (rc)
+ return rc;
+ bdi->owner = owner;
+ get_device(owner);
+ return 0;
+}
+EXPORT_SYMBOL(bdi_register_owner);
+
/*
* Remove bdi from bdi_list, and ensure that it is no longer visible
*/
@@ -849,6 +863,11 @@ void bdi_unregister(struct backing_dev_info *bdi)
device_unregister(bdi->dev);
bdi->dev = NULL;
}
+
+ if (bdi->owner) {
+ put_device(bdi->owner);
+ bdi->owner = NULL;
+ }
}
void bdi_exit(struct backing_dev_info *bdi)
diff --git a/mm/compaction.c b/mm/compaction.c
index 7f9e60489d67..b4c33357f96e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -475,25 +475,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+ if (!isolated)
+ break;
+
total_isolated += isolated;
+ cc->nr_freepages += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
page++;
}
-
- /* If a page was split, advance to the end of it */
- if (isolated) {
- cc->nr_freepages += isolated;
- if (!strict &&
- cc->nr_migratepages <= cc->nr_freepages) {
- blockpfn += isolated;
- break;
- }
-
- blockpfn += isolated - 1;
- cursor += isolated - 1;
- continue;
+ if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+ blockpfn += isolated;
+ break;
}
+ /* Advance to the end of split page */
+ blockpfn += isolated - 1;
+ cursor += isolated - 1;
+ continue;
isolate_fail:
if (strict)
@@ -503,6 +501,9 @@ isolate_fail:
}
+ if (locked)
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+
/*
* There is a tiny chance that we have read bogus compound_order(),
* so be careful to not go outside of the pageblock.
@@ -524,9 +525,6 @@ isolate_fail:
if (strict && blockpfn < end_pfn)
total_isolated = 0;
- if (locked)
- spin_unlock_irqrestore(&cc->zone->lock, flags);
-
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -991,7 +989,6 @@ static void isolate_freepages(struct compact_control *cc)
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) {
-
/*
* This can iterate a massively long zone without finding any
* suitable migration targets, so periodically check if we need
@@ -1015,32 +1012,30 @@ static void isolate_freepages(struct compact_control *cc)
continue;
/* Found a block suitable for isolating free pages from. */
- isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, freelist, false);
+ isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+ freelist, false);
/*
- * If we isolated enough freepages, or aborted due to async
- * compaction being contended, terminate the loop.
- * Remember where the free scanner should restart next time,
- * which is where isolate_freepages_block() left off.
- * But if it scanned the whole pageblock, isolate_start_pfn
- * now points at block_end_pfn, which is the start of the next
- * pageblock.
- * In that case we will however want to restart at the start
- * of the previous pageblock.
+ * If we isolated enough freepages, or aborted due to lock
+ * contention, terminate.
*/
if ((cc->nr_freepages >= cc->nr_migratepages)
|| cc->contended) {
- if (isolate_start_pfn >= block_end_pfn)
+ if (isolate_start_pfn >= block_end_pfn) {
+ /*
+ * Restart at previous pageblock if more
+ * freepages can be isolated next time.
+ */
isolate_start_pfn =
block_start_pfn - pageblock_nr_pages;
+ }
break;
- } else {
+ } else if (isolate_start_pfn < block_end_pfn) {
/*
- * isolate_freepages_block() should not terminate
- * prematurely unless contended, or isolated enough
+ * If isolation failed early, do not continue
+ * needlessly.
*/
- VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+ break;
}
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef6963b577fd..125c7dd55322 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2170,6 +2170,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
+
+ /* yield cpu to avoid soft lockup */
+ cond_resched();
+
if (hstate_is_gigantic(h))
ret = alloc_fresh_gigantic_page(h, nodes_allowed);
else
@@ -4209,7 +4213,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
if (saddr) {
spte = huge_pte_offset(svma->vm_mm, saddr);
if (spte) {
- mm_inc_nr_pmds(mm);
get_page(virt_to_page(spte));
break;
}
@@ -4224,9 +4227,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
if (pud_none(*pud)) {
pud_populate(mm, pud,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
+ mm_inc_nr_pmds(mm);
} else {
put_page(virt_to_page(spte));
- mm_inc_nr_pmds(mm);
}
spin_unlock(ptl);
out:
diff --git a/mm/maccess.c b/mm/maccess.c
index d159b1c96e48..78f9274dd49d 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -96,8 +96,7 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
pagefault_disable();
do {
- ret = __copy_from_user_inatomic(dst++,
- (const void __user __force *)src++, 1);
+ ret = __get_user(*dst++, (const char __user __force *)src++);
} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
dst[-1] = '\0';
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 08806bb1f070..b62b33a3cfec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -272,21 +272,7 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
- return memcg->css.id;
-}
-
-/*
- * A helper function to get mem_cgroup from ID. must be called under
- * rcu_read_lock(). The caller is responsible for calling
- * css_tryget_online() if the mem_cgroup is used for charging. (dropping
- * refcnt from swap can be called against removed memcg.)
- */
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
-{
- struct cgroup_subsys_state *css;
-
- css = css_from_id(id, &memory_cgrp_subsys);
- return mem_cgroup_from_css(css);
+ return memcg->id.id;
}
/* Writing them here to avoid exposing memcg's inner layout */
@@ -4124,6 +4110,88 @@ static struct cftype mem_cgroup_legacy_files[] = {
{ }, /* terminate */
};
+/*
+ * Private memory cgroup IDR
+ *
+ * Swap-out records and page cache shadow entries need to store memcg
+ * references in constrained space, so we maintain an ID space that is
+ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
+ * memory-controlled cgroups to 64k.
+ *
+ * However, there usually are many references to the oflline CSS after
+ * the cgroup has been destroyed, such as page cache or reclaimable
+ * slab objects, that don't need to hang on to the ID. We want to keep
+ * those dead CSS from occupying IDs, or we might quickly exhaust the
+ * relatively small ID space and prevent the creation of new cgroups
+ * even when there are much fewer than 64k cgroups - possibly none.
+ *
+ * Maintain a private 16-bit ID space for memcg, and allow the ID to
+ * be freed and recycled when it's no longer needed, which is usually
+ * when the CSS is offlined.
+ *
+ * The only exception to that are records of swapped out tmpfs/shmem
+ * pages that need to be attributed to live ancestors on swapin. But
+ * those references are manageable from userspace.
+ */
+
+static DEFINE_IDR(mem_cgroup_idr);
+
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
+{
+ atomic_add(n, &memcg->id.ref);
+}
+
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+{
+ while (!atomic_inc_not_zero(&memcg->id.ref)) {
+ /*
+ * The root cgroup cannot be destroyed, so its refcount must
+ * always be >= 1.
+ */
+ if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+ VM_BUG_ON(1);
+ break;
+ }
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ }
+ return memcg;
+}
+
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+{
+ if (atomic_sub_and_test(n, &memcg->id.ref)) {
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
+ memcg->id.id = 0;
+
+ /* Memcg ID pins CSS */
+ css_put(&memcg->css);
+ }
+}
+
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+ mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+ mem_cgroup_id_put_many(memcg, 1);
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from a memcg id
+ * @id: the memcg id to look up
+ *
+ * Caller must hold rcu_read_lock().
+ */
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return idr_find(&mem_cgroup_idr, id);
+}
+
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
@@ -4178,6 +4246,12 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (memcg_wb_domain_init(memcg, GFP_KERNEL))
goto out_free_stat;
+ memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+ 1, MEM_CGROUP_ID_MAX,
+ GFP_KERNEL);
+ if (memcg->id.id < 0)
+ goto out_free_stat;
+
return memcg;
out_free_stat:
@@ -4263,9 +4337,11 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
+ idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
return &memcg->css;
free_out:
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
__mem_cgroup_free(memcg);
return ERR_PTR(error);
}
@@ -4277,8 +4353,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
int ret;
- if (css->id > MEM_CGROUP_ID_MAX)
- return -ENOSPC;
+ /* Online state pins memcg ID, memcg ID pins CSS */
+ mem_cgroup_id_get(mem_cgroup_from_css(css));
+ css_get(css);
if (!parent)
return 0;
@@ -4352,6 +4429,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
memcg_deactivate_kmem(memcg);
wb_memcg_offline(memcg);
+
+ mem_cgroup_id_put(memcg);
}
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
@@ -4785,6 +4864,8 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.from))
page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
+ mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
/*
* we charged both to->memory and to->memsw, so we
* should uncharge to->memory.
@@ -4792,9 +4873,9 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.to))
page_counter_uncharge(&mc.to->memory, mc.moved_swap);
- css_put_many(&mc.from->css, mc.moved_swap);
+ mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+ css_put_many(&mc.to->css, mc.moved_swap);
- /* we've already done css_get(mc.to) */
mc.moved_swap = 0;
}
memcg_oom_recover(from);
@@ -5271,7 +5352,6 @@ struct cgroup_subsys memory_cgrp_subsys = {
.css_reset = mem_cgroup_css_reset,
.can_attach = mem_cgroup_can_attach,
.cancel_attach = mem_cgroup_cancel_attach,
- .attach = mem_cgroup_move_task,
.allow_attach = mem_cgroup_allow_attach,
.post_attach = mem_cgroup_move_task,
.bind = mem_cgroup_bind,
@@ -5681,7 +5761,7 @@ subsys_initcall(mem_cgroup_init);
*/
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;
VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5696,15 +5776,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (!memcg)
return;
- oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+ /*
+ * In case the memcg owning these pages has been offlined and doesn't
+ * have an ID allocated to it anymore, charge the closest online
+ * ancestor for the swap instead and transfer the memory+swap charge.
+ */
+ swap_memcg = mem_cgroup_id_get_online(memcg);
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(memcg, true);
+ mem_cgroup_swap_statistics(swap_memcg, true);
page->mem_cgroup = NULL;
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, 1);
+ if (memcg != swap_memcg) {
+ if (!mem_cgroup_is_root(swap_memcg))
+ page_counter_charge(&swap_memcg->memsw, 1);
+ page_counter_uncharge(&memcg->memsw, 1);
+ }
+
/*
* Interrupts should be disabled here because the caller holds the
* mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5714,6 +5806,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
VM_BUG_ON(!irqs_disabled());
mem_cgroup_charge_statistics(memcg, page, -1);
memcg_check_events(memcg, page);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put(&memcg->css);
}
/**
@@ -5737,7 +5832,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memsw, 1);
mem_cgroup_swap_statistics(memcg, false);
- css_put(&memcg->css);
+ mem_cgroup_id_put(memcg);
}
rcu_read_unlock();
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f12a0bde548b..d41925306d75 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -287,7 +287,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
- if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+ int nid = early_pfn_to_nid(pfn);
+
+ if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
return true;
return false;
@@ -1072,7 +1074,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
spin_lock(&early_pfn_lock);
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
if (nid < 0)
- nid = 0;
+ nid = first_online_node;
spin_unlock(&early_pfn_lock);
return nid;
diff --git a/mm/process_reclaim.c b/mm/process_reclaim.c
index 8cf5f13548e8..98e5af190fe0 100644
--- a/mm/process_reclaim.c
+++ b/mm/process_reclaim.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -48,6 +48,10 @@ static unsigned long pressure_max = 90;
module_param_named(pressure_min, pressure_min, ulong, S_IRUGO | S_IWUSR);
module_param_named(pressure_max, pressure_max, ulong, S_IRUGO | S_IWUSR);
+static short min_score_adj = 360;
+module_param_named(min_score_adj, min_score_adj, short,
+ S_IRUGO | S_IWUSR);
+
/*
* Scheduling process reclaim workqueue unecessarily
* when the reclaim efficiency is low does not make
@@ -114,7 +118,6 @@ static void swap_fn(struct work_struct *work)
int i;
int tasksize;
int total_sz = 0;
- short min_score_adj = 360;
int total_scan = 0;
int total_reclaimed = 0;
int nr_to_reclaim;
diff --git a/mm/slab.c b/mm/slab.c
index 4765c97ce690..24a615d42d74 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4228,6 +4228,36 @@ static int __init slab_proc_init(void)
module_init(slab_proc_init);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+ unsigned long offset;
+
+ /* Find and validate object. */
+ cachep = page->slab_cache;
+ objnr = obj_to_index(cachep, page, (void *)ptr);
+ BUG_ON(objnr >= cachep->num);
+
+ /* Find offset within object. */
+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+ return NULL;
+
+ return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
/**
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3c6a86b4ec25..bec2fce9fafc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -521,8 +521,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
goto out_unlock;
cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
- cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
- css->id, memcg_name_buf);
+ cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
+ css->serial_nr, memcg_name_buf);
if (!cache_name)
goto out_unlock;
diff --git a/mm/slub.c b/mm/slub.c
index fdc0721ebc31..64bc4e973789 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,6 +124,14 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#endif
}
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+ p += s->red_left_pad;
+
+ return p;
+}
+
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -224,24 +232,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
* Core slab cache functions
*******************************************************************/
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
- struct page *page, const void *object)
-{
- void *base;
-
- if (!object)
- return 1;
-
- base = page_address(page);
- if (object < base || object >= base + page->objects * s->size ||
- (object - base) % s->size) {
- return 0;
- }
-
- return 1;
-}
-
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
@@ -271,12 +261,14 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
- for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
- __p += (__s)->size)
+ for (__p = fixup_red_left(__s, __addr); \
+ __p < (__addr) + (__objects) * (__s)->size; \
+ __p += (__s)->size)
#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
- for (__p = (__addr), __idx = 1; __idx <= __objects;\
- __p += (__s)->size, __idx++)
+ for (__p = fixup_red_left(__s, __addr), __idx = 1; \
+ __idx <= __objects; \
+ __p += (__s)->size, __idx++)
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -456,6 +448,22 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
set_bit(slab_index(p, s, addr), map);
}
+static inline int size_from_object(struct kmem_cache *s)
+{
+ if (s->flags & SLAB_RED_ZONE)
+ return s->size - s->red_left_pad;
+
+ return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+ if (s->flags & SLAB_RED_ZONE)
+ p -= s->red_left_pad;
+
+ return p;
+}
+
/*
* Debug settings:
*/
@@ -489,6 +497,26 @@ static inline void metadata_access_disable(void)
/*
* Object debugging
*/
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+ struct page *page, void *object)
+{
+ void *base;
+
+ if (!object)
+ return 1;
+
+ base = page_address(page);
+ object = restore_red_left(s, object);
+ if (object < base || object >= base + page->objects * s->size ||
+ (object - base) % s->size) {
+ return 0;
+ }
+
+ return 1;
+}
+
static void print_section(char *text, u8 *addr, unsigned int length)
{
metadata_access_enable();
@@ -628,7 +656,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
p, p - addr, get_freepointer(s, p));
- if (p > addr + 16)
+ if (s->flags & SLAB_RED_ZONE)
+ print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+ else if (p > addr + 16)
print_section("Bytes b4 ", p - 16, 16);
print_section("Object ", p, min_t(unsigned long, s->object_size,
@@ -645,9 +675,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (s->flags & SLAB_STORE_USER)
off += 2 * sizeof(struct track);
- if (off != s->size)
+ if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section("Padding ", p + off, s->size - off);
+ print_section("Padding ", p + off, size_from_object(s) - off);
dump_stack();
}
@@ -688,6 +718,9 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
{
u8 *p = object;
+ if (s->flags & SLAB_RED_ZONE)
+ memset(p - s->red_left_pad, val, s->red_left_pad);
+
if (s->flags & __OBJECT_POISON) {
memset(p, POISON_FREE, s->object_size - 1);
p[s->object_size - 1] = POISON_END;
@@ -781,11 +814,11 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
/* We also have user information there */
off += 2 * sizeof(struct track);
- if (s->size == off)
+ if (size_from_object(s) == off)
return 1;
return check_bytes_and_report(s, page, p, "Object padding",
- p + off, POISON_INUSE, s->size - off);
+ p + off, POISON_INUSE, size_from_object(s) - off);
}
/* Check the pad bytes at the end of a slab page */
@@ -830,6 +863,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, page, object, "Redzone",
+ object - s->red_left_pad, val, s->red_left_pad))
+ return 0;
+
+ if (!check_bytes_and_report(s, page, object, "Redzone",
endobject, val, s->inuse - s->object_size))
return 0;
} else {
@@ -1480,7 +1517,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
set_freepointer(s, p, NULL);
}
- page->freelist = start;
+ page->freelist = fixup_red_left(s, start);
page->inuse = page->objects;
page->frozen = 1;
@@ -3296,7 +3333,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
size += 2 * sizeof(struct track);
- if (flags & SLAB_RED_ZONE)
+ if (flags & SLAB_RED_ZONE) {
/*
* Add some empty padding so that we can catch
* overwrites from earlier objects rather than let
@@ -3305,6 +3342,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* of the object.
*/
size += sizeof(void *);
+
+ s->red_left_pad = sizeof(void *);
+ s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+ size += s->red_left_pad;
+ }
#endif
/*
@@ -3598,6 +3640,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *s;
+ unsigned long offset;
+ size_t object_size;
+
+ /* Find object and usable object size. */
+ s = page->slab_cache;
+ object_size = slab_ksize(s);
+
+ /* Reject impossible pointers. */
+ if (ptr < page_address(page))
+ return s->name;
+
+ /* Find offset within object. */
+ offset = (ptr - page_address(page)) % s->size;
+
+ /* Adjust for redzone and reject if within the redzone. */
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+ if (offset < s->red_left_pad)
+ return s->name;
+ offset -= s->red_left_pad;
+ }
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= object_size && n <= object_size - offset)
+ return NULL;
+
+ return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
static size_t __ksize(const void *object)
{
struct page *page;
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644
index 000000000000..089328f2b920
--- /dev/null
+++ b/mm/usercopy.c
@@ -0,0 +1,277 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+ BAD_STACK = -1,
+ NOT_STACK = 0,
+ GOOD_FRAME,
+ GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ * NOT_STACK: not at all on the stack
+ * GOOD_FRAME: fully within a valid stack frame
+ * GOOD_STACK: fully on the stack (when can't do frame-checking)
+ * BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+ const void * const stack = task_stack_page(current);
+ const void * const stackend = stack + THREAD_SIZE;
+ int ret;
+
+ /* Object is not on the stack at all. */
+ if (obj + len <= stack || stackend <= obj)
+ return NOT_STACK;
+
+	/*
+	 * Reject: object partially overlaps the stack (passing the
+	 * check above means at least one end is within the stack,
+	 * so if this check fails, the other end is outside the stack).
+	 */
+ if (obj < stack || stackend < obj + len)
+ return BAD_STACK;
+
+ /* Check if object is safely within a valid frame. */
+ ret = arch_within_stack_frames(stack, stackend, obj, len);
+ if (ret)
+ return ret;
+
+ return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+ bool to_user, const char *type)
+{
+ pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+ to_user ? "exposure" : "overwrite",
+ to_user ? "from" : "to", ptr, type ? : "unknown", len);
+ /*
+ * For greater effect, it would be nice to do do_group_exit(),
+ * but BUG() actually hooks all the lock-breaking and per-arch
+ * Oops code, so that is used here instead.
+ */
+ BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+ unsigned long high)
+{
+ unsigned long check_low = (uintptr_t)ptr;
+ unsigned long check_high = check_low + n;
+
+ /* Does not overlap if entirely above or entirely below. */
+ if (check_low >= high || check_high <= low)
+ return false;
+
+ return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+ unsigned long n)
+{
+ unsigned long textlow = (unsigned long)_stext;
+ unsigned long texthigh = (unsigned long)_etext;
+ unsigned long textlow_linear, texthigh_linear;
+
+ if (overlaps(ptr, n, textlow, texthigh))
+ return "<kernel text>";
+
+ /*
+ * Some architectures have virtual memory mappings with a secondary
+ * mapping of the kernel text, i.e. there is more than one virtual
+ * kernel address that points to the kernel image. It is usually
+ * when there is a separate linear physical memory mapping, in that
+ * __pa() is not just the reverse of __va(). This can be detected
+ * and checked:
+ */
+ textlow_linear = (unsigned long)__va(__pa(textlow));
+ /* No different mapping: we're done. */
+ if (textlow_linear == textlow)
+ return NULL;
+
+ /* Check the secondary mapping... */
+ texthigh_linear = (unsigned long)__va(__pa(texthigh));
+ if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+ return "<linear kernel text>";
+
+ return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+ /* Reject if object wraps past end of memory. */
+ if ((unsigned long)ptr + n < (unsigned long)ptr)
+ return "<wrapped address>";
+
+ /* Reject if NULL or ZERO-allocation. */
+ if (ZERO_OR_NULL_PTR(ptr))
+ return "<null>";
+
+ return NULL;
+}
+
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+ struct page *page, bool to_user)
+{
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
+ const void *end = ptr + n - 1;
+ struct page *endpage;
+ bool is_reserved, is_cma;
+
+ /*
+ * Sometimes the kernel data regions are not marked Reserved (see
+ * check below). And sometimes [_sdata,_edata) does not cover
+ * rodata and/or bss, so check each range explicitly.
+ */
+
+ /* Allow reads of kernel rodata region (if not marked as Reserved). */
+ if (ptr >= (const void *)__start_rodata &&
+ end <= (const void *)__end_rodata) {
+ if (!to_user)
+ return "<rodata>";
+ return NULL;
+ }
+
+ /* Allow kernel data region (if not marked as Reserved). */
+ if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+ return NULL;
+
+ /* Allow kernel bss region (if not marked as Reserved). */
+ if (ptr >= (const void *)__bss_start &&
+ end <= (const void *)__bss_stop)
+ return NULL;
+
+ /* Is the object wholly within one base page? */
+ if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+ ((unsigned long)end & (unsigned long)PAGE_MASK)))
+ return NULL;
+
+ /* Allow if fully inside the same compound (__GFP_COMP) page. */
+ endpage = virt_to_head_page(end);
+ if (likely(endpage == page))
+ return NULL;
+
+ /*
+ * Reject if range is entirely either Reserved (i.e. special or
+ * device memory), or CMA. Otherwise, reject since the object spans
+ * several independently allocated pages.
+ */
+ is_reserved = PageReserved(page);
+ is_cma = is_migrate_cma_page(page);
+ if (!is_reserved && !is_cma)
+ return "<spans multiple pages>";
+
+ for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+ page = virt_to_head_page(ptr);
+ if (is_reserved && !PageReserved(page))
+ return "<spans Reserved and non-Reserved pages>";
+ if (is_cma && !is_migrate_cma_page(page))
+ return "<spans CMA and non-CMA pages>";
+ }
+#endif
+
+ return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ struct page *page;
+
+ /*
+ * Some architectures (arm64) return true for virt_addr_valid() on
+ * vmalloced addresses. Work around this by checking for vmalloc
+ * first.
+ */
+ if (is_vmalloc_addr(ptr))
+ return NULL;
+
+ if (!virt_addr_valid(ptr))
+ return NULL;
+
+ page = virt_to_head_page(ptr);
+
+ /* Check slab allocator for flags and size. */
+ if (PageSlab(page))
+ return __check_heap_object(ptr, n, page);
+
+ /* Verify object does not incorrectly span multiple pages. */
+ return check_page_span(ptr, n, page, to_user);
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+ const char *err;
+
+ /* Skip all tests if size is zero. */
+ if (!n)
+ return;
+
+ /* Check for invalid addresses. */
+ err = check_bogus_address(ptr, n);
+ if (err)
+ goto report;
+
+ /* Check for bad heap object. */
+ err = check_heap_object(ptr, n, to_user);
+ if (err)
+ goto report;
+
+ /* Check for bad stack object. */
+ switch (check_stack_object(ptr, n)) {
+ case NOT_STACK:
+ /* Object is not touching the current process stack. */
+ break;
+ case GOOD_FRAME:
+ case GOOD_STACK:
+ /*
+ * Object is either in the correct frame (when it
+ * is possible to check) or just generally on the
+ * process stack (when frame checking not available).
+ */
+ return;
+ default:
+ err = "<process stack>";
+ goto report;
+ }
+
+ /* Check for object in kernel to avoid text exposure. */
+ err = check_kernel_text_object(ptr, n);
+ if (!err)
+ return;
+
+report:
+ report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
diff --git a/mm/util.c b/mm/util.c
index 9af1c12b310c..d5259b62f8d7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -199,36 +199,11 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
}
/* Check if the vma is being used as a stack by this task */
-static int vm_is_stack_for_task(struct task_struct *t,
- struct vm_area_struct *vma)
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
{
return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
-/*
- * Check if the vma is being used as a stack.
- * If is_group is non-zero, check in the entire thread group or else
- * just check in the current task. Returns the task_struct of the task
- * that the vma is stack for. Must be called under rcu_read_lock().
- */
-struct task_struct *task_of_stack(struct task_struct *task,
- struct vm_area_struct *vma, bool in_group)
-{
- if (vm_is_stack_for_task(task, vma))
- return task;
-
- if (in_group) {
- struct task_struct *t;
-
- for_each_thread(task, t) {
- if (vm_is_stack_for_task(t, vma))
- return t;
- }
- }
-
- return NULL;
-}
-
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 75b7ffe9e7a3..f514dc40dab1 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -41,7 +41,7 @@
* TODO: Make the window size depend on machine size, as we do for vmstat
* thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
*/
-static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
+static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
/*
* These thresholds are used when we account memory pressure through
@@ -290,6 +290,29 @@ void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg,
schedule_work(&vmpr->work);
}
+void calculate_vmpressure_win(void)
+{
+ long x;
+
+ x = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM) -
+ total_swapcache_pages() +
+ global_page_state(NR_FREE_PAGES);
+ if (x < 1)
+ x = 1;
+ /*
+ * For low (free + cached), vmpressure window should be
+ * small, and high for higher values of (free + cached).
+ * But it should not be linear as well. This ensures
+ * timely vmpressure notifications when system is under
+ * memory pressure, and optimal number of events when
+	 * cached is high. The square root function is empirically
+ * found to serve the purpose.
+ */
+ x = int_sqrt(x);
+ vmpressure_win = x;
+}
+
void vmpressure_global(gfp_t gfp, unsigned long scanned,
unsigned long reclaimed)
{
@@ -304,6 +327,9 @@ void vmpressure_global(gfp_t gfp, unsigned long scanned,
return;
spin_lock(&vmpr->sr_lock);
+ if (!vmpr->scanned)
+ calculate_vmpressure_win();
+
vmpr->scanned += scanned;
vmpr->reclaimed += reclaimed;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f737493a6e71..bd3c021932be 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -927,7 +927,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (get_user(opt, (u32 __user *) optval)) {
+ if (get_user(opt, (u16 __user *) optval)) {
err = -EFAULT;
break;
}
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 7d8f581d9f1f..ddc3573894b0 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1192,6 +1192,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
}
/*
+ * Encoding order is (new_up_client, new_state, new_weight). Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ * new_up_client: { osd=6, addr=... } # set osd_state and addr
+ * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+ struct ceph_osdmap *map)
+{
+ void *new_up_client;
+ void *new_state;
+ void *new_weight_end;
+ u32 len;
+
+ new_up_client = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ new_state = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(u8);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ /* new_weight */
+ ceph_decode_32_safe(p, end, len, e_inval);
+ while (len--) {
+ s32 osd;
+ u32 w;
+
+ ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+ osd = ceph_decode_32(p);
+ w = ceph_decode_32(p);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d weight 0x%x %s\n", osd, w,
+ w == CEPH_OSD_IN ? "(in)" :
+ (w == CEPH_OSD_OUT ? "(out)" : ""));
+ map->osd_weight[osd] = w;
+
+ /*
+ * If we are marking in, set the EXISTS, and clear the
+ * AUTOOUT and NEW bits.
+ */
+ if (w) {
+ map->osd_state[osd] |= CEPH_OSD_EXISTS;
+ map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+ CEPH_OSD_NEW);
+ }
+ }
+ new_weight_end = *p;
+
+ /* new_state (up/down) */
+ *p = new_state;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ u8 xorstate;
+ int ret;
+
+ osd = ceph_decode_32(p);
+ xorstate = ceph_decode_8(p);
+ if (xorstate == 0)
+ xorstate = CEPH_OSD_UP;
+ BUG_ON(osd >= map->max_osd);
+ if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+ (xorstate & CEPH_OSD_UP))
+ pr_info("osd%d down\n", osd);
+ if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+ (xorstate & CEPH_OSD_EXISTS)) {
+ pr_info("osd%d does not exist\n", osd);
+ map->osd_weight[osd] = CEPH_OSD_IN;
+ ret = set_primary_affinity(map, osd,
+ CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+ if (ret)
+ return ret;
+ memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+ map->osd_state[osd] = 0;
+ } else {
+ map->osd_state[osd] ^= xorstate;
+ }
+ }
+
+ /* new_up_client */
+ *p = new_up_client;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ struct ceph_entity_addr addr;
+
+ osd = ceph_decode_32(p);
+ ceph_decode_copy(p, &addr, sizeof(addr));
+ ceph_decode_addr(&addr);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d up\n", osd);
+ map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+ map->osd_addr[osd] = addr;
+ }
+
+ *p = new_weight_end;
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
+
+/*
* decode and apply an incremental map update.
*/
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
@@ -1290,49 +1399,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
__remove_pg_pool(&map->pg_pools, pi);
}
- /* new_up */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- struct ceph_entity_addr addr;
- ceph_decode_32_safe(p, end, osd, e_inval);
- ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
- ceph_decode_addr(&addr);
- pr_info("osd%d up\n", osd);
- BUG_ON(osd >= map->max_osd);
- map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
- map->osd_addr[osd] = addr;
- }
-
- /* new_state */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- u8 xorstate;
- ceph_decode_32_safe(p, end, osd, e_inval);
- xorstate = **(u8 **)p;
- (*p)++; /* clean flag */
- if (xorstate == 0)
- xorstate = CEPH_OSD_UP;
- if (xorstate & CEPH_OSD_UP)
- pr_info("osd%d down\n", osd);
- if (osd < map->max_osd)
- map->osd_state[osd] ^= xorstate;
- }
-
- /* new_weight */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd, off;
- ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
- osd = ceph_decode_32(p);
- off = ceph_decode_32(p);
- pr_info("osd%d weight 0x%x %s\n", osd, off,
- off == CEPH_OSD_IN ? "(in)" :
- (off == CEPH_OSD_OUT ? "(out)" : ""));
- if (osd < map->max_osd)
- map->osd_weight[osd] = off;
- }
+ /* new_up_client, new_state, new_weight */
+ err = decode_new_up_state_weight(p, end, map);
+ if (err)
+ goto bad;
/* new_pg_temp */
err = decode_new_pg_temp(p, end, map);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 2b68418c7198..ffe95d954007 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
+ if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ return -EINVAL;
+
nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
goto err_inval;
+ if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ goto err_inval;
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 0d5278ca4777..76c86e7d5e10 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -657,7 +657,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len) {
u8 type, code;
- if (len > 0xFFFF)
+ if (len > 0xFFFF || len < icmph_len)
return -EMSGSIZE;
/*
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 32027efa5033..5f0b6b0f0249 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3391,6 +3391,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
return flag;
}
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+ u32 *last_oow_ack_time)
+{
+ if (*last_oow_ack_time) {
+ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+ NET_INC_STATS_BH(net, mib_idx);
+ return true; /* rate-limited: don't send yet! */
+ }
+ }
+
+ *last_oow_ack_time = tcp_time_stamp;
+
+ return false; /* not rate-limited: go ahead, send dupack now! */
+}
+
/* Return true if we're currently rate-limiting out-of-window ACKs and
* thus shouldn't send a dupack right now. We rate-limit dupacks in
* response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
@@ -3404,21 +3421,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
/* Data packets without SYNs are not likely part of an ACK loop. */
if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
!tcp_hdr(skb)->syn)
- goto not_rate_limited;
-
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
- NET_INC_STATS_BH(net, mib_idx);
- return true; /* rate-limited: don't send yet! */
- }
- }
-
- *last_oow_ack_time = tcp_time_stamp;
+ return false;
-not_rate_limited:
- return false; /* not rate-limited: go ahead, send dupack now! */
+ return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
}
/* RFC 5961 7 [ACK Throttling] */
@@ -3431,9 +3436,9 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
u32 count, now;
/* First check our per-socket dupack rate limit. */
- if (tcp_oow_rate_limited(sock_net(sk), skb,
- LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
- &tp->last_oow_ack_time))
+ if (__tcp_oow_rate_limited(sock_net(sk),
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+ &tp->last_oow_ack_time))
return;
/* Then check host-wide RFC 5961 rate limit. */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0dd207cd1f38..9eb81a4b0da2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -239,7 +239,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
/* Set window scaling on max possible window
* See RFC1323 for an explanation of the limit to 14
*/
- space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+ space = max_t(u32, space, sysctl_tcp_rmem[2]);
+ space = max_t(u32, space, sysctl_rmem_max);
space = min_t(u32, space, *window_clamp);
while (space > 65535 && (*rcv_wscale) < 14) {
space >>= 1;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 04822b7d7966..6d98857faca0 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1281,6 +1281,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
+ bool checksum_valid = false;
bool slow;
if (flags & MSG_ERRQUEUE)
@@ -1306,11 +1307,12 @@ try_again:
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
- if (udp_lib_checksum_complete(skb))
+ checksum_valid = !udp_lib_checksum_complete(skb);
+ if (!checksum_valid)
goto csum_copy_err;
}
- if (skb_csum_unnecessary(skb))
+ if (checksum_valid || skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index a3ec7a77a1ee..41e5c9520c7d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
- ping_err(skb, offset, info);
+ ping_err(skb, offset, ntohl(info));
}
static int icmpv6_rcv(struct sk_buff *skb);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 2cfaedf03252..9411c8d770a5 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -84,7 +84,7 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct icmp6hdr user_icmph;
int addr_type;
struct in6_addr *daddr;
- int iif = 0;
+ int oif = 0;
struct flowi6 fl6;
int err;
int hlimit;
@@ -106,25 +106,30 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (u->sin6_family != AF_INET6) {
return -EAFNOSUPPORT;
}
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != u->sin6_scope_id) {
- return -EINVAL;
- }
daddr = &(u->sin6_addr);
- iif = u->sin6_scope_id;
+ if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
+ oif = u->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
}
- if (!iif)
- iif = sk->sk_bound_dev_if;
+ if (!oif)
+ oif = sk->sk_bound_dev_if;
+
+ if (!oif)
+ oif = np->sticky_pktinfo.ipi6_ifindex;
+
+ if (!oif && ipv6_addr_is_multicast(daddr))
+ oif = np->mcast_oif;
+ else if (!oif)
+ oif = np->ucast_oif;
addr_type = ipv6_addr_type(daddr);
- if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
- return -EINVAL;
- if (addr_type & IPV6_ADDR_MAPPED)
+ if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+ (addr_type & IPV6_ADDR_MAPPED) ||
+ (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
return -EINVAL;
/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -134,17 +139,13 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
+ fl6.flowi6_oif = oif;
fl6.flowi6_mark = sk->sk_mark;
fl6.flowi6_uid = sock_i_uid(sk);
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
- fl6.flowi6_oif = np->mcast_oif;
- else if (!fl6.flowi6_oif)
- fl6.flowi6_oif = np->ucast_oif;
-
dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -154,11 +155,6 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!np)
return -EBADF;
- if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
- fl6.flowi6_oif = np->mcast_oif;
- else if (!fl6.flowi6_oif)
- fl6.flowi6_oif = np->ucast_oif;
-
pfh.icmph.type = user_icmph.icmp6_type;
pfh.icmph.code = user_icmph.icmp6_code;
pfh.icmph.checksum = 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ed7f4a81a932..1a5b9322e713 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -402,6 +402,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
+ bool checksum_valid = false;
int is_udp4;
bool slow;
@@ -433,11 +434,12 @@ try_again:
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
- if (udp_lib_checksum_complete(skb))
+ checksum_valid = !udp_lib_checksum_complete(skb);
+ if (!checksum_valid)
goto csum_copy_err;
}
- if (skb_csum_unnecessary(skb))
+ if (checksum_valid || skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 923abd6b3064..8d2f7c9b491d 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1024,8 +1024,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
}
/* Check if we have opened a local TSAP */
- if (!self->tsap)
- irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+ if (!self->tsap) {
+ err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+ if (err)
+ goto out;
+ }
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c12f348138ac..19322c047386 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -865,7 +865,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
- skb_queue_purge(&sdata->u.ap.ps.bc_buf);
+ ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
mutex_lock(&local->mtx);
ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index bdc224d5053a..e1225b395415 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -365,7 +365,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
skb = skb_dequeue(&ps->bc_buf);
if (skb) {
purged++;
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
total += skb_queue_len(&ps->bc_buf);
}
@@ -448,7 +448,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
ps_dbg(tx->sdata,
"BC TX buffer full - dropping the oldest frame\n");
- dev_kfree_skb(skb_dequeue(&ps->bc_buf));
+ ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
} else
tx->local->total_ps_buffered++;
@@ -3781,7 +3781,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
break;
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
info = IEEE80211_SKB_CB(skb);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 25391fb25516..2fc6ca9d1286 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -897,6 +897,9 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	struct xt_table_info *info = NULL;
 	size_t sz = sizeof(*info) + size;
 
+	if (sz < sizeof(*info))
+		return NULL;
+
/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 28cddc85b700..bfa2b6d5b5cf 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -824,7 +824,11 @@ socket_setattr_return:
*/
void netlbl_sock_delattr(struct sock *sk)
{
- cipso_v4_sock_delattr(sk);
+ switch (sk->sk_family) {
+ case AF_INET:
+ cipso_v4_sock_delattr(sk);
+ break;
+ }
}
/**
@@ -987,7 +991,11 @@ req_setattr_return:
*/
void netlbl_req_delattr(struct request_sock *req)
{
- cipso_v4_req_delattr(req);
+ switch (req->rsk_ops->family) {
+ case AF_INET:
+ cipso_v4_req_delattr(req);
+ break;
+ }
}
/**
diff --git a/net/rds/recv.c b/net/rds/recv.c
index a00462b0d01d..0514af3ab378 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -545,5 +545,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
minfo.fport = inc->i_hdr.h_dport;
}
+ minfo.flags = 0;
+
rds_info_copy(iter, &minfo, sizeof(minfo));
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 9d6ddbacd875..18e50a8fc05f 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -421,7 +421,7 @@ static int rds_tcp_init(void)
ret = rds_tcp_recv_init();
if (ret)
- goto out_slab;
+ goto out_pernet;
ret = rds_trans_register(&rds_tcp_transport);
if (ret)
@@ -433,8 +433,9 @@ static int rds_tcp_init(void)
out_recv:
rds_tcp_recv_exit();
-out_slab:
+out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 799e65b944b9..06095cc8815e 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
}
static struct gss_upcall_msg *
-__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
+__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
{
struct gss_upcall_msg *pos;
list_for_each_entry(pos, &pipe->in_downcall, list) {
if (!uid_eq(pos->uid, uid))
continue;
+ if (auth && pos->auth->service != auth->service)
+ continue;
atomic_inc(&pos->count);
dprintk("RPC: %s found msg %p\n", __func__, pos);
return pos;
@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
struct gss_upcall_msg *old;
spin_lock(&pipe->lock);
- old = __gss_find_upcall(pipe, gss_msg->uid);
+ old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
if (old == NULL) {
atomic_inc(&gss_msg->count);
list_add(&gss_msg->list, &pipe->in_downcall);
@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
err = -ENOENT;
/* Find a matching upcall */
spin_lock(&pipe->lock);
- gss_msg = __gss_find_upcall(pipe, uid);
+ gss_msg = __gss_find_upcall(pipe, uid, NULL);
if (gss_msg == NULL) {
spin_unlock(&pipe->lock);
goto err_put_ctx;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 027c9ef8a263..1ba417207465 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2286,6 +2286,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
/* SYN_SENT! */
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ break;
+ case -EADDRNOTAVAIL:
+ /* Source port number is unavailable. Try a new one! */
+ transport->srcport = 0;
}
out:
return ret;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index ed98c1fc3de1..46a71c701e7c 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
kgid_t root_gid = make_kgid(net->user_ns, 0);
/* Allow network administrator to have same access as root. */
- if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
+ if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN) ||
uid_eq(root_uid, current_euid())) {
int mode = (table->mode >> 6) & 7;
return (mode << 6) | (mode << 3) | mode;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 2ed732bfe94b..a0c90572d0e5 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -574,7 +574,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
- strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+ nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ TIPC_MAX_LINK_NAME);
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
&link_info, sizeof(link_info));
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 69ee2eeef968..f9ff73a8d815 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -296,7 +296,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
return tipc_conn_terminate(tn->topsrv, subscrb->conid);
- tipc_nametbl_subscribe(sub);
+ if (sub)
+ tipc_nametbl_subscribe(sub);
}
/* Handle one request to establish a new subscriber */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 375d6c1732fa..e5a1df6fc282 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3478,13 +3478,12 @@ out:
return 0;
}
-static int validate_beacon_tx_rate(struct cfg80211_ap_settings *params)
+static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
+ enum nl80211_band band,
+ struct cfg80211_bitrate_mask *beacon_rate)
{
- u32 rate, count_ht, count_vht, i;
- enum nl80211_band band;
-
- band = params->chandef.chan->band;
- rate = params->beacon_rate.control[band].legacy;
+ u32 count_ht, count_vht, i;
+ u32 rate = beacon_rate->control[band].legacy;
/* Allow only one rate */
if (hweight32(rate) > 1)
@@ -3492,9 +3491,9 @@ static int validate_beacon_tx_rate(struct cfg80211_ap_settings *params)
count_ht = 0;
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
- if (hweight8(params->beacon_rate.control[band].ht_mcs[i]) > 1) {
+ if (hweight8(beacon_rate->control[band].ht_mcs[i]) > 1) {
return -EINVAL;
- } else if (params->beacon_rate.control[band].ht_mcs[i]) {
+ } else if (beacon_rate->control[band].ht_mcs[i]) {
count_ht++;
if (count_ht > 1)
return -EINVAL;
@@ -3505,9 +3504,9 @@ static int validate_beacon_tx_rate(struct cfg80211_ap_settings *params)
count_vht = 0;
for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
- if (hweight16(params->beacon_rate.control[band].vht_mcs[i]) > 1) {
+ if (hweight16(beacon_rate->control[band].vht_mcs[i]) > 1) {
return -EINVAL;
- } else if (params->beacon_rate.control[band].vht_mcs[i]) {
+ } else if (beacon_rate->control[band].vht_mcs[i]) {
count_vht++;
if (count_vht > 1)
return -EINVAL;
@@ -3519,6 +3518,19 @@ static int validate_beacon_tx_rate(struct cfg80211_ap_settings *params)
if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht))
return -EINVAL;
+ if (rate &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY))
+ return -EINVAL;
+ if (count_ht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT))
+ return -EINVAL;
+ if (count_vht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT))
+ return -EINVAL;
+
return 0;
}
@@ -3757,7 +3769,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- err = validate_beacon_tx_rate(&params);
+ err = validate_beacon_tx_rate(rdev, params.chandef.chan->band,
+ &params.beacon_rate);
if (err)
return err;
}
@@ -9066,6 +9079,17 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_TX_RATES] && setup.chandef.chan != NULL) {
+ err = nl80211_parse_tx_bitrate_mask(info, &setup.beacon_rate);
+ if (err)
+ return err;
+
+ err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
+ &setup.beacon_rate);
+ if (err)
+ return err;
+ }
+
return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index e167592793a7..42396a74405d 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -33,10 +33,17 @@
#include <string.h>
#include <unistd.h>
+/*
+ * glibc synced up and added the metag number but didn't add the relocations.
+ * Work around this in a crude manner for now.
+ */
#ifndef EM_METAG
-/* Remove this when these make it to the standard system elf.h. */
#define EM_METAG 174
+#endif
+#ifndef R_METAG_ADDR32
#define R_METAG_ADDR32 2
+#endif
+#ifndef R_METAG_NONE
#define R_METAG_NONE 3
#endif
diff --git a/security/Kconfig b/security/Kconfig
index c4f83485bc1f..0a83cd09a198 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -132,6 +132,46 @@ config LSM_MMAP_MIN_ADDR
this low address space will need the permission specific to the
systems running LSM.
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+ bool
+ help
+ The heap allocator implements __check_heap_object() for
+ validating memory ranges against heap object sizes in
+ support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+ bool
+ help
+ The architecture supports CONFIG_HARDENED_USERCOPY by
+ calling check_object_size() just before performing the
+ userspace copies in the low level implementation of
+ copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ depends on HAVE_ARCH_HARDENED_USERCOPY
+ depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
+ select BUG
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocates pages, are not on the process stack,
+ or are part of the kernel text. This kills entire classes
+ of heap overflow exploits and similar kernel memory exposures.
+
+config HARDENED_USERCOPY_PAGESPAN
+ bool "Refuse to copy allocations that span multiple pages"
+ depends on HARDENED_USERCOPY
+ depends on !COMPILE_TEST
+ help
+ When a multi-page allocation is done without __GFP_COMP,
+ hardened usercopy will reject attempts to copy it. There are,
+ however, several cases of this in the kernel that have not all
+ been removed. This config is intended to be used only while
+ trying to find such users.
+
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index ad4fa49ad1db..9068369f8a1b 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
seq_printf(seq, "%.2x", profile->hash[i]);
seq_puts(seq, "\n");
}
+ aa_put_profile(profile);
return 0;
}
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 795437b10082..b450a27588c8 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
return -EBUSY;
}
list_add_tail(&rmidi->list, &snd_rawmidi_devices);
+ mutex_unlock(&register_mutex);
err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
rmidi->card, rmidi->device,
&snd_rawmidi_f_ops, rmidi, &rmidi->dev);
if (err < 0) {
rmidi_err(rmidi, "unable to register\n");
+ mutex_lock(&register_mutex);
list_del(&rmidi->list);
mutex_unlock(&register_mutex);
return err;
@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
if (rmidi->ops && rmidi->ops->dev_register &&
(err = rmidi->ops->dev_register(rmidi)) < 0) {
snd_unregister_device(&rmidi->dev);
+ mutex_lock(&register_mutex);
list_del(&rmidi->list);
mutex_unlock(&register_mutex);
return err;
@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
}
}
#endif /* CONFIG_SND_OSSEMUL */
- mutex_unlock(&register_mutex);
sprintf(name, "midi%d", rmidi->device);
entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
if (entry) {
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 12768f55f8d5..05a31df05c00 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -296,8 +296,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
- if (list_empty(&timer->open_list_head) && timer->hw.open)
- timer->hw.open(timer);
+
+ if (list_empty(&timer->open_list_head) && timer->hw.open) {
+ int err = timer->hw.open(timer);
+ if (err) {
+ kfree(timeri->owner);
+ kfree(timeri);
+
+ if (timer->card)
+ put_device(&timer->card->card_dev);
+ module_put(timer->module);
+ mutex_unlock(&register_mutex);
+ return err;
+ }
+ }
+
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(&register_mutex);
@@ -837,6 +850,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
+ timer->sticks = 1;
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
@@ -1971,6 +1985,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
tu->qused--;
spin_unlock_irq(&tu->qlock);
+ mutex_lock(&tu->ioctl_lock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
@@ -1980,6 +1995,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
+ mutex_unlock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
if (err < 0)
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index c7cb7deafe48..2c316a9bc7f6 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -106,7 +106,6 @@ struct snd_efw {
u8 *resp_buf;
u8 *pull_ptr;
u8 *push_ptr;
- unsigned int resp_queues;
};
int snd_efw_transaction_cmd(struct fw_unit *unit,
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 33df8655fe81..2e1d9a23920c 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
{
unsigned int length, till_end, type;
struct snd_efw_transaction *t;
+ u8 *pull_ptr;
long count = 0;
if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
buf += sizeof(type);
/* write into buffer as many responses as possible */
- while (efw->resp_queues > 0) {
- t = (struct snd_efw_transaction *)(efw->pull_ptr);
+ spin_lock_irq(&efw->lock);
+
+ /*
+ * When another task reaches here during this task's access to user
+ * space, it picks up current position in buffer and can read the same
+ * series of responses.
+ */
+ pull_ptr = efw->pull_ptr;
+
+ while (efw->push_ptr != pull_ptr) {
+ t = (struct snd_efw_transaction *)(pull_ptr);
length = be32_to_cpu(t->length) * sizeof(__be32);
/* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
/* copy from ring buffer to user buffer */
while (length > 0) {
till_end = snd_efw_resp_buf_size -
- (unsigned int)(efw->pull_ptr - efw->resp_buf);
+ (unsigned int)(pull_ptr - efw->resp_buf);
till_end = min_t(unsigned int, length, till_end);
- if (copy_to_user(buf, efw->pull_ptr, till_end))
+ spin_unlock_irq(&efw->lock);
+
+ if (copy_to_user(buf, pull_ptr, till_end))
return -EFAULT;
- efw->pull_ptr += till_end;
- if (efw->pull_ptr >= efw->resp_buf +
- snd_efw_resp_buf_size)
- efw->pull_ptr -= snd_efw_resp_buf_size;
+ spin_lock_irq(&efw->lock);
+
+ pull_ptr += till_end;
+ if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+ pull_ptr -= snd_efw_resp_buf_size;
length -= till_end;
buf += till_end;
count += till_end;
remained -= till_end;
}
-
- efw->resp_queues--;
}
+ /*
+ * All of tasks can read from the buffer nearly simultaneously, but the
+ * last position for each task is different depending on the length of
+ * given buffer. Here, for simplicity, a position of buffer is set by
+ * the latest task. It's better for a listening application to allow one
+ * thread to read from the buffer. Unless, each task can read different
+ * sequence of responses depending on variation of buffer length.
+ */
+ efw->pull_ptr = pull_ptr;
+
+ spin_unlock_irq(&efw->lock);
+
return count;
}
@@ -76,14 +99,17 @@ static long
hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
loff_t *offset)
{
- union snd_firewire_event event;
+ union snd_firewire_event event = {
+ .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+ };
- memset(&event, 0, sizeof(event));
+ spin_lock_irq(&efw->lock);
- event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
event.lock_status.status = (efw->dev_lock_count > 0);
efw->dev_lock_changed = false;
+ spin_unlock_irq(&efw->lock);
+
count = min_t(long, count, sizeof(event.lock_status));
if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
{
struct snd_efw *efw = hwdep->private_data;
DEFINE_WAIT(wait);
+ bool dev_lock_changed;
+ bool queued;
spin_lock_irq(&efw->lock);
- while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+ dev_lock_changed = efw->dev_lock_changed;
+ queued = efw->push_ptr != efw->pull_ptr;
+
+ while (!dev_lock_changed && !queued) {
prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irq(&efw->lock);
schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&efw->lock);
+ dev_lock_changed = efw->dev_lock_changed;
+ queued = efw->push_ptr != efw->pull_ptr;
}
- if (efw->dev_lock_changed)
+ spin_unlock_irq(&efw->lock);
+
+ if (dev_lock_changed)
count = hwdep_read_locked(efw, buf, count, offset);
- else if (efw->resp_queues > 0)
+ else if (queued)
count = hwdep_read_resp_buf(efw, buf, count, offset);
- spin_unlock_irq(&efw->lock);
-
return count;
}
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
poll_wait(file, &efw->hwdep_wait, wait);
spin_lock_irq(&efw->lock);
- if (efw->dev_lock_changed || (efw->resp_queues > 0))
+ if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
events = POLLIN | POLLRDNORM;
else
events = 0;
diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
index 0639dcb13f7d..beb0a0ffee57 100644
--- a/sound/firewire/fireworks/fireworks_proc.c
+++ b/sound/firewire/fireworks/fireworks_proc.c
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
else
consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
- snd_iprintf(buffer, "%d %d/%d\n",
- efw->resp_queues, consumed, snd_efw_resp_buf_size);
+ snd_iprintf(buffer, "%d/%d\n",
+ consumed, snd_efw_resp_buf_size);
}
static void
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index f550808d1784..36a08ba51ec7 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
size_t capacity, till_end;
struct snd_efw_transaction *t;
- spin_lock_irq(&efw->lock);
-
t = (struct snd_efw_transaction *)data;
length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
+ spin_lock_irq(&efw->lock);
+
if (efw->push_ptr < efw->pull_ptr)
capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
}
/* for hwdep */
- efw->resp_queues++;
wake_up(&efw->hwdep_wait);
*rcode = RCODE_COMPLETE;
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index 131267c3a042..106406cbfaa3 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -16,31 +16,14 @@
#include "tascam.h"
-static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
- long count)
-{
- union snd_firewire_event event;
-
- memset(&event, 0, sizeof(event));
-
- event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
- event.lock_status.status = (tscm->dev_lock_count > 0);
- tscm->dev_lock_changed = false;
-
- count = min_t(long, count, sizeof(event.lock_status));
-
- if (copy_to_user(buf, &event, count))
- return -EFAULT;
-
- return count;
-}
-
static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
loff_t *offset)
{
struct snd_tscm *tscm = hwdep->private_data;
DEFINE_WAIT(wait);
- union snd_firewire_event event;
+ union snd_firewire_event event = {
+ .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+ };
spin_lock_irq(&tscm->lock);
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
spin_lock_irq(&tscm->lock);
}
- memset(&event, 0, sizeof(event));
- count = hwdep_read_locked(tscm, buf, count);
+ event.lock_status.status = (tscm->dev_lock_count > 0);
+ tscm->dev_lock_changed = false;
+
spin_unlock_irq(&tscm->lock);
+ count = min_t(long, count, sizeof(event.lock_status));
+
+ if (copy_to_user(buf, &event, count))
+ return -EFAULT;
+
return count;
}
diff --git a/sound/hda/array.c b/sound/hda/array.c
index 516795baa7db..5dfa610e4471 100644
--- a/sound/hda/array.c
+++ b/sound/hda/array.c
@@ -21,13 +21,15 @@ void *snd_array_new(struct snd_array *array)
return NULL;
if (array->used >= array->alloced) {
int num = array->alloced + array->alloc_align;
+ int oldsize = array->alloced * array->elem_size;
int size = (num + 1) * array->elem_size;
void *nlist;
if (snd_BUG_ON(num >= 4096))
return NULL;
- nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO);
+ nlist = krealloc(array->list, size, GFP_KERNEL);
if (!nlist)
return NULL;
+ memset(nlist + oldsize, 0, size - oldsize);
array->list = nlist;
array->alloced = num;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8218cace8fea..d4671973d889 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -944,20 +944,23 @@ static int azx_resume(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
struct hda_intel *hda;
+ struct hdac_bus *bus;
if (!card)
return 0;
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ bus = azx_bus(chip);
if (chip->disabled || hda->init_failed || !chip->running)
return 0;
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
- && hda->need_i915_power) {
- snd_hdac_display_power(azx_bus(chip), true);
- haswell_set_bclk(hda);
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ snd_hdac_display_power(bus, true);
+ if (hda->need_i915_power)
+ haswell_set_bclk(hda);
}
+
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
@@ -967,6 +970,11 @@ static int azx_resume(struct device *dev)
hda_intel_init_chip(chip, true);
+ /* power down again for link-controlled chips */
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+ !hda->need_i915_power)
+ snd_hdac_display_power(bus, false);
+
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
trace_azx_resume(chip);
@@ -1046,6 +1054,7 @@ static int azx_runtime_resume(struct device *dev)
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ bus = azx_bus(chip);
if (chip->disabled || hda->init_failed)
return 0;
@@ -1053,15 +1062,9 @@ static int azx_runtime_resume(struct device *dev)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- bus = azx_bus(chip);
- if (hda->need_i915_power) {
- snd_hdac_display_power(bus, true);
+ snd_hdac_display_power(bus, true);
+ if (hda->need_i915_power)
haswell_set_bclk(hda);
- } else {
- /* toggle codec wakeup bit for STATESTS read */
- snd_hdac_set_codec_wakeup(bus, true);
- snd_hdac_set_codec_wakeup(bus, false);
- }
}
/* Read STATESTS before controller reset */
@@ -1081,6 +1084,11 @@ static int azx_runtime_resume(struct device *dev)
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
~STATESTS_INT_MASK);
+ /* power down again for link-controlled chips */
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+ !hda->need_i915_power)
+ snd_hdac_display_power(bus, false);
+
trace_azx_runtime_resume(chip);
return 0;
}
@@ -2288,6 +2296,8 @@ static const struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* ATI HDMI */
+ { PCI_DEVICE(0x1002, 0x0002),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x1308),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x157a),
@@ -2356,6 +2366,10 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0xaae8),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0xaae0),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0xaaf0),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
/* VIA VT8251/VT8237A */
{ PCI_DEVICE(0x1106, 0x3288),
.driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index f7bcd8dbac14..a8045b8a2a18 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -51,8 +51,10 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
#define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
#define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
+#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
- || is_skylake(codec) || is_broxton(codec))
+ || is_skylake(codec) || is_broxton(codec) \
+ || is_kabylake(codec))
#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
@@ -3584,6 +3586,7 @@ HDA_CODEC_ENTRY(0x80862807, "Haswell HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_generic_hdmi),
+HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_generic_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index abcb5a6a1cd9..eaee626ab185 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4674,6 +4674,22 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
}
}
+static void alc298_fixup_speaker_volume(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ /* The speaker is routed to the Node 0x06 by a mistake, as a result
+ we can't adjust the speaker's volume since this node does not has
+ Amp-out capability. we change the speaker's route to:
+ Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) -> Node 0x17 (
+ Pin Complex), since Node 0x02 has Amp-out caps, we can adjust
+ speaker's volume now. */
+
+ hda_nid_t conn1[1] = { 0x0c };
+ snd_hda_override_conn_list(codec, 0x17, 1, conn1);
+ }
+}
+
/* Hook to update amp GPIO4 for automute */
static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
struct hda_jack_callback *jack)
@@ -4823,6 +4839,8 @@ enum {
ALC280_FIXUP_HP_HEADSET_MIC,
ALC221_FIXUP_HP_FRONT_MIC,
ALC292_FIXUP_TPT460,
+ ALC298_FIXUP_SPK_VOLUME,
+ ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5478,6 +5496,21 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
},
+ [ALC298_FIXUP_SPK_VOLUME] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_speaker_volume,
+ .chained = true,
+ .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ },
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x90170151 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5522,8 +5555,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5799,6 +5834,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1b, 0x01014020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
+ {0x1b, 0x02011020},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170150},
{0x1b, 0x02011020},
{0x21, 0x0221105f}),
@@ -5851,6 +5890,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60170},
{0x14, 0x90170120},
{0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60180},
+ {0x14, 0x90170120},
+ {0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index ba8def5665c4..6726143c7fc5 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -298,8 +298,9 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
clk_enable(ssc_p->ssc->clk);
ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
- /* Reset the SSC to keep it at a clean status */
- ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
+ /* Reset the SSC unless initialized to keep it in a clean state */
+ if (!ssc_p->initialized)
+ ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dir = 0;
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 553b35a2d717..26a5356fb30e 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -765,11 +765,11 @@ config SND_SOC_WCD_CPE
config AUDIO_EXT_CLK
tristate
- default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_MSM8X16_WCD=y
+ default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_MSMFALCON_CDC=y
config SND_SOC_WCD_MBHC
tristate
- default y if (SND_SOC_MSM8909_WCD=y || SND_SOC_MSM8X16_WCD=y || SND_SOC_WCD9335=y) && SND_SOC_MDMCALIFORNIUM!=y
+ default y if (SND_SOC_MSM8909_WCD=y || SND_SOC_MSMFALCON_CDC=y || SND_SOC_WCD9335=y) && SND_SOC_MDMCALIFORNIUM!=y
config SND_SOC_WCD_DSP_MGR
tristate
@@ -994,6 +994,7 @@ config SND_SOC_MSM_HDMI_CODEC_RX
HDMI audio drivers should be built only if the platform
supports hdmi panel.
-source "sound/soc/codecs/msm8x16/Kconfig"
+source "sound/soc/codecs/msmfalcon_cdc/Kconfig"
+source "sound/soc/codecs/msm_sdw/Kconfig"
endmenu
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 5f21eb37eae4..9dcdc517b9ea 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -425,4 +425,5 @@ obj-$(CONFIG_SND_SOC_MSM_STUB) += snd-soc-msm-stub.o
# Amp
obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
-obj-y += msm8x16/
+obj-y += msmfalcon_cdc/
+obj-y += msm_sdw/
diff --git a/sound/soc/codecs/msm8x16/Makefile b/sound/soc/codecs/msm8x16/Makefile
deleted file mode 100644
index 36a3d046a307..000000000000
--- a/sound/soc/codecs/msm8x16/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-snd-soc-msm8952-wcd-objs := msm8x16-wcd.o msm8x16-wcd-tables.o msm89xx-regmap.o
-obj-$(CONFIG_SND_SOC_MSM8X16_WCD) += snd-soc-msm8952-wcd.o msm8916-wcd-irq.o
-
diff --git a/sound/soc/codecs/msm8x16/msm8x16-wcd-tables.c b/sound/soc/codecs/msm8x16/msm8x16-wcd-tables.c
deleted file mode 100644
index b969639b10eb..000000000000
--- a/sound/soc/codecs/msm8x16/msm8x16-wcd-tables.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "msm8x16-wcd.h"
-
-const u8 msm89xx_pmic_cdc_reg_readable[MSM89XX_PMIC_CDC_CACHE_SIZE] = {
- [MSM89XX_PMIC_DIGITAL_REVISION1] = 1,
- [MSM89XX_PMIC_DIGITAL_REVISION2] = 1,
- [MSM89XX_PMIC_DIGITAL_PERPH_TYPE] = 1,
- [MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_RT_STS] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_SET_TYPE] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_POLARITY_HIGH] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_POLARITY_LOW] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_EN_SET] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_EN_CLR] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_LATCHED_STS] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_PENDING_STS] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_MID_SEL] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_PRIORITY] = 1,
- [MSM89XX_PMIC_DIGITAL_GPIO_MODE] = 1,
- [MSM89XX_PMIC_DIGITAL_PIN_CTL_OE] = 1,
- [MSM89XX_PMIC_DIGITAL_PIN_CTL_DATA] = 1,
- [MSM89XX_PMIC_DIGITAL_PIN_STATUS] = 1,
- [MSM89XX_PMIC_DIGITAL_HDRIVE_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_RST_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_CONN_RX1_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_CONN_RX2_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_CONN_RX3_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_CONN_RX_LB_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_RX_CTL1] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_RX_CTL2] = 1,
- [MSM89XX_PMIC_DIGITAL_CDC_RX_CTL3] = 1,
- [MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA0] = 1,
- [MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA1] = 1,
- [MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA2] = 1,
- [MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA3] = 1,
- [MSM89XX_PMIC_DIGITAL_DIG_DEBUG_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_SPARE_0] = 1,
- [MSM89XX_PMIC_DIGITAL_SPARE_1] = 1,
- [MSM89XX_PMIC_DIGITAL_SPARE_2] = 1,
- [MSM89XX_PMIC_ANALOG_REVISION1] = 1,
- [MSM89XX_PMIC_ANALOG_REVISION2] = 1,
- [MSM89XX_PMIC_ANALOG_REVISION3] = 1,
- [MSM89XX_PMIC_ANALOG_REVISION4] = 1,
- [MSM89XX_PMIC_ANALOG_PERPH_TYPE] = 1,
- [MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE] = 1,
- [MSM89XX_PMIC_ANALOG_INT_RT_STS] = 1,
- [MSM89XX_PMIC_ANALOG_INT_SET_TYPE] = 1,
- [MSM89XX_PMIC_ANALOG_INT_POLARITY_HIGH] = 1,
- [MSM89XX_PMIC_ANALOG_INT_POLARITY_LOW] = 1,
- [MSM89XX_PMIC_ANALOG_INT_EN_SET] = 1,
- [MSM89XX_PMIC_ANALOG_INT_EN_CLR] = 1,
- [MSM89XX_PMIC_ANALOG_INT_LATCHED_STS] = 1,
- [MSM89XX_PMIC_ANALOG_INT_PENDING_STS] = 1,
- [MSM89XX_PMIC_ANALOG_INT_MID_SEL] = 1,
- [MSM89XX_PMIC_ANALOG_INT_PRIORITY] = 1,
- [MSM89XX_PMIC_ANALOG_MICB_1_EN] = 1,
- [MSM89XX_PMIC_ANALOG_MICB_1_VAL] = 1,
- [MSM89XX_PMIC_ANALOG_MICB_1_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS] = 1,
- [MSM89XX_PMIC_ANALOG_MICB_2_EN] = 1,
- [MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_BTN3_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_BTN4_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT] = 1,
- [MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT] = 1,
- [MSM89XX_PMIC_ANALOG_TX_1_EN] = 1,
- [MSM89XX_PMIC_ANALOG_TX_2_EN] = 1,
- [MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_1] = 1,
- [MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_2] = 1,
- [MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS] = 1,
- [MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV] = 1,
- [MSM89XX_PMIC_ANALOG_TX_3_EN] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_EN] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_CLK] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_DEGLITCH] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_FBCTRL] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_BIAS] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_VCTRL] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_TEST] = 1,
- [MSM89XX_PMIC_ANALOG_RX_CLOCK_DIVIDER] = 1,
- [MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_RX_COM_OCP_COUNT] = 1,
- [MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_LDO_OCP] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_RX_EAR_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_RX_ATEST] = 1,
- [MSM89XX_PMIC_ANALOG_RX_HPH_STATUS] = 1,
- [MSM89XX_PMIC_ANALOG_RX_EAR_STATUS] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_DRV_CLIP_DET] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_DRV_MISC] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG] = 1,
- [MSM89XX_PMIC_ANALOG_CURRENT_LIMIT] = 1,
- [MSM89XX_PMIC_ANALOG_OUTPUT_VOLTAGE] = 1,
- [MSM89XX_PMIC_ANALOG_BYPASS_MODE] = 1,
- [MSM89XX_PMIC_ANALOG_BOOST_EN_CTL] = 1,
- [MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO] = 1,
- [MSM89XX_PMIC_ANALOG_RDSON_MAX_DUTY_CYCLE] = 1,
- [MSM89XX_PMIC_ANALOG_BOOST_TEST1_1] = 1,
- [MSM89XX_PMIC_ANALOG_BOOST_TEST_2] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_SAR_STATUS] = 1,
- [MSM89XX_PMIC_ANALOG_SPKR_DRV_STATUS] = 1,
- [MSM89XX_PMIC_ANALOG_PBUS_ADD_CSR] = 1,
- [MSM89XX_PMIC_ANALOG_PBUS_ADD_SEL] = 1,
- [MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL] = 1,
- [MSM89XX_PMIC_DIGITAL_INT_LATCHED_CLR] = 1,
- [MSM89XX_PMIC_ANALOG_INT_LATCHED_CLR] = 1,
- [MSM89XX_PMIC_ANALOG_NCP_CLIM_ADDR] = 1,
- [MSM89XX_PMIC_DIGITAL_SEC_ACCESS] = 1,
- [MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3] = 1,
- [MSM89XX_PMIC_ANALOG_SEC_ACCESS] = 1,
-};
-
-const u8 msm89xx_cdc_core_reg_readable[MSM89XX_CDC_CORE_CACHE_SIZE] = {
- [MSM89XX_CDC_CORE_CLK_RX_RESET_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_RX_I2S_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_TX_I2S_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_OTHR_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_RX_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_MCLK_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_PDM_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_SD_CTL] = 1,
- [MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_B4_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_B4_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_B4_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_B5_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_B5_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_B5_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_B6_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_B6_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_B6_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_TOP_GAIN_UPDATE] = 1,
- [MSM89XX_CDC_CORE_TOP_CTL] = 1,
- [MSM89XX_CDC_CORE_DEBUG_DESER1_CTL] = 1,
- [MSM89XX_CDC_CORE_DEBUG_DESER2_CTL] = 1,
- [MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG] = 1,
- [MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG] = 1,
- [MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX1_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX1_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX1_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX2_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX2_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX2_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX3_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_RX3_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_TX_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL] = 1,
- [MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL] = 1,
- [MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER] = 1,
- [MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER] = 1,
- [MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER] = 1,
- [MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER] = 1,
- [MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN] = 1,
- [MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN] = 1,
- [MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN] = 1,
- [MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN] = 1,
- [MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG] = 1,
- [MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG] = 1,
- [MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG] = 1,
- [MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG] = 1,
- [MSM89XX_CDC_CORE_TX1_MUX_CTL] = 1,
- [MSM89XX_CDC_CORE_TX2_MUX_CTL] = 1,
- [MSM89XX_CDC_CORE_TX3_MUX_CTL] = 1,
- [MSM89XX_CDC_CORE_TX4_MUX_CTL] = 1,
- [MSM89XX_CDC_CORE_TX1_CLK_FS_CTL] = 1,
- [MSM89XX_CDC_CORE_TX2_CLK_FS_CTL] = 1,
- [MSM89XX_CDC_CORE_TX3_CLK_FS_CTL] = 1,
- [MSM89XX_CDC_CORE_TX4_CLK_FS_CTL] = 1,
- [MSM89XX_CDC_CORE_TX1_DMIC_CTL] = 1,
- [MSM89XX_CDC_CORE_TX2_DMIC_CTL] = 1,
- [MSM89XX_CDC_CORE_TX3_DMIC_CTL] = 1,
- [MSM89XX_CDC_CORE_TX4_DMIC_CTL] = 1,
-};
diff --git a/sound/soc/codecs/msm8x16/msm8x16-wcd.c b/sound/soc/codecs/msm8x16/msm8x16-wcd.c
deleted file mode 100644
index c73414681c5a..000000000000
--- a/sound/soc/codecs/msm8x16/msm8x16-wcd.c
+++ /dev/null
@@ -1,6022 +0,0 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/spmi.h>
-#include <linux/of_gpio.h>
-#include <linux/regulator/consumer.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/qdsp6v2/apr.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <sound/q6afe-v2.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <sound/q6core.h>
-#include <soc/qcom/subsystem_notif.h>
-#include "../../msm/msmfalcon-common.h"
-#include "../wcd-mbhc-v2.h"
-#include "msm8916-wcd-irq.h"
-#include "msm8x16-wcd.h"
-
-#define DRV_NAME "msm-codec"
-#define MSM89XX_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
-#define MSM89XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
- SNDRV_PCM_FMTBIT_S24_LE)
-
-#define NUM_INTERPOLATORS 3
-#define BITS_PER_REG 8
-#define MSM89XX_TX_PORT_NUMBER 4
-
-#define MSM89XX_I2S_MASTER_MODE_MASK 0x08
-#define MSM89XX_DIGITAL_CODEC_BASE_ADDR 0x771C000
-#define PMIC_SLAVE_ID_0 0
-#define PMIC_SLAVE_ID_1 1
-
-#define PMIC_MBG_OK 0x2C08
-#define PMIC_LDO7_EN_CTL 0x4646
-#define MASK_MSB_BIT 0x80
-
-#define CODEC_DT_MAX_PROP_SIZE 40
-#define MSM89XX_DIGITAL_CODEC_REG_SIZE 0x400
-#define MAX_ON_DEMAND_SUPPLY_NAME_LENGTH 64
-
-#define MCLK_RATE_9P6MHZ 9600000
-#define MCLK_RATE_12P288MHZ 12288000
-
-#define BUS_DOWN 1
-
-/*
- *50 Milliseconds sufficient for DSP bring up in the modem
- * after Sub System Restart
- */
-#define ADSP_STATE_READY_TIMEOUT_MS 50
-
-#define HPHL_PA_DISABLE (0x01 << 1)
-#define HPHR_PA_DISABLE (0x01 << 2)
-#define EAR_PA_DISABLE (0x01 << 3)
-#define SPKR_PA_DISABLE (0x01 << 4)
-
-enum {
- BOOST_SWITCH = 0,
- BOOST_ALWAYS,
- BYPASS_ALWAYS,
- BOOST_ON_FOREVER,
-};
-
-#define EAR_PMD 0
-#define EAR_PMU 1
-#define SPK_PMD 2
-#define SPK_PMU 3
-
-#define MICBIAS_DEFAULT_VAL 1800000
-#define MICBIAS_MIN_VAL 1600000
-#define MICBIAS_STEP_SIZE 50000
-
-#define DEFAULT_BOOST_VOLTAGE 5000
-#define MIN_BOOST_VOLTAGE 4000
-#define MAX_BOOST_VOLTAGE 5550
-#define BOOST_VOLTAGE_STEP 50
-
-#define MSM89XX_MBHC_BTN_COARSE_ADJ 100 /* in mV */
-#define MSM89XX_MBHC_BTN_FINE_ADJ 12 /* in mV */
-
-#define VOLTAGE_CONVERTER(value, min_value, step_size)\
- ((value - min_value)/step_size)
-
-enum {
- AIF1_PB = 0,
- AIF1_CAP,
- AIF2_VIFEED,
- NUM_CODEC_DAIS,
-};
-
-enum {
- RX_MIX1_INP_SEL_ZERO = 0,
- RX_MIX1_INP_SEL_IIR1,
- RX_MIX1_INP_SEL_IIR2,
- RX_MIX1_INP_SEL_RX1,
- RX_MIX1_INP_SEL_RX2,
- RX_MIX1_INP_SEL_RX3,
-};
-
-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
-static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
-static struct snd_soc_dai_driver msm8x16_wcd_i2s_dai[];
-/* By default enable the internal speaker boost */
-static bool spkr_boost_en = true;
-
-#define MSM89XX_ACQUIRE_LOCK(x) \
- mutex_lock_nested(&x, SINGLE_DEPTH_NESTING)
-
-#define MSM89XX_RELEASE_LOCK(x) mutex_unlock(&x)
-
-
-/* Codec supports 2 IIR filters */
-enum {
- IIR1 = 0,
- IIR2,
- IIR_MAX,
-};
-
-/* Codec supports 5 bands */
-enum {
- BAND1 = 0,
- BAND2,
- BAND3,
- BAND4,
- BAND5,
- BAND_MAX,
-};
-
-struct hpf_work {
- struct msm8x16_wcd_priv *msm8x16_wcd;
- u32 decimator;
- u8 tx_hpf_cut_of_freq;
- struct delayed_work dwork;
-};
-
-static struct hpf_work tx_hpf_work[NUM_DECIMATORS];
-
-static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
- "cdc-vdd-mic-bias",
-};
-
-static unsigned long rx_digital_gain_reg[] = {
- MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
- MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
- MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
-};
-
-static unsigned long tx_digital_gain_reg[] = {
- MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
- MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
-};
-
-enum {
- MSM89XX_SPMI_DIGITAL = 0,
- MSM89XX_SPMI_ANALOG,
- MSM89XX_CODEC_CORE,
- MAX_MSM89XX_DEVICE
-};
-
-static struct wcd_mbhc_register
- wcd_mbhc_registers[WCD_MBHC_REG_FUNC_MAX] = {
-
- WCD_MBHC_REGISTER("WCD_MBHC_L_DET_EN",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x80, 7, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_GND_DET_EN",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x40, 6, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_MECH_DETECTION_TYPE",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x20, 5, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_MIC_CLAMP_CTL",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x18, 3, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_ELECT_DETECTION_TYPE",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x01, 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_CTRL",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0xC0, 6, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x20, 5, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PLUG_TYPE",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x10, 4, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_GND_PLUG_TYPE",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x08, 3, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_SW_HPH_LP_100K_TO_GND",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x01, 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_ELECT_SCHMT_ISRC",
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x06, 1, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_FSM_EN",
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x80, 7, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_INSREM_DBNC",
- MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0xF0, 4, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_BTN_DBNC",
- MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0x0C, 2, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HS_VREF",
- MSM89XX_PMIC_ANALOG_MBHC_BTN3_CTL, 0x03, 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HS_COMP_RESULT",
- MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x01,
- 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_MIC_SCHMT_RESULT",
- MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x02,
- 1, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HPHL_SCHMT_RESULT",
- MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x08,
- 3, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HPHR_SCHMT_RESULT",
- MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x04,
- 2, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_OCP_FSM_EN",
- MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0x10, 4, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_BTN_RESULT",
- MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT, 0xFF, 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_BTN_ISRC_CTL",
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x70, 4, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_ELECT_RESULT",
- MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0xFF,
- 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_MICB_CTRL",
- MSM89XX_PMIC_ANALOG_MICB_2_EN, 0xC0, 6, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HPH_CNP_WG_TIME",
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFC, 2, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HPHR_PA_EN",
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x10, 4, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PA_EN",
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x20, 5, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_HPH_PA_EN",
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x30, 4, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_SWCH_LEVEL_REMOVE",
- MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT,
- 0x10, 4, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
- MSM89XX_PMIC_ANALOG_MICB_2_EN, 0x20, 5, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN",
- 0, 0, 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS",
- 0, 0, 0, 0),
- WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL",
- 0, 0, 0, 0),
-};
-
-struct msm8x16_wcd_spmi {
- struct spmi_device *spmi;
- int base;
-};
-
-/* Multiply gain_adj and offset by 1000 and 100 to avoid float arithmetic */
-static const struct wcd_imped_i_ref imped_i_ref[] = {
- {I_h4_UA, 8, 800, 9000, 10000},
- {I_pt5_UA, 10, 100, 990, 4600},
- {I_14_UA, 17, 14, 1050, 700},
- {I_l4_UA, 10, 4, 1165, 110},
- {I_1_UA, 0, 1, 1200, 65},
-};
-
-static const struct wcd_mbhc_intr intr_ids = {
- .mbhc_sw_intr = MSM89XX_IRQ_MBHC_HS_DET,
- .mbhc_btn_press_intr = MSM89XX_IRQ_MBHC_PRESS,
- .mbhc_btn_release_intr = MSM89XX_IRQ_MBHC_RELEASE,
- .mbhc_hs_ins_intr = MSM89XX_IRQ_MBHC_INSREM_DET1,
- .mbhc_hs_rem_intr = MSM89XX_IRQ_MBHC_INSREM_DET,
- .hph_left_ocp = MSM89XX_IRQ_HPHL_OCP,
- .hph_right_ocp = MSM89XX_IRQ_HPHR_OCP,
-};
-
-static int msm_digcdc_clock_control(bool flag);
-static int msm8x16_wcd_dt_parse_vreg_info(struct device *dev,
- struct msm8x16_wcd_regulator *vreg,
- const char *vreg_name, bool ondemand);
-static struct msm8x16_wcd_pdata *msm8x16_wcd_populate_dt_pdata(
- struct device *dev);
-static int msm8x16_wcd_enable_ext_mb_source(struct wcd_mbhc *mbhc,
- bool turn_on);
-static void msm8x16_trim_btn_reg(struct snd_soc_codec *codec);
-static void msm8x16_wcd_set_micb_v(struct snd_soc_codec *codec);
-static void msm8x16_wcd_set_boost_v(struct snd_soc_codec *codec);
-static void msm8x16_wcd_set_auto_zeroing(struct snd_soc_codec *codec,
- bool enable);
-static void msm8x16_wcd_configure_cap(struct snd_soc_codec *codec,
- bool micbias1, bool micbias2);
-static bool msm8x16_wcd_use_mb(struct snd_soc_codec *codec);
-
-struct msm8x16_wcd_spmi msm8x16_wcd_modules[MAX_MSM89XX_DEVICE];
-
-static void *adsp_state_notifier;
-
-static struct snd_soc_codec *registered_codec;
-static struct snd_soc_codec *registered_digcodec;
-
-static int get_codec_version(struct msm8x16_wcd_priv *msm8x16_wcd)
-{
- if (msm8x16_wcd->codec_version == DIANGU)
- return DIANGU;
- else if (msm8x16_wcd->codec_version == CAJON_2_0)
- return CAJON_2_0;
- else if (msm8x16_wcd->codec_version == CAJON)
- return CAJON;
- else if (msm8x16_wcd->codec_version == CONGA)
- return CONGA;
- else if (msm8x16_wcd->pmic_rev == TOMBAK_2_0)
- return TOMBAK_2_0;
- else if (msm8x16_wcd->pmic_rev == TOMBAK_1_0)
- return TOMBAK_1_0;
-
- pr_err("%s: unsupported codec version\n", __func__);
- return UNSUPPORTED;
-}
-
-static int msm_digcdc_clock_control(bool flag)
-{
- int ret = -EINVAL;
- struct msm_asoc_mach_data *pdata = NULL;
-
- pdata = snd_soc_card_get_drvdata(registered_codec->component.card);
-
- if (flag) {
- mutex_lock(&pdata->cdc_int_mclk0_mutex);
- if (atomic_read(&pdata->int_mclk0_enabled) == false) {
- pdata->digital_cdc_core_clk.enable = 1;
- ret = afe_set_lpass_clock_v2(
- AFE_PORT_ID_INT0_MI2S_RX,
- &pdata->digital_cdc_core_clk);
- if (ret < 0) {
- pr_err("failed to enable the INT_MCLK0\n");
- goto err_mclk;
- }
- pr_err("enabled digital codec core clk\n");
- atomic_set(&pdata->int_mclk0_enabled, true);
- schedule_delayed_work(&pdata->disable_int_mclk0_work,
- 50);
- }
-err_mclk:
- mutex_unlock(&pdata->cdc_int_mclk0_mutex);
- return ret;
- }
- return 0;
-}
-
-void enable_digital_callback(void *flag)
-{
- msm_digcdc_clock_control(true);
-}
-
-void disable_digital_callback(void *flag)
-{
- msm_digcdc_clock_control(false);
-}
-
-static int snd_soc_read_wrapper(struct snd_soc_codec *codec, u16 reg)
-{
- int ret = -EINVAL;
- struct msm8x16_wcd *msm8x16_wcd = codec->control_data;
-
- pr_err("%s reg = %x\n", __func__, reg);
- mutex_lock(&msm8x16_wcd->io_lock);
- if (MSM89XX_IS_PMIC_CDC_REG(reg))
- ret = snd_soc_read(codec, reg);
- else if (MSM89XX_IS_CDC_CORE_REG(reg))
- ret = snd_soc_read(registered_digcodec, reg);
- mutex_unlock(&msm8x16_wcd->io_lock);
-
- return ret;
-}
-
-static int snd_soc_write_wrapper(struct snd_soc_codec *codec, u16 reg, u8 val)
-{
- int ret = -EINVAL;
- struct msm8x16_wcd *msm8x16_wcd = codec->control_data;
-
- pr_err("%s reg = %x\n", __func__, reg);
- mutex_lock(&msm8x16_wcd->io_lock);
- if (MSM89XX_IS_PMIC_CDC_REG(reg))
- ret = snd_soc_write(codec, reg, val);
- else if (MSM89XX_IS_CDC_CORE_REG(reg))
- ret = snd_soc_write(registered_digcodec, reg, val);
- mutex_unlock(&msm8x16_wcd->io_lock);
-
- return ret;
-}
-
-static int snd_soc_update_bits_wrapper(struct snd_soc_codec *codec,
- u16 reg, u8 mask, u8 val)
-{
- int ret = -EINVAL;
- struct msm8x16_wcd *msm8x16_wcd = codec->control_data;
-
- pr_err("%s reg = %x\n", __func__, reg);
- mutex_lock(&msm8x16_wcd->io_lock);
- if (MSM89XX_IS_PMIC_CDC_REG(reg))
- ret = snd_soc_update_bits(codec, reg, mask, val);
- else if (MSM89XX_IS_CDC_CORE_REG(reg))
- ret = snd_soc_update_bits(registered_digcodec, reg, mask, val);
- mutex_unlock(&msm8x16_wcd->io_lock);
-
- return ret;
-}
-
-static void wcd_mbhc_meas_imped(struct snd_soc_codec *codec,
- s16 *impedance_l, s16 *impedance_r)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if ((msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_BOTH) ||
- (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHL)) {
- /* Enable ZDET_L_MEAS_EN */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x08, 0x08);
- /* Wait for 2ms for measurement to complete */
- usleep_range(2000, 2100);
- /* Read Left impedance value from Result1 */
- *impedance_l = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
- /* Enable ZDET_R_MEAS_EN */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x08, 0x00);
- }
- if ((msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_BOTH) ||
- (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x04, 0x04);
- /* Wait for 2ms for measurement to complete */
- usleep_range(2000, 2100);
- /* Read Right impedance value from Result1 */
- *impedance_r = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x04, 0x00);
- }
-}
-
-static void msm8x16_set_ref_current(struct snd_soc_codec *codec,
- enum wcd_curr_ref curr_ref)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- pr_err("%s: curr_ref: %d\n", __func__, curr_ref);
-
- if (get_codec_version(msm8x16_wcd) < CAJON)
- pr_err("%s: Setting ref current not required\n", __func__);
-
- msm8x16_wcd->imped_i_ref = imped_i_ref[curr_ref];
-
- switch (curr_ref) {
- case I_h4_UA:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0x07, 0x01);
- break;
- case I_pt5_UA:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0x07, 0x04);
- break;
- case I_14_UA:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0x07, 0x03);
- break;
- case I_l4_UA:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0x07, 0x01);
- break;
- case I_1_UA:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0x07, 0x00);
- break;
- default:
- pr_err("%s: No ref current set\n", __func__);
- break;
- }
-}
-
-static bool msm8x16_adj_ref_current(struct snd_soc_codec *codec,
- s16 *impedance_l, s16 *impedance_r)
-{
- int i = 2;
- s16 compare_imp = 0;
-
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)
- compare_imp = *impedance_r;
- else
- compare_imp = *impedance_l;
-
- if (get_codec_version(msm8x16_wcd) < CAJON) {
- pr_err("%s: Reference current adjustment not required\n",
- __func__);
- return false;
- }
-
- while (compare_imp < imped_i_ref[i].min_val) {
- msm8x16_set_ref_current(codec,
- imped_i_ref[++i].curr_ref);
- wcd_mbhc_meas_imped(codec,
- impedance_l, impedance_r);
- compare_imp = (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)
- ? *impedance_r : *impedance_l;
- }
-
- return true;
-}
-
-void msm8x16_wcd_spk_ext_pa_cb(
- int (*codec_spk_ext_pa)(struct snd_soc_codec *codec,
- int enable), struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- pr_err("%s: Enter\n", __func__);
- msm8x16_wcd->codec_spk_ext_pa_cb = codec_spk_ext_pa;
-}
-
-void msm8x16_wcd_hph_comp_cb(
- int (*codec_hph_comp_gpio)(bool enable), struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- pr_err("%s: Enter\n", __func__);
- msm8x16_wcd->codec_hph_comp_gpio = codec_hph_comp_gpio;
-}
-
-static void msm8x16_wcd_compute_impedance(struct snd_soc_codec *codec, s16 l,
- s16 r, uint32_t *zl, uint32_t *zr, bool high)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- uint32_t rl = 0, rr = 0;
- struct wcd_imped_i_ref R = msm8x16_wcd->imped_i_ref;
- int codec_ver = get_codec_version(msm8x16_wcd);
-
- switch (codec_ver) {
- case TOMBAK_1_0:
- case TOMBAK_2_0:
- case CONGA:
- if (high) {
- pr_err("%s: This plug has high range impedance\n",
- __func__);
- rl = (uint32_t)(((100 * (l * 400 - 200))/96) - 230);
- rr = (uint32_t)(((100 * (r * 400 - 200))/96) - 230);
- } else {
- pr_err("%s: This plug has low range impedance\n",
- __func__);
- rl = (uint32_t)(((1000 * (l * 2 - 1))/1165) - (13/10));
- rr = (uint32_t)(((1000 * (r * 2 - 1))/1165) - (13/10));
- }
- break;
- case CAJON:
- case CAJON_2_0:
- case DIANGU:
- if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHL) {
- rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
- (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
- rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
- - R.offset * R.gain_adj)/(R.gain_adj * 100));
- } else if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR) {
- rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
- - R.offset * R.gain_adj)/(R.gain_adj * 100));
- rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
- (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
- } else if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_NONE) {
- rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
- (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
- rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
- (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
- } else {
- rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
- - R.offset * R.gain_adj)/(R.gain_adj * 100));
- rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
- - R.offset * R.gain_adj)/(R.gain_adj * 100));
- }
- break;
- default:
- pr_err("%s: No codec mentioned\n", __func__);
- break;
- }
- *zl = rl;
- *zr = rr;
-}
-
-static struct firmware_cal *msm8x16_wcd_get_hwdep_fw_cal(
- struct wcd_mbhc *mbhc,
- enum wcd_cal_type type)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd;
- struct firmware_cal *hwdep_cal;
- struct snd_soc_codec *codec = mbhc->codec;
-
- if (!codec) {
- pr_err("%s: NULL codec pointer\n", __func__);
- return NULL;
- }
- msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- hwdep_cal = wcdcal_get_fw_cal(msm8x16_wcd->fw_data, type);
- if (!hwdep_cal) {
- dev_err(codec->dev, "%s: cal not sent by %d\n",
- __func__, type);
- return NULL;
- }
- return hwdep_cal;
-}
-
-static void wcd9xxx_spmi_irq_control(struct snd_soc_codec *codec,
- int irq, bool enable)
-{
- if (enable)
- wcd9xxx_spmi_enable_irq(irq);
- else
- wcd9xxx_spmi_disable_irq(irq);
-}
-
-static void msm8x16_mbhc_clk_setup(struct snd_soc_codec *codec,
- bool enable)
-{
- if (enable)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x08, 0x08);
- else
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x08, 0x00);
-}
-
-static int msm8x16_mbhc_map_btn_code_to_num(struct snd_soc_codec *codec)
-{
- int btn_code;
- int btn;
-
- btn_code = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
-
- switch (btn_code) {
- case 0:
- btn = 0;
- break;
- case 1:
- btn = 1;
- break;
- case 3:
- btn = 2;
- break;
- case 7:
- btn = 3;
- break;
- case 15:
- btn = 4;
- break;
- default:
- btn = -EINVAL;
- break;
- };
-
- return btn;
-}
-
-static bool msm8x16_spmi_lock_sleep(struct wcd_mbhc *mbhc, bool lock)
-{
- if (lock)
- return wcd9xxx_spmi_lock_sleep();
- wcd9xxx_spmi_unlock_sleep();
- return 0;
-}
-
-static bool msm8x16_wcd_micb_en_status(struct wcd_mbhc *mbhc, int micb_num)
-{
- if (micb_num == MIC_BIAS_1)
- return (snd_soc_read_wrapper(mbhc->codec,
- MSM89XX_PMIC_ANALOG_MICB_1_EN) &
- 0x80);
- if (micb_num == MIC_BIAS_2)
- return (snd_soc_read_wrapper(mbhc->codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN) &
- 0x80);
- return false;
-}
-
-static void msm8x16_wcd_enable_master_bias(struct snd_soc_codec *codec,
- bool enable)
-{
- if (enable)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
- 0x30, 0x30);
- else
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
- 0x30, 0x00);
-}
-
-static void msm8x16_wcd_mbhc_common_micb_ctrl(struct snd_soc_codec *codec,
- int event, bool enable)
-{
- u16 reg;
- u8 mask;
- u8 val;
-
- switch (event) {
- case MBHC_COMMON_MICB_PRECHARGE:
- reg = MSM89XX_PMIC_ANALOG_MICB_1_CTL;
- mask = 0x60;
- val = (enable ? 0x60 : 0x00);
- break;
- case MBHC_COMMON_MICB_SET_VAL:
- reg = MSM89XX_PMIC_ANALOG_MICB_1_VAL;
- mask = 0xFF;
- val = (enable ? 0xC0 : 0x00);
- break;
- case MBHC_COMMON_MICB_TAIL_CURR:
- reg = MSM89XX_PMIC_ANALOG_MICB_1_EN;
- mask = 0x04;
- val = (enable ? 0x04 : 0x00);
- break;
- };
- snd_soc_update_bits_wrapper(codec, reg, mask, val);
-}
-
-static void msm8x16_wcd_mbhc_internal_micbias_ctrl(struct snd_soc_codec *codec,
- int micbias_num, bool enable)
-{
- if (micbias_num == 1) {
- if (enable)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS,
- 0x10, 0x10);
- else
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS,
- 0x10, 0x00);
- }
-}
-
-static bool msm8x16_wcd_mbhc_hph_pa_on_status(struct snd_soc_codec *codec)
-{
- return (snd_soc_read_wrapper(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN) &
- 0x30) ? true : false;
-}
-
-static void msm8x16_wcd_mbhc_program_btn_thr(struct snd_soc_codec *codec,
- s16 *btn_low, s16 *btn_high,
- int num_btn, bool is_micbias)
-{
- int i;
- u32 course, fine, reg_val;
- u16 reg_addr = MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL;
- s16 *btn_voltage;
-
- btn_voltage = ((is_micbias) ? btn_high : btn_low);
-
- for (i = 0; i < num_btn; i++) {
- course = (btn_voltage[i] / MSM89XX_MBHC_BTN_COARSE_ADJ);
- fine = ((btn_voltage[i] % MSM89XX_MBHC_BTN_COARSE_ADJ) /
- MSM89XX_MBHC_BTN_FINE_ADJ);
-
- reg_val = (course << 5) | (fine << 2);
- snd_soc_update_bits_wrapper(codec, reg_addr, 0xFC, reg_val);
- pr_err("%s: course: %d fine: %d reg_addr: %x reg_val: %x\n",
- __func__, course, fine, reg_addr, reg_val);
- reg_addr++;
- }
-}
-
-static void msm8x16_wcd_mbhc_calc_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
- uint32_t *zr)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- s16 impedance_l, impedance_r;
- s16 impedance_l_fixed;
- s16 reg0, reg1, reg2, reg3, reg4;
- bool high = false;
- bool min_range_used = false;
-
- WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
- reg0 = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER);
- reg1 = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL);
- reg2 = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2);
- reg3 = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN);
- reg4 = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL);
-
- msm8x16_wcd->imped_det_pin = WCD_MBHC_DET_BOTH;
- mbhc->hph_type = WCD_MBHC_HPH_NONE;
-
- /* disable FSM and micbias and enable pullup*/
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x80, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0xA5, 0x25);
- /*
- * Enable legacy electrical detection current sources
- * and disable fast ramp and enable manual switching
- * of extra capacitance
- */
- pr_err("%s: Setup for impedance det\n", __func__);
-
- msm8x16_set_ref_current(codec, I_h4_UA);
-
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
- 0x06, 0x02);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER,
- 0x02, 0x02);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL,
- 0x02, 0x00);
-
- pr_err("%s: Start performing impedance detection\n",
- __func__);
-
- wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
-
- if (impedance_l > 2 || impedance_r > 2) {
- high = true;
- if (!mbhc->mbhc_cfg->mono_stero_detection) {
- /* Set ZDET_CHG to 0 to discharge ramp */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x02, 0x00);
- /* wait 40ms for the discharge ramp to complete */
- usleep_range(40000, 40100);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
- 0x03, 0x00);
- msm8x16_wcd->imped_det_pin = (impedance_l > 2 &&
- impedance_r > 2) ?
- WCD_MBHC_DET_NONE :
- ((impedance_l > 2) ?
- WCD_MBHC_DET_HPHR :
- WCD_MBHC_DET_HPHL);
- if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_NONE)
- goto exit;
- } else {
- if (get_codec_version(msm8x16_wcd) >= CAJON) {
- if (impedance_l == 63 && impedance_r == 63) {
- pr_err("%s: HPHL and HPHR are floating\n",
- __func__);
- msm8x16_wcd->imped_det_pin =
- WCD_MBHC_DET_NONE;
- mbhc->hph_type = WCD_MBHC_HPH_NONE;
- } else if (impedance_l == 63
- && impedance_r < 63) {
- pr_err("%s: Mono HS with HPHL floating\n",
- __func__);
- msm8x16_wcd->imped_det_pin =
- WCD_MBHC_DET_HPHR;
- mbhc->hph_type = WCD_MBHC_HPH_MONO;
- } else if (impedance_r == 63 &&
- impedance_l < 63) {
- pr_err("%s: Mono HS with HPHR floating\n",
- __func__);
- msm8x16_wcd->imped_det_pin =
- WCD_MBHC_DET_HPHL;
- mbhc->hph_type = WCD_MBHC_HPH_MONO;
- } else if (impedance_l > 3 && impedance_r > 3 &&
- (impedance_l == impedance_r)) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
- 0x06, 0x06);
- wcd_mbhc_meas_imped(codec, &impedance_l,
- &impedance_r);
- if (impedance_r == impedance_l)
- pr_err("%s: Mono Headset\n",
- __func__);
- msm8x16_wcd->imped_det_pin =
- WCD_MBHC_DET_NONE;
- mbhc->hph_type =
- WCD_MBHC_HPH_MONO;
- } else {
- pr_err("%s: STEREO headset is found\n",
- __func__);
- msm8x16_wcd->imped_det_pin =
- WCD_MBHC_DET_BOTH;
- mbhc->hph_type = WCD_MBHC_HPH_STEREO;
- }
- }
- }
- }
-
- msm8x16_set_ref_current(codec, I_pt5_UA);
- msm8x16_set_ref_current(codec, I_14_UA);
-
- /* Enable RAMP_L, RAMP_R & ZDET_CHG*/
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
- 0x03, 0x03);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x02, 0x02);
- /* wait for 50msec for the HW to apply ramp on HPHL and HPHR */
- usleep_range(50000, 50100);
- /* Enable ZDET_DISCHG_CAP_CTL to add extra capacitance */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x01, 0x01);
- /* wait for 5msec for the voltage to get stable */
- usleep_range(5000, 5100);
-
-
- wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
-
- min_range_used = msm8x16_adj_ref_current(codec,
- &impedance_l, &impedance_r);
- if (!mbhc->mbhc_cfg->mono_stero_detection) {
- /* Set ZDET_CHG to 0 to discharge ramp */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x02, 0x00);
- /* wait for 40msec for the capacitor to discharge */
- usleep_range(40000, 40100);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
- 0x03, 0x00);
- goto exit;
- }
-
- /* we are setting ref current to the minimun range or the measured
- * value larger than the minimum value, so min_range_used is true.
- * If the headset is mono headset with either HPHL or HPHR floating
- * then we have already done the mono stereo detection and do not
- * need to continue further.
- */
-
- if (!min_range_used ||
- msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHL ||
- msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)
- goto exit;
-
-
- /* Disable Set ZDET_CONN_RAMP_L and enable ZDET_CONN_FIXED_L */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
- 0x02, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
- 0x02, 0x02);
- /* Set ZDET_CHG to 0 */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x02, 0x00);
- /* wait for 40msec for the capacitor to discharge */
- usleep_range(40000, 40100);
-
- /* Set ZDET_CONN_RAMP_R to 0 */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
- 0x01, 0x00);
- /* Enable ZDET_L_MEAS_EN */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x08, 0x08);
- /* wait for 2msec for the HW to compute left inpedance value */
- usleep_range(2000, 2100);
- /* Read Left impedance value from Result1 */
- impedance_l_fixed = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
- /* Disable ZDET_L_MEAS_EN */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x08, 0x00);
- /*
- * Assume impedance_l is L1, impedance_l_fixed is L2.
- * If the following condition is met, we can take this
- * headset as mono one with impedance of L2.
- * Otherwise, take it as stereo with impedance of L1.
- * Condition:
- * abs[(L2-0.5L1)/(L2+0.5L1)] < abs [(L2-L1)/(L2+L1)]
- */
- if ((abs(impedance_l_fixed - impedance_l/2) *
- (impedance_l_fixed + impedance_l)) >=
- (abs(impedance_l_fixed - impedance_l) *
- (impedance_l_fixed + impedance_l/2))) {
- pr_err("%s: STEREO plug type detected\n",
- __func__);
- mbhc->hph_type = WCD_MBHC_HPH_STEREO;
- } else {
- pr_err("%s: MONO plug type detected\n",
- __func__);
- mbhc->hph_type = WCD_MBHC_HPH_MONO;
- impedance_l = impedance_l_fixed;
- }
- /* Enable ZDET_CHG */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x02, 0x02);
- /* wait for 10msec for the capacitor to charge */
- usleep_range(10000, 10100);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
- 0x02, 0x02);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
- 0x02, 0x00);
- /* Set ZDET_CHG to 0 to discharge HPHL */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
- 0x02, 0x00);
- /* wait for 40msec for the capacitor to discharge */
- usleep_range(40000, 40100);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
- 0x02, 0x00);
-
-exit:
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, reg4);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN, reg3);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL, reg1);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, reg0);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, reg2);
- msm8x16_wcd_compute_impedance(codec, impedance_l, impedance_r,
- zl, zr, high);
-
- pr_err("%s: RL %d ohm, RR %d ohm\n", __func__, *zl, *zr);
- pr_err("%s: Impedance detection completed\n", __func__);
-}
-
-static int msm8x16_register_notifier(struct wcd_mbhc *mbhc,
- struct notifier_block *nblock,
- bool enable)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- struct msm8x16_wcd_priv *msm8x16_wcd =
- snd_soc_codec_get_drvdata(codec);
-
- if (enable)
- return blocking_notifier_chain_register(&msm8x16_wcd->notifier,
- nblock);
- return blocking_notifier_chain_unregister(
- &msm8x16_wcd->notifier, nblock);
-}
-
-static int msm8x16_wcd_request_irq(struct snd_soc_codec *codec,
- int irq, irq_handler_t handler,
- const char *name, void *data)
-{
- return wcd9xxx_spmi_request_irq(irq, handler, name, data);
-}
-
-static int msm8x16_wcd_free_irq(struct snd_soc_codec *codec,
- int irq, void *data)
-{
- return wcd9xxx_spmi_free_irq(irq, data);
-}
-
-static const struct wcd_mbhc_cb mbhc_cb = {
- .enable_mb_source = msm8x16_wcd_enable_ext_mb_source,
- .trim_btn_reg = msm8x16_trim_btn_reg,
- .compute_impedance = msm8x16_wcd_mbhc_calc_impedance,
- .set_micbias_value = msm8x16_wcd_set_micb_v,
- .set_auto_zeroing = msm8x16_wcd_set_auto_zeroing,
- .get_hwdep_fw_cal = msm8x16_wcd_get_hwdep_fw_cal,
- .set_cap_mode = msm8x16_wcd_configure_cap,
- .register_notifier = msm8x16_register_notifier,
- .request_irq = msm8x16_wcd_request_irq,
- .irq_control = wcd9xxx_spmi_irq_control,
- .free_irq = msm8x16_wcd_free_irq,
- .clk_setup = msm8x16_mbhc_clk_setup,
- .map_btn_code_to_num = msm8x16_mbhc_map_btn_code_to_num,
- .lock_sleep = msm8x16_spmi_lock_sleep,
- .micbias_enable_status = msm8x16_wcd_micb_en_status,
- .mbhc_bias = msm8x16_wcd_enable_master_bias,
- .mbhc_common_micb_ctrl = msm8x16_wcd_mbhc_common_micb_ctrl,
- .micb_internal = msm8x16_wcd_mbhc_internal_micbias_ctrl,
- .hph_pa_on_status = msm8x16_wcd_mbhc_hph_pa_on_status,
- .set_btn_thr = msm8x16_wcd_mbhc_program_btn_thr,
- .extn_use_mb = msm8x16_wcd_use_mb,
-};
-
-static const uint32_t wcd_imped_val[] = {4, 8, 12, 13, 16,
- 20, 24, 28, 32,
- 36, 40, 44, 48};
-
-void msm8x16_notifier_call(struct snd_soc_codec *codec,
- const enum wcd_notify_event event)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- pr_err("%s: notifier call event %d\n", __func__, event);
- blocking_notifier_call_chain(&msm8x16_wcd->notifier, event,
- &msm8x16_wcd->mbhc);
-}
-
-static void msm8x16_wcd_boost_on(struct snd_soc_codec *codec)
-{
- u8 dest = 0x00;
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-
- if ((dest & MASK_MSB_BIT) == 0) {
- pr_err("PMIC MBG not ON, enable codec hw_en MB bit again\n");
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
- /* Allow 1ms for PMIC MBG state to be updated */
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- }
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3,
- 0x0F, 0x0F);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SEC_ACCESS,
- 0xA5);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3,
- 0x0F);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
- 0x30);
- if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_CURRENT_LIMIT,
- 0x82);
- } else {
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_CURRENT_LIMIT,
- 0xA2);
- }
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 0x69, 0x69);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG,
- 0x01, 0x01);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO,
- 0x88, 0x88);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
- 0x03, 0x03);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL,
- 0xE1, 0xE1);
- if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x20, 0x20);
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
- 0xDF, 0xDF);
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
- 0x40, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x20, 0x20);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
- 0x80, 0x80);
- usleep_range(500, 510);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
- 0x40, 0x40);
- usleep_range(500, 510);
- }
-}
-
-static void msm8x16_wcd_boost_off(struct snd_soc_codec *codec)
-{
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
- 0xDF, 0x5F);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x20, 0x00);
-}
-
-static void msm8x16_wcd_bypass_on(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SEC_ACCESS,
- 0xA5);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3,
- 0x07);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x02, 0x02);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x01, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x40, 0x40);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x80, 0x80);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
- 0xDF, 0xDF);
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x20, 0x20);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x20, 0x20);
- }
-}
-
-static void msm8x16_wcd_bypass_off(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
- 0x80, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x80, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x02, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x40, 0x00);
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_BYPASS_MODE,
- 0x20, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x20, 0x00);
- }
-}
-
-static void msm8x16_wcd_boost_mode_sequence(struct snd_soc_codec *codec,
- int flag)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (flag == EAR_PMU) {
- switch (msm8x16_wcd->boost_option) {
- case BOOST_SWITCH:
- if (msm8x16_wcd->ear_pa_boost_set) {
- msm8x16_wcd_boost_off(codec);
- msm8x16_wcd_bypass_on(codec);
- }
- break;
- case BOOST_ALWAYS:
- msm8x16_wcd_boost_on(codec);
- break;
- case BYPASS_ALWAYS:
- msm8x16_wcd_bypass_on(codec);
- break;
- case BOOST_ON_FOREVER:
- msm8x16_wcd_boost_on(codec);
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- break;
- }
- } else if (flag == EAR_PMD) {
- switch (msm8x16_wcd->boost_option) {
- case BOOST_SWITCH:
- if (msm8x16_wcd->ear_pa_boost_set)
- msm8x16_wcd_bypass_off(codec);
- break;
- case BOOST_ALWAYS:
- msm8x16_wcd_boost_off(codec);
- /* 80ms for EAR boost to settle down */
- msleep(80);
- break;
- case BYPASS_ALWAYS:
- /* nothing to do as bypass on always */
- break;
- case BOOST_ON_FOREVER:
- /* nothing to do as boost on forever */
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- break;
- }
- } else if (flag == SPK_PMU) {
- switch (msm8x16_wcd->boost_option) {
- case BOOST_SWITCH:
- if (msm8x16_wcd->spk_boost_set) {
- msm8x16_wcd_bypass_off(codec);
- msm8x16_wcd_boost_on(codec);
- }
- break;
- case BOOST_ALWAYS:
- msm8x16_wcd_boost_on(codec);
- break;
- case BYPASS_ALWAYS:
- msm8x16_wcd_bypass_on(codec);
- break;
- case BOOST_ON_FOREVER:
- msm8x16_wcd_boost_on(codec);
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- break;
- }
- } else if (flag == SPK_PMD) {
- switch (msm8x16_wcd->boost_option) {
- case BOOST_SWITCH:
- if (msm8x16_wcd->spk_boost_set) {
- msm8x16_wcd_boost_off(codec);
- /*
- * Add 40 ms sleep for the spk
- * boost to settle down
- */
- msleep(40);
- }
- break;
- case BOOST_ALWAYS:
- msm8x16_wcd_boost_off(codec);
- /*
- * Add 40 ms sleep for the spk
- * boost to settle down
- */
- msleep(40);
- break;
- case BYPASS_ALWAYS:
- /* nothing to do as bypass on always */
- break;
- case BOOST_ON_FOREVER:
- /* nothing to do as boost on forever */
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- break;
- }
- }
-}
-
-static int msm8x16_wcd_dt_parse_vreg_info(struct device *dev,
- struct msm8x16_wcd_regulator *vreg, const char *vreg_name,
- bool ondemand)
-{
- int len, ret = 0;
- const __be32 *prop;
- char prop_name[CODEC_DT_MAX_PROP_SIZE];
- struct device_node *regnode = NULL;
- u32 prop_val;
-
- snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "%s-supply",
- vreg_name);
- regnode = of_parse_phandle(dev->of_node, prop_name, 0);
-
- if (!regnode) {
- dev_err(dev, "Looking up %s property in node %s failed\n",
- prop_name, dev->of_node->full_name);
- return -ENODEV;
- }
-
- dev_err(dev, "Looking up %s property in node %s\n",
- prop_name, dev->of_node->full_name);
-
- vreg->name = vreg_name;
- vreg->ondemand = ondemand;
-
- snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
- "qcom,%s-voltage", vreg_name);
- prop = of_get_property(dev->of_node, prop_name, &len);
-
- if (!prop || (len != (2 * sizeof(__be32)))) {
- dev_err(dev, "%s %s property\n",
- prop ? "invalid format" : "no", prop_name);
- return -EINVAL;
- }
- vreg->min_uv = be32_to_cpup(&prop[0]);
- vreg->max_uv = be32_to_cpup(&prop[1]);
-
- snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
- "qcom,%s-current", vreg_name);
-
- ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
- if (ret) {
- dev_err(dev, "Looking up %s property in node %s failed",
- prop_name, dev->of_node->full_name);
- return -EFAULT;
- }
- vreg->optimum_ua = prop_val;
-
- dev_err(dev, "%s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n\n", vreg->name,
- vreg->min_uv, vreg->max_uv, vreg->optimum_ua, vreg->ondemand);
- return 0;
-}
-
-static void msm8x16_wcd_dt_parse_boost_info(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd_priv =
- snd_soc_codec_get_drvdata(codec);
- const char *prop_name = "qcom,cdc-boost-voltage";
- int boost_voltage, ret;
-
- ret = of_property_read_u32(codec->dev->of_node, prop_name,
- &boost_voltage);
- if (ret) {
- dev_err(codec->dev, "Looking up %s property in node %s failed\n",
- prop_name, codec->dev->of_node->full_name);
- boost_voltage = DEFAULT_BOOST_VOLTAGE;
- }
- if (boost_voltage < MIN_BOOST_VOLTAGE ||
- boost_voltage > MAX_BOOST_VOLTAGE) {
- dev_err(codec->dev,
- "Incorrect boost voltage. Reverting to default\n");
- boost_voltage = DEFAULT_BOOST_VOLTAGE;
- }
-
- msm8x16_wcd_priv->boost_voltage =
- VOLTAGE_CONVERTER(boost_voltage, MIN_BOOST_VOLTAGE,
- BOOST_VOLTAGE_STEP);
- dev_err(codec->dev, "Boost voltage value is: %d\n",
- boost_voltage);
-}
-
-static void msm8x16_wcd_dt_parse_micbias_info(struct device *dev,
- struct wcd9xxx_micbias_setting *micbias)
-{
- const char *prop_name = "qcom,cdc-micbias-cfilt-mv";
- int ret;
-
- ret = of_property_read_u32(dev->of_node, prop_name,
- &micbias->cfilt1_mv);
- if (ret) {
- dev_err(dev, "Looking up %s property in node %s failed",
- prop_name, dev->of_node->full_name);
- micbias->cfilt1_mv = MICBIAS_DEFAULT_VAL;
- }
-}
-
-static struct msm8x16_wcd_pdata *msm8x16_wcd_populate_dt_pdata(
- struct device *dev)
-{
- struct msm8x16_wcd_pdata *pdata;
- int ret, static_cnt, ond_cnt, idx, i;
- const char *name = NULL;
- const char *static_prop_name = "qcom,cdc-static-supplies";
- const char *ond_prop_name = "qcom,cdc-on-demand-supplies";
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- static_cnt = of_property_count_strings(dev->of_node, static_prop_name);
- if (IS_ERR_VALUE(static_cnt)) {
- dev_err(dev, "%s: Failed to get static supplies %d\n", __func__,
- static_cnt);
- ret = -EINVAL;
- goto err;
- }
-
- /* On-demand supply list is an optional property */
- ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
- if (IS_ERR_VALUE(ond_cnt))
- ond_cnt = 0;
-
- WARN_ON(static_cnt <= 0 || ond_cnt < 0);
- if ((static_cnt + ond_cnt) > ARRAY_SIZE(pdata->regulator)) {
- dev_err(dev, "%s: Num of supplies %u > max supported %zd\n",
- __func__, (static_cnt + ond_cnt),
- ARRAY_SIZE(pdata->regulator));
- ret = -EINVAL;
- goto err;
- }
-
- for (idx = 0; idx < static_cnt; idx++) {
- ret = of_property_read_string_index(dev->of_node,
- static_prop_name, idx,
- &name);
- if (ret) {
- dev_err(dev, "%s: of read string %s idx %d error %d\n",
- __func__, static_prop_name, idx, ret);
- goto err;
- }
-
- dev_err(dev, "%s: Found static cdc supply %s\n", __func__,
- name);
- ret = msm8x16_wcd_dt_parse_vreg_info(dev,
- &pdata->regulator[idx],
- name, false);
- if (ret) {
- dev_err(dev, "%s:err parsing vreg for %s idx %d\n",
- __func__, name, idx);
- goto err;
- }
- }
-
- for (i = 0; i < ond_cnt; i++, idx++) {
- ret = of_property_read_string_index(dev->of_node, ond_prop_name,
- i, &name);
- if (ret) {
- dev_err(dev, "%s: err parsing on_demand for %s idx %d\n",
- __func__, ond_prop_name, i);
- goto err;
- }
-
- dev_err(dev, "%s: Found on-demand cdc supply %s\n", __func__,
- name);
- ret = msm8x16_wcd_dt_parse_vreg_info(dev,
- &pdata->regulator[idx],
- name, true);
- if (ret) {
- dev_err(dev, "%s: err parsing vreg on_demand for %s idx %d\n",
- __func__, name, idx);
- goto err;
- }
- }
- msm8x16_wcd_dt_parse_micbias_info(dev, &pdata->micbias);
- return pdata;
-err:
- devm_kfree(dev, pdata);
- dev_err(dev, "%s: Failed to populate DT data ret = %d\n",
- __func__, ret);
- return NULL;
-}
-
-static int msm8x16_wcd_codec_enable_on_demand_supply(
- struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- int ret = 0;
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- struct on_demand_supply *supply;
-
- if (w->shift >= ON_DEMAND_SUPPLIES_MAX) {
- dev_err(codec->dev, "%s: error index > MAX Demand supplies",
- __func__);
- ret = -EINVAL;
- goto out;
- }
- dev_err(codec->dev, "%s: supply: %s event: %d ref: %d\n",
- __func__, on_demand_supply_name[w->shift], event,
- atomic_read(&msm8x16_wcd->on_demand_list[w->shift].ref));
-
- supply = &msm8x16_wcd->on_demand_list[w->shift];
- WARN_ONCE(!supply->supply, "%s isn't defined\n",
- on_demand_supply_name[w->shift]);
- if (!supply->supply) {
- dev_err(codec->dev, "%s: err supply not present ond for %d",
- __func__, w->shift);
- goto out;
- }
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (atomic_inc_return(&supply->ref) == 1)
- ret = regulator_enable(supply->supply);
- if (ret)
- dev_err(codec->dev, "%s: Failed to enable %s\n",
- __func__,
- on_demand_supply_name[w->shift]);
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (atomic_read(&supply->ref) == 0) {
- dev_err(codec->dev, "%s: %s supply has been disabled.\n",
- __func__, on_demand_supply_name[w->shift]);
- goto out;
- }
- if (atomic_dec_return(&supply->ref) == 0)
- ret = regulator_disable(supply->supply);
- if (ret)
- dev_err(codec->dev, "%s: Failed to disable %s\n",
- __func__,
- on_demand_supply_name[w->shift]);
- break;
- default:
- break;
- }
-out:
- return ret;
-}
-
-static int msm8x16_wcd_codec_enable_clock_block(struct snd_soc_codec *codec,
- int enable)
-{
- struct msm_asoc_mach_data *pdata = NULL;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- if (enable) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x01, 0x01);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x03);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30, 0x30);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x0C);
- if (pdata->mclk_freq == MCLK_RATE_12P288MHZ)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x00);
- else if (pdata->mclk_freq == MCLK_RATE_9P6MHZ)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x01);
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x00);
-
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_charge_pump(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: event = %d\n", __func__, event);
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- msm8x16_wcd_codec_enable_clock_block(codec, 1);
- if (!(strcmp(w->name, "EAR CP"))) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x80, 0x80);
- msm8x16_wcd_boost_mode_sequence(codec, EAR_PMU);
- } else if (get_codec_version(msm8x16_wcd) == DIANGU) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x80, 0x80);
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0xC0, 0xC0);
- }
- break;
- case SND_SOC_DAPM_POST_PMU:
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- break;
- case SND_SOC_DAPM_POST_PMD:
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- if (!(strcmp(w->name, "EAR CP"))) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x80, 0x00);
- if (msm8x16_wcd->boost_option != BOOST_ALWAYS) {
- dev_err(codec->dev,
- "%s: boost_option:%d, tear down ear\n",
- __func__, msm8x16_wcd->boost_option);
- msm8x16_wcd_boost_mode_sequence(codec, EAR_PMD);
- }
- /*
- * Reset pa select bit from ear to hph after ear pa
- * is disabled and HPH DAC disable to reduce ear
- * turn off pop and avoid HPH pop in concurrency
- */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x80, 0x00);
- } else {
- if (get_codec_version(msm8x16_wcd) < DIANGU)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x40, 0x00);
- if (msm8x16_wcd->rx_bias_count == 0)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x80, 0x00);
- dev_err(codec->dev, "%s: rx_bias_count = %d\n",
- __func__, msm8x16_wcd->rx_bias_count);
- }
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_ear_pa_boost_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] =
- (msm8x16_wcd->ear_pa_boost_set ? 1 : 0);
- dev_err(codec->dev, "%s: msm8x16_wcd->ear_pa_boost_set = %d\n",
- __func__, msm8x16_wcd->ear_pa_boost_set);
- return 0;
-}
-
-static int msm8x16_wcd_ear_pa_boost_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd =
- snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
- msm8x16_wcd->ear_pa_boost_set =
- (ucontrol->value.integer.value[0] ? true : false);
- return 0;
-}
-
-static int msm8x16_wcd_pa_gain_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 ear_pa_gain;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
- ear_pa_gain = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL);
-
- ear_pa_gain = (ear_pa_gain >> 5) & 0x1;
-
- if (ear_pa_gain == 0x00) {
- ucontrol->value.integer.value[0] = 0;
- } else if (ear_pa_gain == 0x01) {
- ucontrol->value.integer.value[0] = 1;
- } else {
- dev_err(codec->dev, "%s: ERROR: Unsupported Ear Gain = 0x%x\n",
- __func__, ear_pa_gain);
- return -EINVAL;
- }
-
- ucontrol->value.integer.value[0] = ear_pa_gain;
- dev_err(codec->dev, "%s: ear_pa_gain = 0x%x\n",
- __func__, ear_pa_gain);
- return 0;
-}
-
-static int msm8x16_wcd_loopback_mode_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm_asoc_mach_data *pdata = NULL;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- return pdata->lb_mode;
-}
-
-static int msm8x16_wcd_loopback_mode_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm_asoc_mach_data *pdata = NULL;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- switch (ucontrol->value.integer.value[0]) {
- case 0:
- pdata->lb_mode = false;
- break;
- case 1:
- pdata->lb_mode = true;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int msm8x16_wcd_pa_gain_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 ear_pa_gain;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- switch (ucontrol->value.integer.value[0]) {
- case 0:
- ear_pa_gain = 0x00;
- break;
- case 1:
- ear_pa_gain = 0x20;
- break;
- default:
- return -EINVAL;
- }
-
- snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
- 0x20, ear_pa_gain);
- return 0;
-}
-
-static int msm8x16_wcd_hph_mode_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (msm8x16_wcd->hph_mode == NORMAL_MODE) {
- ucontrol->value.integer.value[0] = 0;
- } else if (msm8x16_wcd->hph_mode == HD2_MODE) {
- ucontrol->value.integer.value[0] = 1;
- } else {
- dev_err(codec->dev, "%s: ERROR: Default HPH Mode= %d\n",
- __func__, msm8x16_wcd->hph_mode);
- }
-
- dev_err(codec->dev, "%s: msm8x16_wcd->hph_mode = %d\n", __func__,
- msm8x16_wcd->hph_mode);
- return 0;
-}
-
-static int msm8x16_wcd_hph_mode_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- switch (ucontrol->value.integer.value[0]) {
- case 0:
- msm8x16_wcd->hph_mode = NORMAL_MODE;
- break;
- case 1:
- if (get_codec_version(msm8x16_wcd) >= DIANGU)
- msm8x16_wcd->hph_mode = HD2_MODE;
- break;
- default:
- msm8x16_wcd->hph_mode = NORMAL_MODE;
- break;
- }
- dev_err(codec->dev, "%s: msm8x16_wcd->hph_mode_set = %d\n",
- __func__, msm8x16_wcd->hph_mode);
- return 0;
-}
-
-static int msm8x16_wcd_boost_option_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (msm8x16_wcd->boost_option == BOOST_SWITCH) {
- ucontrol->value.integer.value[0] = 0;
- } else if (msm8x16_wcd->boost_option == BOOST_ALWAYS) {
- ucontrol->value.integer.value[0] = 1;
- } else if (msm8x16_wcd->boost_option == BYPASS_ALWAYS) {
- ucontrol->value.integer.value[0] = 2;
- } else if (msm8x16_wcd->boost_option == BOOST_ON_FOREVER) {
- ucontrol->value.integer.value[0] = 3;
- } else {
- dev_err(codec->dev, "%s: ERROR: Unsupported Boost option= %d\n",
- __func__, msm8x16_wcd->boost_option);
- return -EINVAL;
- }
-
- dev_err(codec->dev, "%s: msm8x16_wcd->boost_option = %d\n", __func__,
- msm8x16_wcd->boost_option);
- return 0;
-}
-
-static int msm8x16_wcd_boost_option_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- switch (ucontrol->value.integer.value[0]) {
- case 0:
- msm8x16_wcd->boost_option = BOOST_SWITCH;
- break;
- case 1:
- msm8x16_wcd->boost_option = BOOST_ALWAYS;
- break;
- case 2:
- msm8x16_wcd->boost_option = BYPASS_ALWAYS;
- msm8x16_wcd_bypass_on(codec);
- break;
- case 3:
- msm8x16_wcd->boost_option = BOOST_ON_FOREVER;
- msm8x16_wcd_boost_on(codec);
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- return -EINVAL;
- }
- dev_err(codec->dev, "%s: msm8x16_wcd->boost_option_set = %d\n",
- __func__, msm8x16_wcd->boost_option);
- return 0;
-}
-
-static int msm8x16_wcd_ext_spk_boost_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (msm8x16_wcd->ext_spk_boost_set == false)
- ucontrol->value.integer.value[0] = 0;
- else
- ucontrol->value.integer.value[0] = 1;
-
- dev_err(codec->dev, "%s: msm8x16_wcd->ext_spk_boost_set = %d\n",
- __func__, msm8x16_wcd->ext_spk_boost_set);
- return 0;
-}
-
-static int msm8x16_wcd_ext_spk_boost_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- switch (ucontrol->value.integer.value[0]) {
- case 0:
- msm8x16_wcd->ext_spk_boost_set = false;
- break;
- case 1:
- msm8x16_wcd->ext_spk_boost_set = true;
- break;
- default:
- return -EINVAL;
- }
- dev_err(codec->dev, "%s: msm8x16_wcd->spk_boost_set = %d\n",
- __func__, msm8x16_wcd->spk_boost_set);
- return 0;
-}
-static int msm8x16_wcd_get_iir_enable_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
-
- ucontrol->value.integer.value[0] =
- (snd_soc_read_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
- (1 << band_idx)) != 0;
-
- dev_err(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
- iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[0]);
- return 0;
-}
-
-static int msm8x16_wcd_put_iir_enable_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
- int value = ucontrol->value.integer.value[0];
-
- /* Mask first 5 bits, 6-8 are reserved */
- snd_soc_update_bits_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx),
- (1 << band_idx), (value << band_idx));
-
- dev_err(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
- iir_idx, band_idx,
- ((snd_soc_read_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
- (1 << band_idx)) != 0));
-
- return 0;
-}
-static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
- int iir_idx, int band_idx,
- int coeff_idx)
-{
- uint32_t value = 0;
-
- /* Address does not automatically update if reading */
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t)) & 0x7F);
-
- value |= snd_soc_read_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx));
-
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t) + 1) & 0x7F);
-
- value |= (snd_soc_read_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 8);
-
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t) + 2) & 0x7F);
-
- value |= (snd_soc_read_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 16);
-
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t) + 3) & 0x7F);
-
- /* Mask bits top 2 bits since they are reserved */
- value |= ((snd_soc_read_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL
- + 64 * iir_idx)) & 0x3f) << 24);
-
- return value;
-
-}
-
-static int msm8x16_wcd_get_iir_band_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
-
- ucontrol->value.integer.value[0] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 0);
- ucontrol->value.integer.value[1] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 1);
- ucontrol->value.integer.value[2] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 2);
- ucontrol->value.integer.value[3] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 3);
- ucontrol->value.integer.value[4] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 4);
-
- dev_err(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
- "%s: IIR #%d band #%d b1 = 0x%x\n"
- "%s: IIR #%d band #%d b2 = 0x%x\n"
- "%s: IIR #%d band #%d a1 = 0x%x\n"
- "%s: IIR #%d band #%d a2 = 0x%x\n",
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[0],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[1],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[2],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[3],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[4]);
- return 0;
-}
-
-static void set_iir_band_coeff(struct snd_soc_codec *codec,
- int iir_idx, int band_idx,
- uint32_t value)
-{
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
- (value & 0xFF));
-
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
- (value >> 8) & 0xFF);
-
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
- (value >> 16) & 0xFF);
-
- /* Mask top 2 bits, 7-8 are reserved */
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
- (value >> 24) & 0x3F);
-
-}
-
-static int msm8x16_wcd_put_iir_band_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
-
- /* Mask top bit it is reserved */
- /* Updates addr automatically for each B2 write */
- snd_soc_write_wrapper(codec,
- (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
- (band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
-
-
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[0]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[1]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[2]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[3]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[4]);
-
- dev_err(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
- "%s: IIR #%d band #%d b1 = 0x%x\n"
- "%s: IIR #%d band #%d b2 = 0x%x\n"
- "%s: IIR #%d band #%d a1 = 0x%x\n"
- "%s: IIR #%d band #%d a2 = 0x%x\n",
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 0),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 1),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 2),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 3),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 4));
- return 0;
-}
-
-static int msm8x16_wcd_compander_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- int comp_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int rx_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
-
- dev_err(codec->dev, "%s: msm8x16_wcd->comp[%d]_enabled[%d] = %d\n",
- __func__, comp_idx, rx_idx,
- msm8x16_wcd->comp_enabled[rx_idx]);
-
- ucontrol->value.integer.value[0] = msm8x16_wcd->comp_enabled[rx_idx];
-
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- return 0;
-}
-
-static int msm8x16_wcd_compander_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- int comp_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int rx_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
- int value = ucontrol->value.integer.value[0];
-
- dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- if (get_codec_version(msm8x16_wcd) >= DIANGU) {
- if (!value)
- msm8x16_wcd->comp_enabled[rx_idx] = 0;
- else
- msm8x16_wcd->comp_enabled[rx_idx] = comp_idx;
- }
-
- dev_err(codec->dev, "%s: msm8x16_wcd->comp[%d]_enabled[%d] = %d\n",
- __func__, comp_idx, rx_idx,
- msm8x16_wcd->comp_enabled[rx_idx]);
-
- return 0;
-}
-
-static const char * const msm8x16_wcd_loopback_mode_ctrl_text[] = {
- "DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_loopback_mode_ctl_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_loopback_mode_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_ear_pa_boost_ctrl_text[] = {
- "DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_ear_pa_boost_ctl_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_boost_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_ear_pa_gain_text[] = {
- "POS_1P5_DB", "POS_6_DB"};
-static const struct soc_enum msm8x16_wcd_ear_pa_gain_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_gain_text),
-};
-
-static const char * const msm8x16_wcd_boost_option_ctrl_text[] = {
- "BOOST_SWITCH", "BOOST_ALWAYS", "BYPASS_ALWAYS",
- "BOOST_ON_FOREVER"};
-static const struct soc_enum msm8x16_wcd_boost_option_ctl_enum[] = {
- SOC_ENUM_SINGLE_EXT(4, msm8x16_wcd_boost_option_ctrl_text),
-};
-static const char * const msm8x16_wcd_spk_boost_ctrl_text[] = {
- "DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_spk_boost_ctl_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_spk_boost_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_ext_spk_boost_ctrl_text[] = {
- "DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_ext_spk_boost_ctl_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ext_spk_boost_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_hph_mode_ctrl_text[] = {
- "NORMAL", "HD2"};
-static const struct soc_enum msm8x16_wcd_hph_mode_ctl_enum[] = {
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(msm8x16_wcd_hph_mode_ctrl_text),
- msm8x16_wcd_hph_mode_ctrl_text),
-};
-
-/*cut of frequency for high pass filter*/
-static const char * const cf_text[] = {
- "MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
-};
-
-static const struct soc_enum cf_dec1_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX1_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec2_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX2_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_rxmix1_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX1_B4_CTL, 0, 3, cf_text);
-
-static const struct soc_enum cf_rxmix2_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX2_B4_CTL, 0, 3, cf_text);
-
-static const struct soc_enum cf_rxmix3_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX3_B4_CTL, 0, 3, cf_text);
-
-static const struct snd_kcontrol_new msm8x16_wcd_snd_controls[] = {
-
- SOC_ENUM_EXT("RX HPH Mode", msm8x16_wcd_hph_mode_ctl_enum[0],
- msm8x16_wcd_hph_mode_get, msm8x16_wcd_hph_mode_set),
-
- SOC_ENUM_EXT("Boost Option", msm8x16_wcd_boost_option_ctl_enum[0],
- msm8x16_wcd_boost_option_get, msm8x16_wcd_boost_option_set),
-
- SOC_ENUM_EXT("EAR PA Boost", msm8x16_wcd_ear_pa_boost_ctl_enum[0],
- msm8x16_wcd_ear_pa_boost_get, msm8x16_wcd_ear_pa_boost_set),
-
- SOC_ENUM_EXT("EAR PA Gain", msm8x16_wcd_ear_pa_gain_enum[0],
- msm8x16_wcd_pa_gain_get, msm8x16_wcd_pa_gain_put),
-
- SOC_ENUM_EXT("Ext Spk Boost", msm8x16_wcd_ext_spk_boost_ctl_enum[0],
- msm8x16_wcd_ext_spk_boost_get, msm8x16_wcd_ext_spk_boost_set),
-
- SOC_ENUM_EXT("LOOPBACK Mode", msm8x16_wcd_loopback_mode_ctl_enum[0],
- msm8x16_wcd_loopback_mode_get, msm8x16_wcd_loopback_mode_put),
-
- SOC_SINGLE_TLV("ADC1 Volume", MSM89XX_PMIC_ANALOG_TX_1_EN, 3,
- 8, 0, analog_gain),
- SOC_SINGLE_TLV("ADC2 Volume", MSM89XX_PMIC_ANALOG_TX_2_EN, 3,
- 8, 0, analog_gain),
- SOC_SINGLE_TLV("ADC3 Volume", MSM89XX_PMIC_ANALOG_TX_3_EN, 3,
- 8, 0, analog_gain),
-
- SOC_SINGLE_SX_TLV("RX1 Digital Volume",
- MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX2 Digital Volume",
- MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX3 Digital Volume",
- MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
-
- SOC_SINGLE_SX_TLV("DEC1 Volume",
- MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC2 Volume",
- MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
- 0, -84, 40, digital_gain),
-
- SOC_SINGLE_SX_TLV("IIR1 INP1 Volume",
- MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP2 Volume",
- MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP3 Volume",
- MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP4 Volume",
- MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP1 Volume",
- MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL,
- 0, -84, 40, digital_gain),
-
- SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
- SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
-
- SOC_SINGLE("TX1 HPF Switch",
- MSM89XX_CDC_CORE_TX1_MUX_CTL, 3, 1, 0),
- SOC_SINGLE("TX2 HPF Switch",
- MSM89XX_CDC_CORE_TX2_MUX_CTL, 3, 1, 0),
-
- SOC_SINGLE("RX1 HPF Switch",
- MSM89XX_CDC_CORE_RX1_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX2 HPF Switch",
- MSM89XX_CDC_CORE_RX2_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX3 HPF Switch",
- MSM89XX_CDC_CORE_RX3_B5_CTL, 2, 1, 0),
-
- SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
- SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
- SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
-
- SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0,
- msm8x16_wcd_get_iir_enable_audio_mixer,
- msm8x16_wcd_put_iir_enable_audio_mixer),
-
- SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
- msm8x16_wcd_get_iir_band_audio_mixer,
- msm8x16_wcd_put_iir_band_audio_mixer),
-
- SOC_SINGLE_EXT("COMP0 RX1", COMPANDER_1, MSM89XX_RX1, 1, 0,
- msm8x16_wcd_compander_get, msm8x16_wcd_compander_set),
-
- SOC_SINGLE_EXT("COMP0 RX2", COMPANDER_1, MSM89XX_RX2, 1, 0,
- msm8x16_wcd_compander_get, msm8x16_wcd_compander_set),
-};
-
-static int tombak_hph_impedance_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int ret;
- uint32_t zl, zr;
- bool hphr;
- struct soc_multi_mixer_control *mc;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
-
- hphr = mc->shift;
- ret = wcd_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
- if (ret)
- pr_err("%s: Failed to get mbhc imped", __func__);
- pr_err("%s: zl %u, zr %u\n", __func__, zl, zr);
- ucontrol->value.integer.value[0] = hphr ? zr : zl;
-
- return 0;
-}
-
-static const struct snd_kcontrol_new impedance_detect_controls[] = {
- SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
- tombak_hph_impedance_get, NULL),
- SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
- tombak_hph_impedance_get, NULL),
-};
-
-static int tombak_get_hph_type(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct msm8x16_wcd_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct wcd_mbhc *mbhc;
-
- if (!priv) {
- pr_err("%s: msm8x16-wcd private data is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- mbhc = &priv->mbhc;
- if (!mbhc) {
- pr_err("%s: mbhc not initialized\n", __func__);
- return -EINVAL;
- }
-
- ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
- pr_err("%s: hph_type = %u\n", __func__, mbhc->hph_type);
-
- return 0;
-}
-
-static const struct snd_kcontrol_new hph_type_detect_controls[] = {
- SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
- tombak_get_hph_type, NULL),
-};
-
-static const char * const rx_mix1_text[] = {
- "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
-};
-
-static const char * const rx_mix2_text[] = {
- "ZERO", "IIR1", "IIR2"
-};
-
-static const char * const dec_mux_text[] = {
- "ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
-};
-
-static const char * const dec3_mux_text[] = {
- "ZERO", "DMIC3"
-};
-
-static const char * const dec4_mux_text[] = {
- "ZERO", "DMIC4"
-};
-
-static const char * const adc2_mux_text[] = {
- "ZERO", "INP2", "INP3"
-};
-
-static const char * const ext_spk_text[] = {
- "Off", "On"
-};
-
-static const char * const wsa_spk_text[] = {
- "ZERO", "WSA"
-};
-
-static const char * const rdac2_mux_text[] = {
- "ZERO", "RX2", "RX1"
-};
-
-static const char * const iir_inp1_text[] = {
- "ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
-};
-
-static const struct soc_enum adc2_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
- ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
-
-static const struct soc_enum ext_spk_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
- ARRAY_SIZE(ext_spk_text), ext_spk_text);
-
-static const struct soc_enum wsa_spk_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
- ARRAY_SIZE(wsa_spk_text), wsa_spk_text);
-
-/* RX1 MIX1 */
-static const struct soc_enum rx_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
- 0, 6, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
- 3, 6, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp3_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B2_CTL,
- 0, 6, rx_mix1_text);
-
-/* RX1 MIX2 */
-static const struct soc_enum rx_mix2_inp1_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B3_CTL,
- 0, 3, rx_mix2_text);
-
-/* RX2 MIX1 */
-static const struct soc_enum rx2_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
- 0, 6, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
- 3, 6, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp3_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
- 0, 6, rx_mix1_text);
-
-/* RX2 MIX2 */
-static const struct soc_enum rx2_mix2_inp1_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B3_CTL,
- 0, 3, rx_mix2_text);
-
-/* RX3 MIX1 */
-static const struct soc_enum rx3_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
- 0, 6, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
- 3, 6, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp3_chain_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
- 0, 6, rx_mix1_text);
-
-/* DEC */
-static const struct soc_enum dec1_mux_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
- 0, 6, dec_mux_text);
-
-static const struct soc_enum dec2_mux_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
- 3, 6, dec_mux_text);
-
-static const struct soc_enum dec3_mux_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX3_MUX_CTL, 0,
- ARRAY_SIZE(dec3_mux_text), dec3_mux_text);
-
-static const struct soc_enum dec4_mux_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX4_MUX_CTL, 0,
- ARRAY_SIZE(dec4_mux_text), dec4_mux_text);
-
-static const struct soc_enum rdac2_mux_enum =
- SOC_ENUM_SINGLE(MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
- 0, 3, rdac2_mux_text);
-
-static const struct soc_enum iir1_inp1_mux_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL,
- 0, 6, iir_inp1_text);
-
-static const struct soc_enum iir2_inp1_mux_enum =
- SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL,
- 0, 6, iir_inp1_text);
-
-static const struct snd_kcontrol_new ext_spk_mux =
- SOC_DAPM_ENUM("Ext Spk Switch Mux", ext_spk_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp3_mux =
- SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp3_mux =
- SOC_DAPM_ENUM("RX2 MIX1 INP3 Mux", rx2_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp3_mux =
- SOC_DAPM_ENUM("RX3 MIX1 INP3 Mux", rx3_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
- SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
- SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new tx_adc2_mux =
- SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);
-
-static int msm8x16_wcd_put_dec_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *w = wlist->widgets[0];
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int dec_mux, decimator;
- char *dec_name = NULL;
- char *widget_name = NULL;
- char *temp;
- u16 tx_mux_ctl_reg;
- u8 adc_dmic_sel = 0x0;
- int ret = 0;
- char *dec_num;
-
- if (ucontrol->value.enumerated.item[0] > e->items) {
- dev_err(codec->dev, "%s: Invalid enum value: %d\n",
- __func__, ucontrol->value.enumerated.item[0]);
- return -EINVAL;
- }
- dec_mux = ucontrol->value.enumerated.item[0];
-
- widget_name = kstrndup(w->name, 15, GFP_KERNEL);
- if (!widget_name) {
- dev_err(codec->dev, "%s: failed to copy string\n",
- __func__);
- return -ENOMEM;
- }
- temp = widget_name;
-
- dec_name = strsep(&widget_name, " ");
- widget_name = temp;
- if (!dec_name) {
- dev_err(codec->dev, "%s: Invalid decimator = %s\n",
- __func__, w->name);
- ret = -EINVAL;
- goto out;
- }
-
- dec_num = strpbrk(dec_name, "12");
- if (dec_num == NULL) {
- dev_err(codec->dev, "%s: Invalid DEC selected\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- ret = kstrtouint(dec_num, 10, &decimator);
- if (ret < 0) {
- dev_err(codec->dev, "%s: Invalid decimator = %s\n",
- __func__, dec_name);
- ret = -EINVAL;
- goto out;
- }
-
- dev_err(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n"
- , __func__, w->name, decimator, dec_mux);
-
- switch (decimator) {
- case 1:
- case 2:
- if ((dec_mux == 4) || (dec_mux == 5))
- adc_dmic_sel = 0x1;
- else
- adc_dmic_sel = 0x0;
- break;
- default:
- dev_err(codec->dev, "%s: Invalid Decimator = %u\n",
- __func__, decimator);
- ret = -EINVAL;
- goto out;
- }
-
- tx_mux_ctl_reg =
- MSM89XX_CDC_CORE_TX1_MUX_CTL + 32 * (decimator - 1);
-
- snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel);
-
- ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
-
-out:
- kfree(widget_name);
- return ret;
-}
-
-#define MSM89XX_DEC_ENUM(xname, xenum) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_enum_double, \
- .get = snd_soc_dapm_get_enum_double, \
- .put = msm8x16_wcd_put_dec_enum, \
- .private_value = (unsigned long)&xenum }
-
-static const struct snd_kcontrol_new dec1_mux =
- MSM89XX_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum);
-
-static const struct snd_kcontrol_new dec2_mux =
- MSM89XX_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum);
-
-static const struct snd_kcontrol_new dec3_mux =
- SOC_DAPM_ENUM("DEC3 MUX Mux", dec3_mux_enum);
-
-static const struct snd_kcontrol_new dec4_mux =
- SOC_DAPM_ENUM("DEC4 MUX Mux", dec4_mux_enum);
-
-static const struct snd_kcontrol_new rdac2_mux =
- SOC_DAPM_ENUM("RDAC2 MUX Mux", rdac2_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp1_mux =
- SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
-
-static const char * const ear_text[] = {
- "ZERO", "Switch",
-};
-
-static const struct soc_enum ear_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ear_text), ear_text);
-
-static const struct snd_kcontrol_new ear_pa_mux[] = {
- SOC_DAPM_ENUM("EAR_S", ear_enum)
-};
-
-static const struct snd_kcontrol_new wsa_spk_mux[] = {
- SOC_DAPM_ENUM("WSA Spk Switch", wsa_spk_enum)
-};
-
-static const struct snd_kcontrol_new iir2_inp1_mux =
- SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
-
-static const char * const hph_text[] = {
- "ZERO", "Switch",
-};
-
-static const struct soc_enum hph_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(hph_text), hph_text);
-
-static const struct snd_kcontrol_new hphl_mux[] = {
- SOC_DAPM_ENUM("HPHL", hph_enum)
-};
-
-static const struct snd_kcontrol_new hphr_mux[] = {
- SOC_DAPM_ENUM("HPHR", hph_enum)
-};
-
-static const struct snd_kcontrol_new spkr_mux[] = {
- SOC_DAPM_ENUM("SPK", hph_enum)
-};
-
-static const char * const lo_text[] = {
- "ZERO", "Switch",
-};
-
-static const struct soc_enum lo_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(hph_text), hph_text);
-
-static const struct snd_kcontrol_new lo_mux[] = {
- SOC_DAPM_ENUM("LINE_OUT", lo_enum)
-};
-
-static void msm8x16_wcd_codec_enable_adc_block(struct snd_soc_codec *codec,
- int enable)
-{
- struct msm8x16_wcd_priv *wcd8x16 = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s %d\n", __func__, enable);
-
- if (enable) {
- wcd8x16->adc_count++;
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
- 0x20, 0x20);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x10, 0x10);
- } else {
- wcd8x16->adc_count--;
- if (!wcd8x16->adc_count) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x10, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
- 0x20, 0x0);
- }
- }
-}
-
-static int msm8x16_wcd_codec_enable_adc(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- u16 adc_reg;
- u8 init_bit_shift;
-
- dev_err(codec->dev, "%s %d\n", __func__, event);
-
- adc_reg = MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_2;
-
- if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
- init_bit_shift = 5;
- else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
- (w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
- init_bit_shift = 4;
- else {
- dev_err(codec->dev, "%s: Error, invalid adc register\n",
- __func__);
- return -EINVAL;
- }
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- msm8x16_wcd_codec_enable_adc_block(codec, 1);
- if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x02);
- /*
- * Add delay of 10 ms to give sufficient time for the voltage
- * to shoot up and settle so that the txfe init does not
- * happen when the input voltage is changing too much.
- */
- usleep_range(10000, 10010);
- snd_soc_update_bits_wrapper(codec,
- adc_reg, 1 << init_bit_shift,
- 1 << init_bit_shift);
- if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
- 0x03, 0x00);
- else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
- (w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
- 0x03, 0x00);
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- break;
- case SND_SOC_DAPM_POST_PMU:
- /*
- * Add delay of 12 ms before deasserting the init
- * to reduce the tx pop
- */
- usleep_range(12000, 12010);
- snd_soc_update_bits_wrapper(codec,
- adc_reg, 1 << init_bit_shift, 0x00);
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- break;
- case SND_SOC_DAPM_POST_PMD:
- msm8x16_wcd_codec_enable_adc_block(codec, 0);
- if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x00);
- if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
- 0x03, 0x02);
- else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
- (w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
- 0x03, 0x02);
-
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s %d %s\n", __func__, event, w->name);
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x01);
- switch (msm8x16_wcd->boost_option) {
- case BOOST_SWITCH:
- if (!msm8x16_wcd->spk_boost_set)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
- 0x10, 0x10);
- break;
- case BOOST_ALWAYS:
- case BOOST_ON_FOREVER:
- break;
- case BYPASS_ALWAYS:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
- 0x10, 0x10);
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- break;
- }
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0xE0);
- if (get_codec_version(msm8x16_wcd) != TOMBAK_1_0)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x01);
- break;
- case SND_SOC_DAPM_POST_PMU:
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- switch (msm8x16_wcd->boost_option) {
- case BOOST_SWITCH:
- if (msm8x16_wcd->spk_boost_set)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 0xEF, 0xEF);
- else
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
- 0x10, 0x00);
- break;
- case BOOST_ALWAYS:
- case BOOST_ON_FOREVER:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 0xEF, 0xEF);
- break;
- case BYPASS_ALWAYS:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- break;
- }
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
- snd_soc_update_bits_wrapper(codec, w->reg, 0x80, 0x80);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x01);
- msm8x16_wcd->mute_mask |= SPKR_PA_DISABLE;
- /*
- * Add 1 ms sleep for the mute to take effect
- */
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x10);
- if (get_codec_version(msm8x16_wcd) < CAJON_2_0)
- msm8x16_wcd_boost_mode_sequence(codec, SPK_PMD);
- snd_soc_update_bits_wrapper(codec, w->reg, 0x80, 0x00);
- switch (msm8x16_wcd->boost_option) {
- case BOOST_SWITCH:
- if (msm8x16_wcd->spk_boost_set)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 0xEF, 0x69);
- break;
- case BOOST_ALWAYS:
- case BOOST_ON_FOREVER:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 0xEF, 0x69);
- break;
- case BYPASS_ALWAYS:
- break;
- default:
- pr_err("%s: invalid boost option: %d\n", __func__,
- msm8x16_wcd->boost_option);
- break;
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0x00);
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
- if (get_codec_version(msm8x16_wcd) != TOMBAK_1_0)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
- if (get_codec_version(msm8x16_wcd) >= CAJON_2_0)
- msm8x16_wcd_boost_mode_sequence(codec, SPK_PMD);
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_dig_clk(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- struct msm_asoc_mach_data *pdata = NULL;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
-
- dev_err(codec->dev, "%s event %d w->name %s\n", __func__,
- event, w->name);
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- msm8x16_wcd_codec_enable_clock_block(codec, 1);
- snd_soc_update_bits_wrapper(codec, w->reg, 0x80, 0x80);
- msm8x16_wcd_boost_mode_sequence(codec, SPK_PMU);
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (msm8x16_wcd->rx_bias_count == 0)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x80, 0x00);
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_dmic(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- u8 dmic_clk_en;
- u16 dmic_clk_reg;
- s32 *dmic_clk_cnt;
- unsigned int dmic;
- int ret;
- char *dec_num = strpbrk(w->name, "12");
-
- if (dec_num == NULL) {
- dev_err(codec->dev, "%s: Invalid DMIC\n", __func__);
- return -EINVAL;
- }
-
- ret = kstrtouint(dec_num, 10, &dmic);
- if (ret < 0) {
- dev_err(codec->dev,
- "%s: Invalid DMIC line on the codec\n", __func__);
- return -EINVAL;
- }
-
- switch (dmic) {
- case 1:
- case 2:
- dmic_clk_en = 0x01;
- dmic_clk_cnt = &(msm8x16_wcd->dmic_1_2_clk_cnt);
- dmic_clk_reg = MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL;
- dev_err(codec->dev,
- "%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
- __func__, event, dmic, *dmic_clk_cnt);
- break;
- default:
- dev_err(codec->dev, "%s: Invalid DMIC Selection\n", __func__);
- return -EINVAL;
- }
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- (*dmic_clk_cnt)++;
- if (*dmic_clk_cnt == 1) {
- snd_soc_update_bits_wrapper(codec, dmic_clk_reg,
- 0x0E, 0x02);
- snd_soc_update_bits_wrapper(codec, dmic_clk_reg,
- dmic_clk_en, dmic_clk_en);
- }
- if (dmic == 1)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TX1_DMIC_CTL, 0x07, 0x01);
- if (dmic == 2)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TX2_DMIC_CTL, 0x07, 0x01);
- break;
- case SND_SOC_DAPM_POST_PMD:
- (*dmic_clk_cnt)--;
- if (*dmic_clk_cnt == 0)
- snd_soc_update_bits_wrapper(codec, dmic_clk_reg,
- dmic_clk_en, 0);
- break;
- }
- return 0;
-}
-
-static bool msm8x16_wcd_use_mb(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (get_codec_version(msm8x16_wcd) < CAJON)
- return true;
- else
- return false;
-}
-
-static void msm8x16_wcd_set_auto_zeroing(struct snd_soc_codec *codec,
- bool enable)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (get_codec_version(msm8x16_wcd) < CONGA) {
- if (enable)
- /*
- * Set autozeroing for special headset detection and
- * buttons to work.
- */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0x18, 0x10);
- else
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN,
- 0x18, 0x00);
-
- } else {
- pr_err("%s: Auto Zeroing is not required from CONGA\n",
- __func__);
- }
-}
-
-static void msm8x16_trim_btn_reg(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if (get_codec_version(msm8x16_wcd) == TOMBAK_1_0) {
- pr_err("%s: This device needs to be trimmed\n", __func__);
- /*
- * Calculate the trim value for each device used
- * till is comes in production by hardware team
- */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SEC_ACCESS,
- 0xA5, 0xA5);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_TRIM_CTRL2,
- 0xFF, 0x30);
- } else {
- pr_err("%s: This device is trimmed at ATE\n", __func__);
- }
-}
-static int msm8x16_wcd_enable_ext_mb_source(struct wcd_mbhc *mbhc,
- bool turn_on)
-{
- int ret = 0;
- static int count;
- struct snd_soc_codec *codec = mbhc->codec;
- struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
-
- dev_err(codec->dev, "%s turn_on: %d count: %d\n", __func__, turn_on,
- count);
- if (turn_on) {
- if (!count) {
- ret = snd_soc_dapm_force_enable_pin(dapm,
- "MICBIAS_REGULATOR");
- snd_soc_dapm_sync(dapm);
- }
- count++;
- } else {
- if (count > 0)
- count--;
- if (!count) {
- ret = snd_soc_dapm_disable_pin(dapm,
- "MICBIAS_REGULATOR");
- snd_soc_dapm_sync(dapm);
- }
- }
-
- if (ret)
- dev_err(codec->dev, "%s: Failed to %s external micbias source\n",
- __func__, turn_on ? "enable" : "disabled");
- else
- dev_err(codec->dev, "%s: %s external micbias source\n",
- __func__, turn_on ? "Enabled" : "Disabled");
-
- return ret;
-}
-
-static int msm8x16_wcd_codec_enable_micbias(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd =
- snd_soc_codec_get_drvdata(codec);
- u16 micb_int_reg;
- char *internal1_text = "Internal1";
- char *internal2_text = "Internal2";
- char *internal3_text = "Internal3";
- char *external2_text = "External2";
- char *external_text = "External";
- bool micbias2;
-
- dev_err(codec->dev, "%s %d\n", __func__, event);
- switch (w->reg) {
- case MSM89XX_PMIC_ANALOG_MICB_1_EN:
- case MSM89XX_PMIC_ANALOG_MICB_2_EN:
- micb_int_reg = MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS;
- break;
- default:
- dev_err(codec->dev,
- "%s: Error, invalid micbias register 0x%x\n",
- __func__, w->reg);
- return -EINVAL;
- }
-
- micbias2 = (snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_2_EN) & 0x80);
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (strnstr(w->name, internal1_text, strlen(w->name))) {
- if (get_codec_version(msm8x16_wcd) >= CAJON)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
- 0x02, 0x02);
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0x80, 0x80);
- } else if (strnstr(w->name, internal2_text, strlen(w->name))) {
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0x10, 0x10);
- snd_soc_update_bits_wrapper(codec,
- w->reg, 0x60, 0x00);
- } else if (strnstr(w->name, internal3_text, strlen(w->name))) {
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0x2, 0x2);
- /*
- * update MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2
- * for external bias only, not for external2.
- */
- } else if (!strnstr(w->name, external2_text, strlen(w->name)) &&
- strnstr(w->name, external_text,
- strlen(w->name))) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
- 0x02, 0x02);
- }
- if (!strnstr(w->name, external_text, strlen(w->name)))
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_EN, 0x05, 0x04);
- if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
- msm8x16_wcd_configure_cap(codec, true, micbias2);
-
- break;
- case SND_SOC_DAPM_POST_PMU:
- if (get_codec_version(msm8x16_wcd) <= TOMBAK_2_0)
- usleep_range(20000, 20100);
- if (strnstr(w->name, internal1_text, strlen(w->name))) {
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0x40, 0x40);
- } else if (strnstr(w->name, internal2_text, strlen(w->name))) {
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0x08, 0x08);
- msm8x16_notifier_call(codec,
- WCD_EVENT_POST_MICBIAS_2_ON);
- } else if (strnstr(w->name, internal3_text, 30)) {
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0x01, 0x01);
- } else if (strnstr(w->name, external2_text, strlen(w->name))) {
- msm8x16_notifier_call(codec,
- WCD_EVENT_POST_MICBIAS_2_ON);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (strnstr(w->name, internal1_text, strlen(w->name))) {
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0xC0, 0x40);
- } else if (strnstr(w->name, internal2_text, strlen(w->name))) {
- msm8x16_notifier_call(codec,
- WCD_EVENT_POST_MICBIAS_2_OFF);
- } else if (strnstr(w->name, internal3_text, 30)) {
- snd_soc_update_bits_wrapper(codec,
- micb_int_reg, 0x2, 0x0);
- } else if (strnstr(w->name, external2_text, strlen(w->name))) {
- /*
- * send micbias turn off event to mbhc driver and then
- * break, as no need to set MICB_1_EN register.
- */
- msm8x16_notifier_call(codec,
- WCD_EVENT_POST_MICBIAS_2_OFF);
- break;
- }
- if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
- msm8x16_wcd_configure_cap(codec, false, micbias2);
- break;
- }
- return 0;
-}
-
-static void tx_hpf_corner_freq_callback(struct work_struct *work)
-{
- struct delayed_work *hpf_delayed_work;
- struct hpf_work *hpf_work;
- struct msm8x16_wcd_priv *msm8x16_wcd;
- struct snd_soc_codec *codec;
- u16 tx_mux_ctl_reg;
- u8 hpf_cut_of_freq;
-
- hpf_delayed_work = to_delayed_work(work);
- hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
- msm8x16_wcd = hpf_work->msm8x16_wcd;
- codec = hpf_work->msm8x16_wcd->codec;
- hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
-
- tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
- (hpf_work->decimator - 1) * 32;
-
- dev_err(codec->dev, "%s(): decimator %u hpf_cut_of_freq 0x%x\n",
- __func__, hpf_work->decimator, (unsigned int)hpf_cut_of_freq);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV, 0xFF, 0x51);
-
- snd_soc_update_bits_wrapper(codec,
- tx_mux_ctl_reg, 0x30, hpf_cut_of_freq << 4);
-}
-
-
-#define TX_MUX_CTL_CUT_OFF_FREQ_MASK 0x30
-#define CF_MIN_3DB_4HZ 0x0
-#define CF_MIN_3DB_75HZ 0x1
-#define CF_MIN_3DB_150HZ 0x2
-
-static int msm8x16_wcd_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- int value = 0, reg;
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- if (w->shift == 0)
- reg = MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL;
- else if (w->shift == 1)
- reg = MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL;
- value = snd_soc_read_wrapper(codec, reg);
- snd_soc_write_wrapper(codec, reg, value);
- break;
- default:
- pr_err("%s: event = %d not expected\n", __func__, event);
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_dec(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm_asoc_mach_data *pdata = NULL;
- unsigned int decimator;
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- char *dec_name = NULL;
- char *widget_name = NULL;
- char *temp;
- int ret = 0, i;
- u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
- u8 dec_hpf_cut_of_freq;
- int offset;
- char *dec_num;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- dev_err(codec->dev, "%s %d\n", __func__, event);
-
- widget_name = kstrndup(w->name, 15, GFP_KERNEL);
- if (!widget_name)
- return -ENOMEM;
- temp = widget_name;
-
- dec_name = strsep(&widget_name, " ");
- widget_name = temp;
- if (!dec_name) {
- dev_err(codec->dev,
- "%s: Invalid decimator = %s\n", __func__, w->name);
- ret = -EINVAL;
- goto out;
- }
-
- dec_num = strpbrk(dec_name, "1234");
- if (dec_num == NULL) {
- dev_err(codec->dev, "%s: Invalid Decimator\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- ret = kstrtouint(dec_num, 10, &decimator);
- if (ret < 0) {
- dev_err(codec->dev,
- "%s: Invalid decimator = %s\n", __func__, dec_name);
- ret = -EINVAL;
- goto out;
- }
-
- dev_err(codec->dev,
- "%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
- w->name, dec_name, decimator);
-
- if (w->reg == MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL) {
- dec_reset_reg = MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL;
- offset = 0;
- } else {
- dev_err(codec->dev, "%s: Error, incorrect dec\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- tx_vol_ctl_reg = MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
- 32 * (decimator - 1);
- tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
- 32 * (decimator - 1);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (decimator == 3 || decimator == 4) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL,
- 0xFF, 0x5);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TX1_DMIC_CTL +
- (decimator - 1) * 0x20, 0x7, 0x2);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TX1_DMIC_CTL +
- (decimator - 1) * 0x20, 0x7, 0x2);
- }
- /* Enableable TX digital mute */
- snd_soc_update_bits_wrapper(codec, tx_vol_ctl_reg, 0x01, 0x01);
- for (i = 0; i < NUM_DECIMATORS; i++) {
- if (decimator == i + 1)
- msm8x16_wcd->dec_active[i] = true;
- }
-
- dec_hpf_cut_of_freq =
- snd_soc_read_wrapper(codec, tx_mux_ctl_reg);
-
- dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
-
- tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
- dec_hpf_cut_of_freq;
-
- if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) {
-
- /* set cut of freq to CF_MIN_3DB_150HZ (0x1); */
- snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x30,
- CF_MIN_3DB_150HZ << 4);
- }
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV,
- 0xFF, 0x42);
-
- break;
- case SND_SOC_DAPM_POST_PMU:
- /* enable HPF */
- snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x08, 0x00);
-
- if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
- CF_MIN_3DB_150HZ) {
-
- schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
- msecs_to_jiffies(300));
- }
- /* apply the digital gain after the decimator is enabled*/
- if ((w->shift) < ARRAY_SIZE(tx_digital_gain_reg))
- snd_soc_write_wrapper(codec,
- tx_digital_gain_reg[w->shift + offset],
- snd_soc_read_wrapper(codec,
- tx_digital_gain_reg[w->shift + offset])
- );
- if (pdata->lb_mode) {
- pr_err("%s: loopback mode unmute the DEC\n",
- __func__);
- snd_soc_update_bits_wrapper(codec,
- tx_vol_ctl_reg, 0x01, 0x00);
- }
- break;
- case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits_wrapper(codec, tx_vol_ctl_reg, 0x01, 0x01);
- msleep(20);
- snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x08, 0x08);
- cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits_wrapper(codec,
- dec_reset_reg, 1 << w->shift, 1 << w->shift);
- snd_soc_update_bits_wrapper(codec,
- dec_reset_reg, 1 << w->shift, 0x0);
- snd_soc_update_bits_wrapper(codec,
- tx_mux_ctl_reg, 0x08, 0x08);
- snd_soc_update_bits_wrapper(codec,
- tx_mux_ctl_reg, 0x30,
- (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
- snd_soc_update_bits_wrapper(codec,
- tx_vol_ctl_reg, 0x01, 0x00);
- for (i = 0; i < NUM_DECIMATORS; i++) {
- if (decimator == i + 1)
- msm8x16_wcd->dec_active[i] = false;
- }
- if (decimator == 3 || decimator == 4) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL,
- 0xFF, 0x0);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TX1_DMIC_CTL +
- (decimator - 1) * 0x20, 0x7, 0x0);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_TX1_DMIC_CTL +
- (decimator - 1) * 0x20, 0x7, 0x0);
- }
- break;
- }
-out:
- kfree(widget_name);
- return ret;
-}
-
-static int msm89xx_wcd_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
-
- if (!msm8x16_wcd->ext_spk_boost_set) {
- dev_err(codec->dev, "%s: ext_boost not supported/disabled\n",
- __func__);
- return 0;
- }
- dev_err(codec->dev, "%s: %s %d\n", __func__, w->name, event);
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (msm8x16_wcd->spkdrv_reg) {
- ret = regulator_enable(msm8x16_wcd->spkdrv_reg);
- if (ret)
- dev_err(codec->dev,
- "%s Failed to enable spkdrv reg %s\n",
- __func__, MSM89XX_VDD_SPKDRV_NAME);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (msm8x16_wcd->spkdrv_reg) {
- ret = regulator_disable(msm8x16_wcd->spkdrv_reg);
- if (ret)
- dev_err(codec->dev,
- "%s: Failed to disable spkdrv_reg %s\n",
- __func__, MSM89XX_VDD_SPKDRV_NAME);
- }
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_config_compander(struct snd_soc_codec *codec,
- int interp_n, int event)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: event %d shift %d, enabled %d\n",
- __func__, event, interp_n,
- msm8x16_wcd->comp_enabled[interp_n]);
-
- /* compander is not enabled */
- if (!msm8x16_wcd->comp_enabled[interp_n])
- return 0;
-
- switch (msm8x16_wcd->comp_enabled[interp_n]) {
- case COMPANDER_1:
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- /* Enable Compander Clock */
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B1_CTL,
- 1 << interp_n, 1 << interp_n);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0x50);
- /* add sleep for compander to settle */
- usleep_range(1000, 1100);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x28);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0xB0);
-
- /* Enable Compander GPIO */
- if (msm8x16_wcd->codec_hph_comp_gpio)
- msm8x16_wcd->codec_hph_comp_gpio(1);
- } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
- /* Disable Compander GPIO */
- if (msm8x16_wcd->codec_hph_comp_gpio)
- msm8x16_wcd->codec_hph_comp_gpio(0);
-
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x05);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_COMP0_B1_CTL,
- 1 << interp_n, 0);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x00);
- }
- break;
- default:
- dev_err(codec->dev, "%s: Invalid compander %d\n", __func__,
- msm8x16_wcd->comp_enabled[interp_n]);
- break;
- };
-
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s %d %s\n", __func__, event, w->name);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- msm8x16_wcd_codec_config_compander(codec, w->shift, event);
- /* apply the digital gain after the interpolator is enabled*/
- if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
- snd_soc_write_wrapper(codec,
- rx_digital_gain_reg[w->shift],
- snd_soc_read_wrapper(codec,
- rx_digital_gain_reg[w->shift])
- );
- break;
- case SND_SOC_DAPM_POST_PMD:
- msm8x16_wcd_codec_config_compander(codec, w->shift, event);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
- 1 << w->shift, 1 << w->shift);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
- 1 << w->shift, 0x0);
- /*
- * disable the mute enabled during the PMD of this device
- */
- if ((w->shift == 0) &&
- (msm8x16_wcd->mute_mask & HPHL_PA_DISABLE)) {
- pr_err("disabling HPHL mute\n");
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
- if (get_codec_version(msm8x16_wcd) >= CAJON)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
- 0xF0, 0x20);
- msm8x16_wcd->mute_mask &= ~(HPHL_PA_DISABLE);
- } else if ((w->shift == 1) &&
- (msm8x16_wcd->mute_mask & HPHR_PA_DISABLE)) {
- pr_err("disabling HPHR mute\n");
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
- if (get_codec_version(msm8x16_wcd) >= CAJON)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
- 0xF0, 0x20);
- msm8x16_wcd->mute_mask &= ~(HPHR_PA_DISABLE);
- } else if ((w->shift == 2) &&
- (msm8x16_wcd->mute_mask & SPKR_PA_DISABLE)) {
- pr_err("disabling SPKR mute\n");
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
- msm8x16_wcd->mute_mask &= ~(SPKR_PA_DISABLE);
- } else if ((w->shift == 0) &&
- (msm8x16_wcd->mute_mask & EAR_PA_DISABLE)) {
- pr_err("disabling EAR mute\n");
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
- msm8x16_wcd->mute_mask &= ~(EAR_PA_DISABLE);
- }
- }
- return 0;
-}
-
-
-/* The register address is the same as other codec so it can use resmgr */
-static int msm8x16_wcd_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s %d\n", __func__, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- msm8x16_wcd->rx_bias_count++;
- if (msm8x16_wcd->rx_bias_count == 1) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
- 0x80, 0x80);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
- 0x01, 0x01);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- msm8x16_wcd->rx_bias_count--;
- if (msm8x16_wcd->rx_bias_count == 0) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
- 0x01, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
- 0x80, 0x00);
- }
- break;
- }
- dev_err(codec->dev, "%s rx_bias_count = %d\n",
- __func__, msm8x16_wcd->rx_bias_count);
- return 0;
-}
-
-static uint32_t wcd_get_impedance_value(uint32_t imped)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wcd_imped_val) - 1; i++) {
- if (imped >= wcd_imped_val[i] &&
- imped < wcd_imped_val[i + 1])
- break;
- }
-
- pr_err("%s: selected impedance value = %d\n",
- __func__, wcd_imped_val[i]);
- return wcd_imped_val[i];
-}
-
-void wcd_imped_config(struct snd_soc_codec *codec,
- uint32_t imped, bool set_gain)
-{
- uint32_t value;
- int codec_version;
- struct msm8x16_wcd_priv *msm8x16_wcd =
- snd_soc_codec_get_drvdata(codec);
-
- value = wcd_get_impedance_value(imped);
-
- if (value < wcd_imped_val[0]) {
- pr_err("%s, detected impedance is less than 4 Ohm\n",
- __func__);
- return;
- }
- if (value >= wcd_imped_val[ARRAY_SIZE(wcd_imped_val) - 1]) {
- pr_err("%s, invalid imped, greater than 48 Ohm\n = %d\n",
- __func__, value);
- return;
- }
-
- codec_version = get_codec_version(msm8x16_wcd);
-
- if (set_gain) {
- switch (codec_version) {
- case TOMBAK_1_0:
- case TOMBAK_2_0:
- case CONGA:
- /*
- * For 32Ohm load and higher loads, Set 0x19E
- * bit 5 to 1 (POS_6_DB_DI). For loads lower
- * than 32Ohm (such as 16Ohm load), Set 0x19E
- * bit 5 to 0 (POS_1P5_DB_DI)
- */
- if (value >= 32)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
- 0x20, 0x20);
- else
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
- 0x20, 0x00);
- break;
- case CAJON:
- case CAJON_2_0:
- case DIANGU:
- if (value >= 13) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
- 0x20, 0x20);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_NCP_VCTRL,
- 0x07, 0x07);
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
- 0x20, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_NCP_VCTRL,
- 0x07, 0x04);
- }
- break;
- }
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
- 0x20, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_NCP_VCTRL,
- 0x07, 0x04);
- }
-
- pr_err("%s: Exit\n", __func__);
-}
-
-static int msm8x16_wcd_hphl_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- uint32_t impedl, impedr;
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
- int ret;
-
- dev_err(codec->dev, "%s %s %d\n", __func__, w->name, event);
- ret = wcd_mbhc_get_impedance(&msm8x16_wcd->mbhc,
- &impedl, &impedr);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (get_codec_version(msm8x16_wcd) > CAJON)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
- 0x08, 0x08);
- if (get_codec_version(msm8x16_wcd) == CAJON ||
- get_codec_version(msm8x16_wcd) == CAJON_2_0) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST,
- 0x80, 0x80);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST,
- 0x80, 0x80);
- }
- if (get_codec_version(msm8x16_wcd) > CAJON)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
- 0x08, 0x00);
- if (msm8x16_wcd->hph_mode == HD2_MODE) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x14);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0x10);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x80);
- }
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x02);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x01);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x02);
- if (!ret)
- wcd_imped_config(codec, impedl, true);
- else
- dev_err(codec->dev, "Failed to get mbhc impedance %d\n",
- ret);
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x00);
- break;
- case SND_SOC_DAPM_POST_PMD:
- wcd_imped_config(codec, impedl, false);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x00);
- if (msm8x16_wcd->hph_mode == HD2_MODE) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0xFF);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x00);
- }
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_lo_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- dev_err(codec->dev, "%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x20);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x80);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x08);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x40);
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x80);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x40);
- break;
- case SND_SOC_DAPM_POST_PMD:
- usleep_range(20000, 20100);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_hphr_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (msm8x16_wcd->hph_mode == HD2_MODE) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x14);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0x10);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x80);
- }
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x02);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x02);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x01);
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x00);
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x00);
- if (msm8x16_wcd->hph_mode == HD2_MODE) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x00);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0xFF);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x00);
- }
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_hph_pa_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (w->shift == 5)
- msm8x16_notifier_call(codec,
- WCD_EVENT_PRE_HPHL_PA_ON);
- else if (w->shift == 4)
- msm8x16_notifier_call(codec,
- WCD_EVENT_PRE_HPHR_PA_ON);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x20, 0x20);
- break;
-
- case SND_SOC_DAPM_POST_PMU:
- usleep_range(7000, 7100);
- if (w->shift == 5) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
- } else if (w->shift == 4) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
- }
- break;
-
- case SND_SOC_DAPM_PRE_PMD:
- if (w->shift == 5) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x01);
- msleep(20);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x00);
- msm8x16_wcd->mute_mask |= HPHL_PA_DISABLE;
- msm8x16_notifier_call(codec,
- WCD_EVENT_PRE_HPHL_PA_OFF);
- } else if (w->shift == 4) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x01);
- msleep(20);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x00);
- msm8x16_wcd->mute_mask |= HPHR_PA_DISABLE;
- msm8x16_notifier_call(codec,
- WCD_EVENT_PRE_HPHR_PA_OFF);
- }
- if (get_codec_version(msm8x16_wcd) >= CAJON) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
- 0xF0, 0x30);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (w->shift == 5) {
- clear_bit(WCD_MBHC_HPHL_PA_OFF_ACK,
- &msm8x16_wcd->mbhc.hph_pa_dac_state);
- msm8x16_notifier_call(codec,
- WCD_EVENT_POST_HPHL_PA_OFF);
- } else if (w->shift == 4) {
- clear_bit(WCD_MBHC_HPHR_PA_OFF_ACK,
- &msm8x16_wcd->mbhc.hph_pa_dac_state);
- msm8x16_notifier_call(codec,
- WCD_EVENT_POST_HPHR_PA_OFF);
- }
- usleep_range(4000, 4100);
- usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-
- dev_err(codec->dev,
- "%s: sleep 10 ms after %s PA disable.\n", __func__,
- w->name);
- usleep_range(10000, 10100);
- break;
- }
- return 0;
-}
-
-static const struct snd_soc_dapm_route audio_map[] = {
- {"RX_I2S_CLK", NULL, "CDC_CONN"},
- {"I2S RX1", NULL, "RX_I2S_CLK"},
- {"I2S RX2", NULL, "RX_I2S_CLK"},
- {"I2S RX3", NULL, "RX_I2S_CLK"},
-
- {"I2S TX1", NULL, "TX_I2S_CLK"},
- {"I2S TX2", NULL, "TX_I2S_CLK"},
- {"AIF2 VI", NULL, "TX_I2S_CLK"},
-
- {"I2S TX1", NULL, "DEC1 MUX"},
- {"I2S TX2", NULL, "DEC2 MUX"},
- {"AIF2 VI", NULL, "DEC3 MUX"},
- {"AIF2 VI", NULL, "DEC4 MUX"},
-
- /* RDAC Connections */
- {"HPHR DAC", NULL, "RDAC2 MUX"},
- {"RDAC2 MUX", "RX1", "RX1 CHAIN"},
- {"RDAC2 MUX", "RX2", "RX2 CHAIN"},
-
- /* WSA */
- {"WSA_SPK OUT", NULL, "WSA Spk Switch"},
- {"WSA Spk Switch", "WSA", "EAR PA"},
-
- /* Earpiece (RX MIX1) */
- {"EAR", NULL, "EAR_S"},
- {"EAR_S", "Switch", "EAR PA"},
- {"EAR PA", NULL, "RX_BIAS"},
- {"EAR PA", NULL, "HPHL DAC"},
- {"EAR PA", NULL, "HPHR DAC"},
- {"EAR PA", NULL, "EAR CP"},
-
- /* Headset (RX MIX1 and RX MIX2) */
- {"HEADPHONE", NULL, "HPHL PA"},
- {"HEADPHONE", NULL, "HPHR PA"},
-
- {"Ext Spk", NULL, "Ext Spk Switch"},
- {"Ext Spk Switch", "On", "HPHL PA"},
- {"Ext Spk Switch", "On", "HPHR PA"},
-
- {"HPHL PA", NULL, "HPHL"},
- {"HPHR PA", NULL, "HPHR"},
- {"HPHL", "Switch", "HPHL DAC"},
- {"HPHR", "Switch", "HPHR DAC"},
- {"HPHL PA", NULL, "CP"},
- {"HPHL PA", NULL, "RX_BIAS"},
- {"HPHR PA", NULL, "CP"},
- {"HPHR PA", NULL, "RX_BIAS"},
- {"HPHL DAC", NULL, "RX1 CHAIN"},
-
- {"SPK_OUT", NULL, "SPK PA"},
- {"SPK PA", NULL, "SPK_RX_BIAS"},
- {"SPK PA", NULL, "SPK"},
- {"SPK", "Switch", "SPK DAC"},
- {"SPK DAC", NULL, "RX3 CHAIN"},
- {"SPK DAC", NULL, "VDD_SPKDRV"},
-
- /* lineout */
- {"LINEOUT", NULL, "LINEOUT PA"},
- {"LINEOUT PA", NULL, "SPK_RX_BIAS"},
- {"LINEOUT PA", NULL, "LINE_OUT"},
- {"LINE_OUT", "Switch", "LINEOUT DAC"},
- {"LINEOUT DAC", NULL, "RX3 CHAIN"},
-
- /* lineout to WSA */
- {"WSA_SPK OUT", NULL, "LINEOUT PA"},
-
- {"RX1 CHAIN", NULL, "RX1 CLK"},
- {"RX2 CHAIN", NULL, "RX2 CLK"},
- {"RX3 CHAIN", NULL, "RX3 CLK"},
- {"RX1 CHAIN", NULL, "RX1 MIX2"},
- {"RX2 CHAIN", NULL, "RX2 MIX2"},
- {"RX3 CHAIN", NULL, "RX3 MIX1"},
-
- {"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
- {"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
- {"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
- {"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
- {"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
- {"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
- {"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
- {"RX1 MIX2", NULL, "RX1 MIX1"},
- {"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
- {"RX2 MIX2", NULL, "RX2 MIX1"},
- {"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
-
- {"RX1 MIX1 INP1", "RX1", "I2S RX1"},
- {"RX1 MIX1 INP1", "RX2", "I2S RX2"},
- {"RX1 MIX1 INP1", "RX3", "I2S RX3"},
- {"RX1 MIX1 INP1", "IIR1", "IIR1"},
- {"RX1 MIX1 INP1", "IIR2", "IIR2"},
- {"RX1 MIX1 INP2", "RX1", "I2S RX1"},
- {"RX1 MIX1 INP2", "RX2", "I2S RX2"},
- {"RX1 MIX1 INP2", "RX3", "I2S RX3"},
- {"RX1 MIX1 INP2", "IIR1", "IIR1"},
- {"RX1 MIX1 INP2", "IIR2", "IIR2"},
- {"RX1 MIX1 INP3", "RX1", "I2S RX1"},
- {"RX1 MIX1 INP3", "RX2", "I2S RX2"},
- {"RX1 MIX1 INP3", "RX3", "I2S RX3"},
-
- {"RX2 MIX1 INP1", "RX1", "I2S RX1"},
- {"RX2 MIX1 INP1", "RX2", "I2S RX2"},
- {"RX2 MIX1 INP1", "RX3", "I2S RX3"},
- {"RX2 MIX1 INP1", "IIR1", "IIR1"},
- {"RX2 MIX1 INP1", "IIR2", "IIR2"},
- {"RX2 MIX1 INP2", "RX1", "I2S RX1"},
- {"RX2 MIX1 INP2", "RX2", "I2S RX2"},
- {"RX2 MIX1 INP2", "RX3", "I2S RX3"},
- {"RX2 MIX1 INP2", "IIR1", "IIR1"},
- {"RX2 MIX1 INP2", "IIR2", "IIR2"},
-
- {"RX3 MIX1 INP1", "RX1", "I2S RX1"},
- {"RX3 MIX1 INP1", "RX2", "I2S RX2"},
- {"RX3 MIX1 INP1", "RX3", "I2S RX3"},
- {"RX3 MIX1 INP1", "IIR1", "IIR1"},
- {"RX3 MIX1 INP1", "IIR2", "IIR2"},
- {"RX3 MIX1 INP2", "RX1", "I2S RX1"},
- {"RX3 MIX1 INP2", "RX2", "I2S RX2"},
- {"RX3 MIX1 INP2", "RX3", "I2S RX3"},
- {"RX3 MIX1 INP2", "IIR1", "IIR1"},
- {"RX3 MIX1 INP2", "IIR2", "IIR2"},
-
- {"RX1 MIX2 INP1", "IIR1", "IIR1"},
- {"RX2 MIX2 INP1", "IIR1", "IIR1"},
- {"RX1 MIX2 INP1", "IIR2", "IIR2"},
- {"RX2 MIX2 INP1", "IIR2", "IIR2"},
-
- /* Decimator Inputs */
- {"DEC1 MUX", "DMIC1", "DMIC1"},
- {"DEC1 MUX", "DMIC2", "DMIC2"},
- {"DEC1 MUX", "ADC1", "ADC1"},
- {"DEC1 MUX", "ADC2", "ADC2"},
- {"DEC1 MUX", "ADC3", "ADC3"},
- {"DEC1 MUX", NULL, "CDC_CONN"},
-
- {"DEC2 MUX", "DMIC1", "DMIC1"},
- {"DEC2 MUX", "DMIC2", "DMIC2"},
- {"DEC2 MUX", "ADC1", "ADC1"},
- {"DEC2 MUX", "ADC2", "ADC2"},
- {"DEC2 MUX", "ADC3", "ADC3"},
- {"DEC2 MUX", NULL, "CDC_CONN"},
-
- {"DEC3 MUX", "DMIC3", "DMIC3"},
- {"DEC4 MUX", "DMIC4", "DMIC4"},
- {"DEC3 MUX", NULL, "CDC_CONN"},
- {"DEC4 MUX", NULL, "CDC_CONN"},
- /* ADC Connections */
- {"ADC2", NULL, "ADC2 MUX"},
- {"ADC3", NULL, "ADC2 MUX"},
- {"ADC2 MUX", "INP2", "ADC2_INP2"},
- {"ADC2 MUX", "INP3", "ADC2_INP3"},
-
- {"ADC1", NULL, "AMIC1"},
- {"ADC2_INP2", NULL, "AMIC2"},
- {"ADC2_INP3", NULL, "AMIC3"},
-
- /* TODO: Fix this */
- {"IIR1", NULL, "IIR1 INP1 MUX"},
- {"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
- {"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
- {"IIR2", NULL, "IIR2 INP1 MUX"},
- {"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
- {"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
- {"MIC BIAS Internal1", NULL, "INT_LDO_H"},
- {"MIC BIAS Internal2", NULL, "INT_LDO_H"},
- {"MIC BIAS External", NULL, "INT_LDO_H"},
- {"MIC BIAS External2", NULL, "INT_LDO_H"},
- {"MIC BIAS Internal1", NULL, "MICBIAS_REGULATOR"},
- {"MIC BIAS Internal2", NULL, "MICBIAS_REGULATOR"},
- {"MIC BIAS External", NULL, "MICBIAS_REGULATOR"},
- {"MIC BIAS External2", NULL, "MICBIAS_REGULATOR"},
-};
-
-static int msm8x16_wcd_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd =
- snd_soc_codec_get_drvdata(dai->codec);
-
- dev_err(dai->codec->dev, "%s(): substream = %s stream = %d\n",
- __func__,
- substream->name, substream->stream);
- /*
- * If status_mask is BU_DOWN it means SSR is not complete.
- * So retun error.
- */
- if (test_bit(BUS_DOWN, &msm8x16_wcd->status_mask)) {
- dev_err(dai->codec->dev, "Error, Device is not up post SSR\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static void msm8x16_wcd_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- dev_err(dai->codec->dev,
- "%s(): substream = %s stream = %d\n", __func__,
- substream->name, substream->stream);
-}
-
-int msm8x16_wcd_mclk_enable(struct snd_soc_codec *codec,
- int mclk_enable, bool dapm)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
- __func__, mclk_enable, dapm);
- if (mclk_enable) {
- msm8x16_wcd->int_mclk0_enabled = true;
- msm8x16_wcd_codec_enable_clock_block(codec, 1);
- } else {
- if (!msm8x16_wcd->int_mclk0_enabled) {
- dev_err(codec->dev, "Error, MCLK already diabled\n");
- return -EINVAL;
- }
- msm8x16_wcd->int_mclk0_enabled = false;
- msm8x16_wcd_codec_enable_clock_block(codec, 0);
- }
- return 0;
-}
-
-static int msm8x16_wcd_set_dai_sysclk(struct snd_soc_dai *dai,
- int clk_id, unsigned int freq, int dir)
-{
- dev_err(dai->codec->dev, "%s\n", __func__);
- return 0;
-}
-
-static int msm8x16_wcd_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
- dev_err(dai->codec->dev, "%s\n", __func__);
- return 0;
-}
-
-static int msm8x16_wcd_set_channel_map(struct snd_soc_dai *dai,
- unsigned int tx_num, unsigned int *tx_slot,
- unsigned int rx_num, unsigned int *rx_slot)
-
-{
- dev_err(dai->codec->dev, "%s\n", __func__);
- return 0;
-}
-
-static int msm8x16_wcd_get_channel_map(struct snd_soc_dai *dai,
- unsigned int *tx_num, unsigned int *tx_slot,
- unsigned int *rx_num, unsigned int *rx_slot)
-
-{
- dev_err(dai->codec->dev, "%s\n", __func__);
- return 0;
-}
-
-static int msm8x16_wcd_set_interpolator_rate(struct snd_soc_dai *dai,
- u8 rx_fs_rate_reg_val, u32 sample_rate)
-{
- snd_soc_update_bits_wrapper(dai->codec,
- MSM89XX_CDC_CORE_RX1_B5_CTL, 0xF0, rx_fs_rate_reg_val);
- snd_soc_update_bits_wrapper(dai->codec,
- MSM89XX_CDC_CORE_RX2_B5_CTL, 0xF0, rx_fs_rate_reg_val);
- return 0;
-}
-
-static int msm8x16_wcd_set_decimator_rate(struct snd_soc_dai *dai,
- u8 tx_fs_rate_reg_val, u32 sample_rate)
-{
- return 0;
-}
-
-static int msm8x16_wcd_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- u8 tx_fs_rate, rx_fs_rate, rx_clk_fs_rate;
- int ret;
-
- dev_err(dai->codec->dev,
- "%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
- __func__, dai->name, dai->id, params_rate(params),
- params_channels(params), params_format(params));
-
- switch (params_rate(params)) {
- case 8000:
- tx_fs_rate = 0x00;
- rx_fs_rate = 0x00;
- rx_clk_fs_rate = 0x00;
- break;
- case 16000:
- tx_fs_rate = 0x20;
- rx_fs_rate = 0x20;
- rx_clk_fs_rate = 0x01;
- break;
- case 32000:
- tx_fs_rate = 0x40;
- rx_fs_rate = 0x40;
- rx_clk_fs_rate = 0x02;
- break;
- case 48000:
- tx_fs_rate = 0x60;
- rx_fs_rate = 0x60;
- rx_clk_fs_rate = 0x03;
- break;
- case 96000:
- tx_fs_rate = 0x80;
- rx_fs_rate = 0x80;
- rx_clk_fs_rate = 0x04;
- break;
- case 192000:
- tx_fs_rate = 0xA0;
- rx_fs_rate = 0xA0;
- rx_clk_fs_rate = 0x05;
- break;
- default:
- dev_err(dai->codec->dev,
- "%s: Invalid sampling rate %d\n", __func__,
- params_rate(params));
- return -EINVAL;
- }
-
- snd_soc_update_bits_wrapper(dai->codec,
- MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x0F, rx_clk_fs_rate);
-
- switch (substream->stream) {
- case SNDRV_PCM_STREAM_CAPTURE:
- ret = msm8x16_wcd_set_decimator_rate(dai, tx_fs_rate,
- params_rate(params));
- if (ret < 0) {
- dev_err(dai->codec->dev,
- "%s: set decimator rate failed %d\n", __func__,
- ret);
- return ret;
- }
- break;
- case SNDRV_PCM_STREAM_PLAYBACK:
- ret = msm8x16_wcd_set_interpolator_rate(dai, rx_fs_rate,
- params_rate(params));
- if (ret < 0) {
- dev_err(dai->codec->dev,
- "%s: set decimator rate failed %d\n", __func__,
- ret);
- return ret;
- }
- break;
- default:
- dev_err(dai->codec->dev,
- "%s: Invalid stream type %d\n", __func__,
- substream->stream);
- return -EINVAL;
- }
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- snd_soc_update_bits_wrapper(dai->codec,
- MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x20);
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- snd_soc_update_bits_wrapper(dai->codec,
- MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x00);
- break;
- default:
- dev_err(dai->codec->dev, "%s: wrong format selected\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int msm8x16_wcd_digital_mute(struct snd_soc_dai *dai, int mute)
-{
- struct snd_soc_codec *codec = NULL;
- u16 tx_vol_ctl_reg = 0;
- u8 decimator = 0, i;
- struct msm8x16_wcd_priv *msm8x16_wcd;
-
- pr_err("%s: Digital Mute val = %d\n", __func__, mute);
-
- if (!dai || !dai->codec) {
- pr_err("%s: Invalid params\n", __func__);
- return -EINVAL;
- }
- codec = dai->codec;
- msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- if ((dai->id != AIF1_CAP) && (dai->id != AIF2_VIFEED)) {
- dev_err(codec->dev, "%s: Not capture use case skip\n",
- __func__);
- return 0;
- }
-
- mute = (mute) ? 1 : 0;
- if (!mute) {
- /*
- * 15 ms is an emperical value for the mute time
- * that was arrived by checking the pop level
- * to be inaudible
- */
- usleep_range(15000, 15010);
- }
-
- for (i = 0; i < NUM_DECIMATORS; i++) {
- if (msm8x16_wcd->dec_active[i])
- decimator = i + 1;
- if (decimator && decimator <= NUM_DECIMATORS) {
- pr_err("%s: Mute = %d Decimator = %d", __func__,
- mute, decimator);
- tx_vol_ctl_reg = MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
- 32 * (decimator - 1);
- snd_soc_update_bits_wrapper(codec,
- tx_vol_ctl_reg, 0x01, mute);
- }
- decimator = 0;
- }
- return 0;
-}
-
-static struct snd_soc_dai_ops msm8x16_wcd_dai_ops = {
- .startup = msm8x16_wcd_startup,
- .shutdown = msm8x16_wcd_shutdown,
- .hw_params = msm8x16_wcd_hw_params,
- .set_sysclk = msm8x16_wcd_set_dai_sysclk,
- .set_fmt = msm8x16_wcd_set_dai_fmt,
- .set_channel_map = msm8x16_wcd_set_channel_map,
- .get_channel_map = msm8x16_wcd_get_channel_map,
- .digital_mute = msm8x16_wcd_digital_mute,
-};
-
-static struct snd_soc_dai_driver msm8x16_wcd_i2s_dai[] = {
- {
- .name = "msm8x16_wcd_i2s_rx1",
- .id = AIF1_PB,
- .playback = {
- .stream_name = "AIF1 Playback",
- .rates = MSM89XX_RATES,
- .formats = MSM89XX_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 3,
- },
- .ops = &msm8x16_wcd_dai_ops,
- },
- {
- .name = "msm8x16_wcd_i2s_tx1",
- .id = AIF1_CAP,
- .capture = {
- .stream_name = "AIF1 Capture",
- .rates = MSM89XX_RATES,
- .formats = MSM89XX_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 4,
- },
- .ops = &msm8x16_wcd_dai_ops,
- },
- {
- .name = "cajon_vifeedback",
- .id = AIF2_VIFEED,
- .capture = {
- .stream_name = "VIfeed",
- .rates = MSM89XX_RATES,
- .formats = MSM89XX_FORMATS,
- .rate_max = 48000,
- .rate_min = 48000,
- .channels_min = 2,
- .channels_max = 2,
- },
- .ops = &msm8x16_wcd_dai_ops,
- },
-};
-
-static int msm8x16_wcd_codec_enable_rx_chain(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- dev_err(codec->dev,
- "%s: PMU:Sleeping 20ms after disabling mute\n",
- __func__);
- break;
- case SND_SOC_DAPM_POST_PMD:
- dev_err(codec->dev,
- "%s: PMD:Sleeping 20ms after disabling mute\n",
- __func__);
- snd_soc_update_bits_wrapper(codec, w->reg,
- 1 << w->shift, 0x00);
- msleep(20);
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_lo_pa(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- dev_err(codec->dev, "%s: %d %s\n", __func__, event, w->name);
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
- break;
- }
-
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_spk_ext_pa(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- dev_err(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- dev_err(codec->dev,
- "%s: enable external speaker PA\n", __func__);
- if (msm8x16_wcd->codec_spk_ext_pa_cb)
- msm8x16_wcd->codec_spk_ext_pa_cb(codec, 1);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- dev_err(codec->dev,
- "%s: enable external speaker PA\n", __func__);
- if (msm8x16_wcd->codec_spk_ext_pa_cb)
- msm8x16_wcd->codec_spk_ext_pa_cb(codec, 0);
- break;
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- dev_err(codec->dev,
- "%s: Sleeping 20ms after select EAR PA\n",
- __func__);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x80, 0x80);
- if (get_codec_version(msm8x16_wcd) < CONGA)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x2A);
- break;
- case SND_SOC_DAPM_POST_PMU:
- dev_err(codec->dev,
- "%s: Sleeping 20ms after enabling EAR PA\n",
- __func__);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x40, 0x40);
- usleep_range(7000, 7100);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x01);
- msleep(20);
- msm8x16_wcd->mute_mask |= EAR_PA_DISABLE;
- if (msm8x16_wcd->boost_option == BOOST_ALWAYS) {
- dev_err(codec->dev,
- "%s: boost_option:%d, tear down ear\n",
- __func__, msm8x16_wcd->boost_option);
- msm8x16_wcd_boost_mode_sequence(codec, EAR_PMD);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- dev_err(codec->dev,
- "%s: Sleeping 7ms after disabling EAR PA\n",
- __func__);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x40, 0x00);
- usleep_range(7000, 7100);
- if (get_codec_version(msm8x16_wcd) < CONGA)
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x16);
- break;
- }
- return 0;
-}
-
-static const struct snd_soc_dapm_widget msm8x16_wcd_dapm_widgets[] = {
- /*RX stuff */
- SND_SOC_DAPM_OUTPUT("EAR"),
- SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
-
- SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
- 0, 0, NULL, 0, msm8x16_wcd_codec_enable_ear_pa,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MUX("EAR_S", SND_SOC_NOPM, 0, 0,
- ear_pa_mux),
-
- SND_SOC_DAPM_MUX("WSA Spk Switch", SND_SOC_NOPM, 0, 0,
- wsa_spk_mux),
-
- SND_SOC_DAPM_AIF_IN("I2S RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-
- SND_SOC_DAPM_AIF_IN("I2S RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-
- SND_SOC_DAPM_AIF_IN("I2S RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-
- SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
-
- SND_SOC_DAPM_SPK("Ext Spk", msm8x16_wcd_codec_enable_spk_ext_pa),
-
- SND_SOC_DAPM_OUTPUT("HEADPHONE"),
- SND_SOC_DAPM_PGA_E("HPHL PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
- 5, 0, NULL, 0,
- msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0,
- hphl_mux),
-
- SND_SOC_DAPM_MIXER_E("HPHL DAC",
- MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
- 0, msm8x16_wcd_hphl_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_PGA_E("HPHR PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
- 4, 0, NULL, 0,
- msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0,
- hphr_mux),
-
- SND_SOC_DAPM_MIXER_E("HPHR DAC",
- MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
- 0, msm8x16_wcd_hphr_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("SPK", SND_SOC_NOPM, 0, 0,
- spkr_mux),
-
- SND_SOC_DAPM_DAC("SPK DAC", NULL,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 7, 0),
-
- SND_SOC_DAPM_MUX("LINE_OUT",
- SND_SOC_NOPM, 0, 0, lo_mux),
-
- SND_SOC_DAPM_DAC_E("LINEOUT DAC", NULL,
- SND_SOC_NOPM, 0, 0, msm8x16_wcd_lo_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- /* Speaker */
- SND_SOC_DAPM_OUTPUT("SPK_OUT"),
-
- /* Lineout */
- SND_SOC_DAPM_OUTPUT("LINEOUT"),
-
- SND_SOC_DAPM_PGA_E("SPK PA", MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 6, 0, NULL, 0, msm8x16_wcd_codec_enable_spk_pa,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_PGA_E("LINEOUT PA", MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL,
- 5, 0, NULL, 0, msm8x16_wcd_codec_enable_lo_pa,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0,
- msm89xx_wcd_codec_enable_vdd_spkr,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("Ext Spk Switch", SND_SOC_NOPM, 0, 0,
- &ext_spk_mux),
-
- SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-
- SND_SOC_DAPM_MIXER_E("RX1 MIX2",
- MSM89XX_CDC_CORE_CLK_RX_B1_CTL, MSM89XX_RX1, 0, NULL,
- 0, msm8x16_wcd_codec_enable_interpolator,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MIXER_E("RX2 MIX2",
- MSM89XX_CDC_CORE_CLK_RX_B1_CTL, MSM89XX_RX2, 0, NULL,
- 0, msm8x16_wcd_codec_enable_interpolator,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MIXER_E("RX3 MIX1",
- MSM89XX_CDC_CORE_CLK_RX_B1_CTL, MSM89XX_RX3, 0, NULL,
- 0, msm8x16_wcd_codec_enable_interpolator,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 1, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("RX3 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 2, 0, msm8x16_wcd_codec_enable_dig_clk, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MIXER_E("RX1 CHAIN", MSM89XX_CDC_CORE_RX1_B6_CTL, 0, 0,
- NULL, 0,
- msm8x16_wcd_codec_enable_rx_chain,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MIXER_E("RX2 CHAIN", MSM89XX_CDC_CORE_RX2_B6_CTL, 0, 0,
- NULL, 0,
- msm8x16_wcd_codec_enable_rx_chain,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MIXER_E("RX3 CHAIN", MSM89XX_CDC_CORE_RX3_B6_CTL, 0, 0,
- NULL, 0,
- msm8x16_wcd_codec_enable_rx_chain,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
- &rx_mix1_inp3_mux),
-
- SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx2_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx2_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
- &rx2_mix1_inp3_mux),
-
- SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx3_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx3_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
- &rx3_mix1_inp3_mux),
-
- SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
- &rx1_mix2_inp1_mux),
- SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
- &rx2_mix2_inp1_mux),
-
- SND_SOC_DAPM_SUPPLY("MICBIAS_REGULATOR", SND_SOC_NOPM,
- ON_DEMAND_MICBIAS, 0,
- msm8x16_wcd_codec_enable_on_demand_supply,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("CP", MSM89XX_PMIC_ANALOG_NCP_EN, 0, 0,
- msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("EAR CP", MSM89XX_PMIC_ANALOG_NCP_EN, 4, 0,
- msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY_S("RX_BIAS", 1, SND_SOC_NOPM,
- 0, 0, msm8x16_wcd_codec_enable_rx_bias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY_S("SPK_RX_BIAS", 1, SND_SOC_NOPM, 0, 0,
- msm8x16_wcd_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- /* TX */
-
- SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, MSM89XX_CDC_CORE_CLK_OTHR_CTL,
- 2, 0, NULL, 0),
-
-
- SND_SOC_DAPM_INPUT("AMIC1"),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal1",
- MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
- msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal2",
- MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
- msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal3",
- MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
- msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC1", NULL, MSM89XX_PMIC_ANALOG_TX_1_EN, 7, 0,
- msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC2_INP2",
- NULL, MSM89XX_PMIC_ANALOG_TX_2_EN, 7, 0,
- msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC2_INP3",
- NULL, MSM89XX_PMIC_ANALOG_TX_3_EN, 7, 0,
- msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
-
- SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0,
- &tx_adc2_mux),
-
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS External",
- MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
- msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS External2",
- MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
- msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
-
-
- SND_SOC_DAPM_INPUT("AMIC3"),
-
- SND_SOC_DAPM_MUX_E("DEC1 MUX",
- MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 0, 0,
- &dec1_mux, msm8x16_wcd_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC2 MUX",
- MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 1, 0,
- &dec2_mux, msm8x16_wcd_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC3 MUX",
- MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 2, 0,
- &dec3_mux, msm8x16_wcd_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC4 MUX",
- MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 3, 0,
- &dec4_mux, msm8x16_wcd_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
-
- SND_SOC_DAPM_INPUT("AMIC2"),
-
- SND_SOC_DAPM_AIF_OUT("I2S TX1", "AIF1 Capture", 0, SND_SOC_NOPM,
- 0, 0),
- SND_SOC_DAPM_AIF_OUT("I2S TX2", "AIF1 Capture", 0, SND_SOC_NOPM,
- 0, 0),
- SND_SOC_DAPM_AIF_OUT("I2S TX3", "AIF1 Capture", 0, SND_SOC_NOPM,
- 0, 0),
-
- SND_SOC_DAPM_AIF_OUT("AIF2 VI", "VIfeed", 0, SND_SOC_NOPM,
- 0, 0),
- /* Digital Mic Inputs */
- SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
- msm8x16_wcd_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
- msm8x16_wcd_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_INPUT("DMIC3"),
-
- SND_SOC_DAPM_INPUT("DMIC4"),
-
- /* Sidetone */
- SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
- SND_SOC_DAPM_PGA_E("IIR1", MSM89XX_CDC_CORE_CLK_SD_CTL, 0, 0, NULL, 0,
- msm8x16_wcd_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
-
- SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
- SND_SOC_DAPM_PGA_E("IIR2", MSM89XX_CDC_CORE_CLK_SD_CTL, 1, 0, NULL, 0,
- msm8x16_wcd_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
-
- SND_SOC_DAPM_SUPPLY("RX_I2S_CLK",
- MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 4, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("TX_I2S_CLK",
- MSM89XX_CDC_CORE_CLK_TX_I2S_CTL, 4, 0,
- NULL, 0),
-};
-
-static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults[] = {
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
-};
-
-static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults_2_0[] = {
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BOOST_EN_CTL, 0x5F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO, 0x88),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static const struct msm8x16_wcd_reg_mask_val msm8909_wcd_reg_defaults[] = {
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE, 0x0A),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static const struct msm8x16_wcd_reg_mask_val cajon_wcd_reg_defaults[] = {
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static const struct msm8x16_wcd_reg_mask_val cajon2p0_wcd_reg_defaults[] = {
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0xA2),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_EAR_STATUS, 0x10),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BYPASS_MODE, 0x18),
- MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
- MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static void msm8x16_wcd_update_reg_defaults(struct snd_soc_codec *codec)
-{
- u32 i, version;
- struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
- version = get_codec_version(msm8x16_wcd);
- if (version == TOMBAK_1_0) {
- for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults); i++)
- snd_soc_write_wrapper(codec,
- msm8x16_wcd_reg_defaults[i].reg,
- msm8x16_wcd_reg_defaults[i].val);
- } else if (version == TOMBAK_2_0) {
- for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults_2_0); i++)
- snd_soc_write_wrapper(codec,
- msm8x16_wcd_reg_defaults_2_0[i].reg,
- msm8x16_wcd_reg_defaults_2_0[i].val);
- } else if (version == CONGA) {
- for (i = 0; i < ARRAY_SIZE(msm8909_wcd_reg_defaults); i++)
- snd_soc_write_wrapper(codec,
- msm8909_wcd_reg_defaults[i].reg,
- msm8909_wcd_reg_defaults[i].val);
- } else if (version == CAJON) {
- for (i = 0; i < ARRAY_SIZE(cajon_wcd_reg_defaults); i++)
- snd_soc_write_wrapper(codec,
- cajon_wcd_reg_defaults[i].reg,
- cajon_wcd_reg_defaults[i].val);
- } else if (version == CAJON_2_0 || version == DIANGU) {
- for (i = 0; i < ARRAY_SIZE(cajon2p0_wcd_reg_defaults); i++)
- snd_soc_write_wrapper(codec,
- cajon2p0_wcd_reg_defaults[i].reg,
- cajon2p0_wcd_reg_defaults[i].val);
- }
-}
-
-static const struct msm8x16_wcd_reg_mask_val
- msm8x16_wcd_codec_reg_init_val[] = {
-
- /* Initialize current threshold to 350MA
- * number of wait and run cycles to 4096
- */
- {MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0xFF, 0x12},
- {MSM89XX_PMIC_ANALOG_RX_COM_OCP_COUNT, 0xFF, 0xFF},
-};
-
-static void msm8x16_wcd_codec_init_reg(struct snd_soc_codec *codec)
-{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_codec_reg_init_val); i++)
- snd_soc_update_bits_wrapper(codec,
- msm8x16_wcd_codec_reg_init_val[i].reg,
- msm8x16_wcd_codec_reg_init_val[i].mask,
- msm8x16_wcd_codec_reg_init_val[i].val);
-}
-
-static int msm8x16_wcd_bringup(struct snd_soc_codec *codec)
-{
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
- 0xA5);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x01);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SEC_ACCESS,
- 0xA5);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x01);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
- 0xA5);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x00);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SEC_ACCESS,
- 0xA5);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x00);
- return 0;
-}
-
-static struct regulator *wcd8x16_wcd_codec_find_regulator(
- const struct msm8x16_wcd *msm8x16,
- const char *name)
-{
- int i;
-
- for (i = 0; i < msm8x16->num_of_supplies; i++) {
- if (msm8x16->supplies[i].supply &&
- !strcmp(msm8x16->supplies[i].supply, name))
- return msm8x16->supplies[i].consumer;
- }
-
- dev_err(msm8x16->dev, "Error: regulator not found:%s\n"
- , name);
- return NULL;
-}
-
-static int msm8x16_wcd_device_down(struct snd_soc_codec *codec)
-{
- struct msm_asoc_mach_data *pdata = NULL;
- struct msm8x16_wcd_priv *msm8x16_wcd_priv =
- snd_soc_codec_get_drvdata(codec);
- int i;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- dev_err(codec->dev, "%s: device down!\n", __func__);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_TX_1_EN, 0x3);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_TX_2_EN, 0x3);
- if (msm8x16_wcd_priv->boost_option == BOOST_ON_FOREVER) {
- if ((snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL)
- & 0x80) == 0) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x01, 0x01);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x03);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL,
- 0x0C, 0x0C);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
- 0x84, 0x84);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
- 0x10, 0x10);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
- 0x1F, 0x1F);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
- 0x90, 0x90);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
- 0xFF, 0xFF);
- usleep_range(20, 21);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
- 0xFF, 0xFF);
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 0xE9, 0xE9);
- }
- }
- msm8x16_wcd_boost_off(codec);
- msm8x16_wcd_priv->hph_mode = NORMAL_MODE;
- for (i = 0; i < MSM89XX_RX_MAX; i++)
- msm8x16_wcd_priv->comp_enabled[i] = COMPANDER_NONE;
-
- /* 40ms to allow boost to discharge */
- msleep(40);
- /* Disable PA to avoid pop during codec bring up */
- snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
- 0x30, 0x00);
- snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
- 0x80, 0x00);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x20);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x20);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x12);
- snd_soc_write_wrapper(codec,
- MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
-
- msm8x16_wcd_bringup(codec);
- atomic_set(&pdata->int_mclk0_enabled, false);
- set_bit(BUS_DOWN, &msm8x16_wcd_priv->status_mask);
- snd_soc_card_change_online_state(codec->component.card, 0);
- return 0;
-}
-
-static int msm8x16_wcd_device_up(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd_priv =
- snd_soc_codec_get_drvdata(codec);
- int ret = 0;
-
- dev_err(codec->dev, "%s: device up!\n", __func__);
-
- clear_bit(BUS_DOWN, &msm8x16_wcd_priv->status_mask);
-
- snd_soc_card_change_online_state(codec->component.card, 1);
- /* delay is required to make sure sound card state updated */
- usleep_range(5000, 5100);
-
- msm8x16_wcd_codec_init_reg(codec);
- msm8x16_wcd_update_reg_defaults(codec);
-
- snd_soc_write_wrapper(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
- MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
- snd_soc_write_wrapper(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
- MSM89XX_PMIC_DIGITAL_INT_EN_CLR__POR);
-
- msm8x16_wcd_set_boost_v(codec);
-
- msm8x16_wcd_set_micb_v(codec);
- if (msm8x16_wcd_priv->boost_option == BOOST_ON_FOREVER)
- msm8x16_wcd_boost_on(codec);
- else if (msm8x16_wcd_priv->boost_option == BYPASS_ALWAYS)
- msm8x16_wcd_bypass_on(codec);
-
- msm8x16_wcd_configure_cap(codec, false, false);
- wcd_mbhc_stop(&msm8x16_wcd_priv->mbhc);
- wcd_mbhc_deinit(&msm8x16_wcd_priv->mbhc);
- ret = wcd_mbhc_init(&msm8x16_wcd_priv->mbhc, codec, &mbhc_cb, &intr_ids,
- wcd_mbhc_registers, true);
- if (ret)
- dev_err(codec->dev, "%s: mbhc initialization failed\n",
- __func__);
- else
- wcd_mbhc_start(&msm8x16_wcd_priv->mbhc,
- msm8x16_wcd_priv->mbhc.mbhc_cfg);
-
-
- return 0;
-}
-
-static int adsp_state_callback(struct notifier_block *nb, unsigned long value,
- void *priv)
-{
- bool timedout;
- unsigned long timeout;
-
- if (value == SUBSYS_BEFORE_SHUTDOWN)
- msm8x16_wcd_device_down(registered_codec);
- else if (value == SUBSYS_AFTER_POWERUP) {
-
- dev_err(registered_codec->dev,
- "ADSP is about to power up. bring up codec\n");
-
- if (!q6core_is_adsp_ready()) {
- dev_err(registered_codec->dev,
- "ADSP isn't ready\n");
- timeout = jiffies +
- msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
- while (!(timedout = time_after(jiffies, timeout))) {
- if (!q6core_is_adsp_ready()) {
- dev_err(registered_codec->dev,
- "ADSP isn't ready\n");
- } else {
- dev_err(registered_codec->dev,
- "ADSP is ready\n");
- break;
- }
- }
- } else {
- dev_err(registered_codec->dev,
- "%s: DSP is ready\n", __func__);
- }
-
- msm8x16_wcd_device_up(registered_codec);
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block adsp_state_notifier_block = {
- .notifier_call = adsp_state_callback,
- .priority = -INT_MAX,
-};
-
-int msm8x16_wcd_hs_detect(struct snd_soc_codec *codec,
- struct wcd_mbhc_config *mbhc_cfg)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd_priv =
- snd_soc_codec_get_drvdata(codec);
-
- return wcd_mbhc_start(&msm8x16_wcd_priv->mbhc, mbhc_cfg);
-}
-EXPORT_SYMBOL(msm8x16_wcd_hs_detect);
-
-void msm8x16_wcd_hs_detect_exit(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd_priv =
- snd_soc_codec_get_drvdata(codec);
-
- wcd_mbhc_stop(&msm8x16_wcd_priv->mbhc);
-}
-EXPORT_SYMBOL(msm8x16_wcd_hs_detect_exit);
-
-void msm8x16_update_int_spk_boost(bool enable)
-{
- pr_err("%s: enable = %d\n", __func__, enable);
- spkr_boost_en = enable;
-}
-EXPORT_SYMBOL(msm8x16_update_int_spk_boost);
-
-static void msm8x16_wcd_set_micb_v(struct snd_soc_codec *codec)
-{
-
- struct msm8x16_wcd *msm8x16 = codec->control_data;
- struct msm8x16_wcd_pdata *pdata = msm8x16->dev->platform_data;
- u8 reg_val;
-
- reg_val = VOLTAGE_CONVERTER(pdata->micbias.cfilt1_mv, MICBIAS_MIN_VAL,
- MICBIAS_STEP_SIZE);
- dev_err(codec->dev, "cfilt1_mv %d reg_val %x\n",
- (u32)pdata->micbias.cfilt1_mv, reg_val);
- snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_MICB_1_VAL,
- 0xF8, (reg_val << 3));
-}
-
-static void msm8x16_wcd_set_boost_v(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd_priv =
- snd_soc_codec_get_drvdata(codec);
-
- snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_OUTPUT_VOLTAGE,
- 0x1F, msm8x16_wcd_priv->boost_voltage);
-}
-
-static void msm8x16_wcd_configure_cap(struct snd_soc_codec *codec,
- bool micbias1, bool micbias2)
-{
-
- struct msm_asoc_mach_data *pdata = NULL;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
-
- pr_err("\n %s: micbias1 %x micbias2 = %d\n", __func__, micbias1,
- micbias2);
- if (micbias1 && micbias2) {
- if ((pdata->micbias1_cap_mode
- == MICBIAS_EXT_BYP_CAP) ||
- (pdata->micbias2_cap_mode
- == MICBIAS_EXT_BYP_CAP))
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_EN,
- 0x40, (MICBIAS_EXT_BYP_CAP << 6));
- else
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_EN,
- 0x40, (MICBIAS_NO_EXT_BYP_CAP << 6));
- } else if (micbias2) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_EN,
- 0x40, (pdata->micbias2_cap_mode << 6));
- } else if (micbias1) {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_EN,
- 0x40, (pdata->micbias1_cap_mode << 6));
- } else {
- snd_soc_update_bits_wrapper(codec,
- MSM89XX_PMIC_ANALOG_MICB_1_EN,
- 0x40, 0x00);
- }
-}
-
-static int msm89xx_digcodec_probe(struct snd_soc_codec *codec)
-{
- registered_digcodec = codec;
-
- return 0;
-}
-
-
-static int msm89xx_digcodec_remove(struct snd_soc_codec *codec)
-{
- return 0;
-}
-
-static int msm8x16_wcd_codec_probe(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd_priv;
- int i, ret;
-
- dev_err(codec->dev, "%s()\n", __func__);
-
- msm8x16_wcd_priv = kzalloc(sizeof(struct msm8x16_wcd_priv), GFP_KERNEL);
- if (!msm8x16_wcd_priv)
- return -ENOMEM;
-
- for (i = 0; i < NUM_DECIMATORS; i++) {
- tx_hpf_work[i].msm8x16_wcd = msm8x16_wcd_priv;
- tx_hpf_work[i].decimator = i + 1;
- INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
- tx_hpf_corner_freq_callback);
- }
-
- codec->control_data = dev_get_drvdata(codec->dev);
- snd_soc_codec_set_drvdata(codec, msm8x16_wcd_priv);
- msm8x16_wcd_priv->codec = codec;
-
- msm8x16_wcd_priv->spkdrv_reg =
- wcd8x16_wcd_codec_find_regulator(codec->control_data,
- MSM89XX_VDD_SPKDRV_NAME);
- msm8x16_wcd_priv->pmic_rev = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_REVISION1);
- msm8x16_wcd_priv->codec_version = snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE);
- if (msm8x16_wcd_priv->codec_version == CONGA) {
- dev_err(codec->dev, "%s :Conga REV: %d\n", __func__,
- msm8x16_wcd_priv->codec_version);
- msm8x16_wcd_priv->ext_spk_boost_set = true;
- } else {
- dev_err(codec->dev, "%s :PMIC REV: %d\n", __func__,
- msm8x16_wcd_priv->pmic_rev);
- if (msm8x16_wcd_priv->pmic_rev == TOMBAK_1_0 &&
- msm8x16_wcd_priv->codec_version == CAJON_2_0) {
- msm8x16_wcd_priv->codec_version = DIANGU;
- dev_err(codec->dev, "%s : Diangu detected\n",
- __func__);
- } else if (msm8x16_wcd_priv->pmic_rev == TOMBAK_1_0 &&
- (snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
- & 0x80)) {
- msm8x16_wcd_priv->codec_version = CAJON;
- dev_err(codec->dev, "%s : Cajon detected\n", __func__);
- } else if (msm8x16_wcd_priv->pmic_rev == TOMBAK_2_0 &&
- (snd_soc_read_wrapper(codec,
- MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
- & 0x80)) {
- msm8x16_wcd_priv->codec_version = CAJON_2_0;
- dev_err(codec->dev, "%s : Cajon 2.0 detected\n",
- __func__);
- }
- }
- /*
- * set to default boost option BOOST_SWITCH, user mixer path can change
- * it to BOOST_ALWAYS or BOOST_BYPASS based on solution chosen.
- */
- msm8x16_wcd_priv->boost_option = BOOST_SWITCH;
- msm8x16_wcd_priv->hph_mode = NORMAL_MODE;
-
- for (i = 0; i < MSM89XX_RX_MAX; i++)
- msm8x16_wcd_priv->comp_enabled[i] = COMPANDER_NONE;
-
- msm8x16_wcd_dt_parse_boost_info(codec);
- msm8x16_wcd_set_boost_v(codec);
-
- snd_soc_add_codec_controls(codec, impedance_detect_controls,
- ARRAY_SIZE(impedance_detect_controls));
- snd_soc_add_codec_controls(codec, hph_type_detect_controls,
- ARRAY_SIZE(hph_type_detect_controls));
-
- msm8x16_wcd_bringup(codec);
- msm8x16_wcd_codec_init_reg(codec);
- msm8x16_wcd_update_reg_defaults(codec);
-
- wcd9xxx_spmi_set_codec(codec);
-
- msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].supply =
- wcd8x16_wcd_codec_find_regulator(
- codec->control_data,
- on_demand_supply_name[ON_DEMAND_MICBIAS]);
- atomic_set(&msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].ref, 0);
-
- BLOCKING_INIT_NOTIFIER_HEAD(&msm8x16_wcd_priv->notifier);
-
- msm8x16_wcd_priv->fw_data = kzalloc(sizeof(*(msm8x16_wcd_priv->fw_data))
- , GFP_KERNEL);
- if (!msm8x16_wcd_priv->fw_data) {
- kfree(msm8x16_wcd_priv);
- return -ENOMEM;
- }
-
- set_bit(WCD9XXX_MBHC_CAL, msm8x16_wcd_priv->fw_data->cal_bit);
- ret = wcd_cal_create_hwdep(msm8x16_wcd_priv->fw_data,
- WCD9XXX_CODEC_HWDEP_NODE, codec);
- if (ret < 0) {
- dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
- kfree(msm8x16_wcd_priv->fw_data);
- kfree(msm8x16_wcd_priv);
- return ret;
- }
-
- wcd_mbhc_init(&msm8x16_wcd_priv->mbhc, codec, &mbhc_cb, &intr_ids,
- wcd_mbhc_registers, true);
-
- msm8x16_wcd_priv->int_mclk0_enabled = false;
- msm8x16_wcd_priv->clock_active = false;
- msm8x16_wcd_priv->config_mode_active = false;
-
- /*Update speaker boost configuration*/
- msm8x16_wcd_priv->spk_boost_set = spkr_boost_en;
- pr_err("%s: speaker boost configured = %d\n",
- __func__, msm8x16_wcd_priv->spk_boost_set);
-
- /* Set initial MICBIAS voltage level */
- msm8x16_wcd_set_micb_v(codec);
-
- /* Set initial cap mode */
- msm8x16_wcd_configure_cap(codec, false, false);
- registered_codec = codec;
- adsp_state_notifier =
- subsys_notif_register_notifier("adsp",
- &adsp_state_notifier_block);
- if (!adsp_state_notifier) {
- dev_err(codec->dev, "Failed to register adsp state notifier\n");
- kfree(msm8x16_wcd_priv->fw_data);
- kfree(msm8x16_wcd_priv);
- registered_codec = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static int msm8x16_wcd_codec_remove(struct snd_soc_codec *codec)
-{
- struct msm8x16_wcd_priv *msm8x16_wcd_priv =
- snd_soc_codec_get_drvdata(codec);
- struct msm8x16_wcd *msm8x16_wcd;
-
- msm8x16_wcd = codec->control_data;
- msm8x16_wcd_priv->spkdrv_reg = NULL;
- msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].supply = NULL;
- atomic_set(&msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].ref, 0);
- kfree(msm8x16_wcd_priv->fw_data);
- kfree(msm8x16_wcd_priv);
-
- return 0;
-}
-
-static int msm8x16_wcd_enable_static_supplies_to_optimum(
- struct msm8x16_wcd *msm8x16,
- struct msm8x16_wcd_pdata *pdata)
-{
- int i;
- int ret = 0;
-
- for (i = 0; i < msm8x16->num_of_supplies; i++) {
- if (pdata->regulator[i].ondemand)
- continue;
- if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
- 0)
- continue;
-
- ret = regulator_set_voltage(msm8x16->supplies[i].consumer,
- pdata->regulator[i].min_uv,
- pdata->regulator[i].max_uv);
- if (ret) {
- dev_err(msm8x16->dev,
- "Setting volt failed for regulator %s err %d\n",
- msm8x16->supplies[i].supply, ret);
- }
-
- ret = regulator_set_load(msm8x16->supplies[i].consumer,
- pdata->regulator[i].optimum_ua);
- dev_err(msm8x16->dev, "Regulator %s set optimum mode\n",
- msm8x16->supplies[i].supply);
- }
-
- return ret;
-}
-
-static int msm8x16_wcd_disable_static_supplies_to_optimum(
- struct msm8x16_wcd *msm8x16,
- struct msm8x16_wcd_pdata *pdata)
-{
- int i;
- int ret = 0;
-
- for (i = 0; i < msm8x16->num_of_supplies; i++) {
- if (pdata->regulator[i].ondemand)
- continue;
- if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
- 0)
- continue;
- regulator_set_voltage(msm8x16->supplies[i].consumer, 0,
- pdata->regulator[i].max_uv);
- regulator_set_load(msm8x16->supplies[i].consumer, 0);
- dev_err(msm8x16->dev, "Regulator %s set optimum mode\n",
- msm8x16->supplies[i].supply);
- }
-
- return ret;
-}
-
-int msm8x16_wcd_suspend(struct snd_soc_codec *codec)
-{
- struct msm_asoc_mach_data *pdata = NULL;
- struct msm8x16_wcd *msm8x16 = codec->control_data;
- struct msm8x16_wcd_pdata *msm8x16_pdata = msm8x16->dev->platform_data;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- pr_err("%s: mclk cnt = %d, mclk_enabled = %d\n",
- __func__, atomic_read(&pdata->int_mclk0_rsc_ref),
- atomic_read(&pdata->int_mclk0_enabled));
- if (atomic_read(&pdata->int_mclk0_enabled) == true) {
- cancel_delayed_work_sync(
- &pdata->disable_int_mclk0_work);
- mutex_lock(&pdata->cdc_int_mclk0_mutex);
- pdata->digital_cdc_core_clk.enable = 0;
- afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
- &pdata->digital_cdc_core_clk);
- atomic_set(&pdata->int_mclk0_enabled, false);
- mutex_unlock(&pdata->cdc_int_mclk0_mutex);
- }
- msm8x16_wcd_disable_static_supplies_to_optimum(msm8x16, msm8x16_pdata);
- return 0;
-}
-
-int msm8x16_wcd_resume(struct snd_soc_codec *codec)
-{
- struct msm_asoc_mach_data *pdata = NULL;
- struct msm8x16_wcd *msm8x16 = codec->control_data;
- struct msm8x16_wcd_pdata *msm8x16_pdata = msm8x16->dev->platform_data;
-
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- msm8x16_wcd_enable_static_supplies_to_optimum(msm8x16, msm8x16_pdata);
- return 0;
-}
-
-static struct regmap *msm89xx_pmic_cdc_regmap;
-static struct regmap *msm89xx_pmic_cdc_get_regmap(struct device *dev)
-{
- return msm89xx_pmic_cdc_regmap;
-}
-
-static const struct snd_soc_codec_driver soc_codec_dev_msm8x16_wcd = {
- .probe = msm8x16_wcd_codec_probe,
- .remove = msm8x16_wcd_codec_remove,
-
- .suspend = msm8x16_wcd_suspend,
- .resume = msm8x16_wcd_resume,
-
- .controls = msm8x16_wcd_snd_controls,
- .num_controls = ARRAY_SIZE(msm8x16_wcd_snd_controls),
- .dapm_widgets = msm8x16_wcd_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(msm8x16_wcd_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
- .get_regmap = msm89xx_pmic_cdc_get_regmap,
-};
-
-static int msm8x16_wcd_init_supplies(struct msm8x16_wcd *msm8x16,
- struct msm8x16_wcd_pdata *pdata)
-{
- int ret;
- int i;
-
- msm8x16->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
- ARRAY_SIZE(pdata->regulator),
- GFP_KERNEL);
- if (!msm8x16->supplies) {
- ret = -ENOMEM;
- goto err;
- }
-
- msm8x16->num_of_supplies = 0;
-
- if (ARRAY_SIZE(pdata->regulator) > MAX_REGULATOR) {
- dev_err(msm8x16->dev, "%s: Array Size out of bound\n",
- __func__);
- ret = -EINVAL;
- goto err;
- }
-
- for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (pdata->regulator[i].name) {
- msm8x16->supplies[i].supply = pdata->regulator[i].name;
- msm8x16->num_of_supplies++;
- }
- }
-
- ret = regulator_bulk_get(msm8x16->dev, msm8x16->num_of_supplies,
- msm8x16->supplies);
- if (ret != 0) {
- dev_err(msm8x16->dev, "Failed to get supplies: err = %d\n",
- ret);
- goto err_supplies;
- }
-
- for (i = 0; i < msm8x16->num_of_supplies; i++) {
- if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
- 0)
- continue;
-
- ret = regulator_set_voltage(msm8x16->supplies[i].consumer,
- pdata->regulator[i].min_uv,
- pdata->regulator[i].max_uv);
- if (ret) {
- dev_err(msm8x16->dev, "Setting regulator voltage failed for regulator %s err = %d\n",
- msm8x16->supplies[i].supply, ret);
- goto err_get;
- }
-
- ret = regulator_set_load(msm8x16->supplies[i].consumer,
- pdata->regulator[i].optimum_ua);
- if (ret < 0) {
- dev_err(msm8x16->dev, "Setting regulator optimum mode failed for regulator %s err = %d\n",
- msm8x16->supplies[i].supply, ret);
- goto err_get;
- } else {
- ret = 0;
- }
- }
-
- return ret;
-
-err_get:
- regulator_bulk_free(msm8x16->num_of_supplies, msm8x16->supplies);
-err_supplies:
- kfree(msm8x16->supplies);
-err:
- return ret;
-}
-
-static int msm8x16_wcd_enable_static_supplies(struct msm8x16_wcd *msm8x16,
- struct msm8x16_wcd_pdata *pdata)
-{
- int i;
- int ret = 0;
-
- for (i = 0; i < msm8x16->num_of_supplies; i++) {
- if (pdata->regulator[i].ondemand)
- continue;
- ret = regulator_enable(msm8x16->supplies[i].consumer);
- if (ret) {
- dev_err(msm8x16->dev, "Failed to enable %s\n",
- msm8x16->supplies[i].supply);
- break;
- }
- dev_err(msm8x16->dev, "Enabled regulator %s\n",
- msm8x16->supplies[i].supply);
- }
-
- while (ret && --i)
- if (!pdata->regulator[i].ondemand)
- regulator_disable(msm8x16->supplies[i].consumer);
-
- return ret;
-}
-
-
-
-static void msm8x16_wcd_disable_supplies(struct msm8x16_wcd *msm8x16,
- struct msm8x16_wcd_pdata *pdata)
-{
- int i;
-
- regulator_bulk_disable(msm8x16->num_of_supplies,
- msm8x16->supplies);
- for (i = 0; i < msm8x16->num_of_supplies; i++) {
- if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
- 0)
- continue;
- regulator_set_voltage(msm8x16->supplies[i].consumer, 0,
- pdata->regulator[i].max_uv);
- regulator_set_load(msm8x16->supplies[i].consumer, 0);
- }
- regulator_bulk_free(msm8x16->num_of_supplies, msm8x16->supplies);
- kfree(msm8x16->supplies);
-}
-
-static struct snd_soc_dai_driver msm_codec_dais[] = {
- {
- .name = "msm-codec-rx",
- .playback = { /* Support maximum range */
- .stream_name = "Playback",
- .channels_min = 1,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- },
- {
- .name = "msm-codec-tx",
- .capture = { /* Support maximum range */
- .stream_name = "Record",
- .channels_min = 1,
- .channels_max = 4,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- },
-};
-
-static struct regmap *msm89xx_codec_regmap;
-static struct regmap *msm89xx_codec_get_regmap(struct device *dev)
-{
- return msm89xx_codec_regmap;
-}
-
-static struct snd_soc_codec_driver soc_msm89xx_codec = {
- .probe = msm89xx_digcodec_probe,
- .remove = msm89xx_digcodec_remove,
- .get_regmap = msm89xx_codec_get_regmap,
-};
-
-static const struct of_device_id msm89xx_codec_of_match[] = {
- { .compatible = "qcom,msm-codec-core",
- .data = "msm_codec"},
- { .compatible = "qcom,pmic-codec-digital",
- .data = "pmic-digital-codec"},
- { .compatible = "qcom,pmic-codec-analog",
- .data = "pmic-analog-codec"},
- {},
-};
-MODULE_DEVICE_TABLE(of, msm89xx_codec_of_match);
-
-static struct msm8x16_wcd *temp_89xx;
-static int msm89xx_codec_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct msm8x16_wcd *msm8x16 = NULL;
- struct msm8x16_wcd_pdata *pdata;
- int adsp_state;
- static int dev_registered_cnt;
- const struct of_device_id *match;
- const char *addr_prop_name = "qcom,dig-cdc-base-addr";
- u32 dig_cdc_addr;
- char __iomem *dig_base;
-
- adsp_state = apr_get_subsys_state();
- if (adsp_state != APR_SUBSYS_LOADED) {
- dev_err(&pdev->dev, "Adsp is not loaded yet %d\n",
- adsp_state);
- return -EPROBE_DEFER;
- }
-
- match = of_match_node(msm89xx_codec_of_match,
- pdev->dev.of_node);
-
- dev_dbg(&pdev->dev, "%s(%d):%s\n",
- __func__, __LINE__, (char *)match->data);
-
- if (!strcmp(match->data, "pmic-digital-codec")) {
- device_init_wakeup(&pdev->dev, true);
-
- if (pdev->dev.of_node) {
- dev_err(&pdev->dev, "%s:Platform data from device tree\n",
- __func__);
- pdata = msm8x16_wcd_populate_dt_pdata(&pdev->dev);
- pdev->dev.platform_data = pdata;
- } else {
- dev_err(&pdev->dev, "%s:Platform data from board file\n",
- __func__);
- pdata = pdev->dev.platform_data;
- }
- if (pdata == NULL) {
- dev_err(&pdev->dev, "%s:Platform data failed to populate\n",
- __func__);
- goto rtn;
- }
- msm8x16 = kzalloc(sizeof(struct msm8x16_wcd), GFP_KERNEL);
- if (msm8x16 == NULL) {
- ret = -ENOMEM;
- goto rtn;
- }
-
- msm8x16->dev = &pdev->dev;
- ret = msm8x16_wcd_init_supplies(msm8x16, pdata);
- if (ret) {
- dev_err(&pdev->dev, "%s: Fail to enable Codec supplies\n",
- __func__);
- goto err_codec;
- }
-
- ret = msm8x16_wcd_enable_static_supplies(msm8x16, pdata);
- if (ret) {
- dev_err(&pdev->dev,
- "%s: Fail to enable Codec pre-reset supplies\n",
- __func__);
- goto err_codec;
- }
- usleep_range(5, 6);
-
- mutex_init(&msm8x16->io_lock);
- dev_set_drvdata(&pdev->dev, msm8x16);
- temp_89xx = msm8x16;
- dev_registered_cnt++;
- } else if (!strcmp(match->data, "pmic-analog-codec")) {
- if (wcd9xxx_spmi_irq_init()) {
- dev_err(&pdev->dev,
- "%s: irq initialization failed\n", __func__);
- } else {
- dev_err(&pdev->dev,
- "%s: irq initialization passed\n", __func__);
- }
- dev_registered_cnt++;
- } else if (!strcmp(match->data, "msm-codec")) {
- ret = of_property_read_u32(pdev->dev.of_node, addr_prop_name,
- &dig_cdc_addr);
- if (ret) {
- dev_err(&pdev->dev, "%s: could not find %s entry in dt\n",
- __func__, addr_prop_name);
- dig_cdc_addr = MSM89XX_DIGITAL_CODEC_BASE_ADDR;
- }
- dig_base = ioremap(dig_cdc_addr,
- MSM89XX_DIGITAL_CODEC_REG_SIZE);
- if (dig_base == NULL) {
- dev_err(&pdev->dev, "%s ioremap failed\n", __func__);
- return -ENOMEM;
- }
- msm89xx_codec_regmap =
- devm_regmap_init_mmio_clk(&pdev->dev, NULL,
- dig_base, &msm89xx_cdc_core_regmap_config);
- snd_soc_register_codec(&pdev->dev, &soc_msm89xx_codec,
- msm_codec_dais, ARRAY_SIZE(msm_codec_dais));
- dev_registered_cnt++;
- }
-
- if ((dev_registered_cnt == MAX_MSM89XX_DEVICE) && (!ret)) {
- msm89xx_pmic_cdc_regmap =
- devm_regmap_init_spmi_ext(
- (struct spmi_device *) &pdev->dev.parent,
- &msm89xx_pmic_cdc_regmap_config);
- ret = snd_soc_register_codec(temp_89xx->dev,
- &soc_codec_dev_msm8x16_wcd,
- msm8x16_wcd_i2s_dai,
- ARRAY_SIZE(msm8x16_wcd_i2s_dai));
- if (ret) {
- dev_err(&pdev->dev,
- "%s:snd_soc_register_codec failed with error %d\n",
- __func__, ret);
- goto err_supplies;
- }
- }
- return ret;
-err_supplies:
- msm8x16_wcd_disable_supplies(msm8x16, pdata);
-err_codec:
- kfree(msm8x16);
-rtn:
- return ret;
-}
-
-static int msm89xx_codec_remove(struct platform_device *pdev)
-{
- struct msm8x16_wcd *msm8x16 = dev_get_drvdata(&pdev->dev);
-
- mutex_destroy(&msm8x16->io_lock);
- kfree(msm8x16);
-
- return 0;
-}
-
-static struct platform_driver msm_codec_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(msm89xx_codec_of_match)
- },
- .probe = msm89xx_codec_probe,
- .remove = msm89xx_codec_remove,
-};
-module_platform_driver(msm_codec_driver);
-
-MODULE_DESCRIPTION("MSM89xx Audio codec driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm_sdw/Kconfig b/sound/soc/codecs/msm_sdw/Kconfig
new file mode 100644
index 000000000000..abd7c8c7dfb0
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/Kconfig
@@ -0,0 +1,6 @@
+config SND_SOC_MSM_SDW
+ tristate "MSM Internal soundwire codec"
+ help
+ MSM-based soundwire codec core driver
+ supported along with internal digital
+ codec core.
diff --git a/sound/soc/codecs/msm_sdw/Makefile b/sound/soc/codecs/msm_sdw/Makefile
new file mode 100644
index 000000000000..64e932b9d262
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/Makefile
@@ -0,0 +1,3 @@
+snd-soc-msm-sdw-objs := msm_sdw_cdc.o msm_sdw_regmap.o msm-sdw-tables.o msm_sdw_cdc_utils.o
+obj-$(CONFIG_SND_SOC_MSM_SDW) += snd-soc-msm-sdw.o
+ccflags-y += -I$(srctree)/sound/soc/msm
diff --git a/sound/soc/codecs/msm_sdw/msm-sdw-tables.c b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
new file mode 100644
index 000000000000..767b9052a7da
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
@@ -0,0 +1,221 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include "msm_sdw.h"
+
+const u8 msm_sdw_page_map[MSM_SDW_MAX_REGISTER] = {
+ [MSM_SDW_TX9_SPKR_PROT_PATH_CTL] = 0xa,
+ [MSM_SDW_TX9_SPKR_PROT_PATH_CFG0] = 0xa,
+ [MSM_SDW_TX10_SPKR_PROT_PATH_CTL] = 0xa,
+ [MSM_SDW_TX10_SPKR_PROT_PATH_CFG0] = 0xa,
+ [MSM_SDW_TX11_SPKR_PROT_PATH_CTL] = 0xa,
+ [MSM_SDW_TX11_SPKR_PROT_PATH_CFG0] = 0xa,
+ [MSM_SDW_TX12_SPKR_PROT_PATH_CTL] = 0xa,
+ [MSM_SDW_TX12_SPKR_PROT_PATH_CFG0] = 0xa,
+ [MSM_SDW_COMPANDER7_CTL0] = 0xb,
+ [MSM_SDW_COMPANDER7_CTL1] = 0xb,
+ [MSM_SDW_COMPANDER7_CTL2] = 0xb,
+ [MSM_SDW_COMPANDER7_CTL3] = 0xb,
+ [MSM_SDW_COMPANDER7_CTL4] = 0xb,
+ [MSM_SDW_COMPANDER7_CTL5] = 0xb,
+ [MSM_SDW_COMPANDER7_CTL6] = 0xb,
+ [MSM_SDW_COMPANDER7_CTL7] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL0] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL1] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL2] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL3] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL4] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL5] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL6] = 0xb,
+ [MSM_SDW_COMPANDER8_CTL7] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_CTL] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_CFG0] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_CFG1] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_CFG2] = 0xb,
+ [MSM_SDW_RX7_RX_VOL_CTL] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_MIX_CTL] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_MIX_CFG] = 0xb,
+ [MSM_SDW_RX7_RX_VOL_MIX_CTL] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_SEC0] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_SEC1] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_SEC2] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_SEC3] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_SEC5] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_SEC6] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_SEC7] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_MIX_SEC0] = 0xb,
+ [MSM_SDW_RX7_RX_PATH_MIX_SEC1] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_CTL] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_CFG0] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_CFG1] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_CFG2] = 0xb,
+ [MSM_SDW_RX8_RX_VOL_CTL] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_MIX_CTL] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_MIX_CFG] = 0xb,
+ [MSM_SDW_RX8_RX_VOL_MIX_CTL] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_SEC0] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_SEC1] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_SEC2] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_SEC3] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_SEC5] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_SEC6] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_SEC7] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_MIX_SEC0] = 0xb,
+ [MSM_SDW_RX8_RX_PATH_MIX_SEC1] = 0xb,
+ [MSM_SDW_BOOST0_BOOST_PATH_CTL] = 0xc,
+ [MSM_SDW_BOOST0_BOOST_CTL] = 0xc,
+ [MSM_SDW_BOOST0_BOOST_CFG1] = 0xc,
+ [MSM_SDW_BOOST0_BOOST_CFG2] = 0xc,
+ [MSM_SDW_BOOST1_BOOST_PATH_CTL] = 0xc,
+ [MSM_SDW_BOOST1_BOOST_CTL] = 0xc,
+ [MSM_SDW_BOOST1_BOOST_CFG1] = 0xc,
+ [MSM_SDW_BOOST1_BOOST_CFG2] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_0] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_1] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_2] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_3] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_0] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_1] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_2] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_3] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_0] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_1] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_2] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_3] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_0] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_1] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_2] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_3] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_ACCESS_CFG] = 0xc,
+ [MSM_SDW_AHB_BRIDGE_ACCESS_STATUS] = 0xc,
+ [MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL] = 0xd,
+ [MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL] = 0xd,
+ [MSM_SDW_CLK_RST_CTRL_SWR_CONTROL] = 0xd,
+ [MSM_SDW_TOP_TOP_CFG0] = 0xd,
+ [MSM_SDW_TOP_TOP_CFG1] = 0xd,
+ [MSM_SDW_TOP_RX_I2S_CTL] = 0xd,
+ [MSM_SDW_TOP_TX_I2S_CTL] = 0xd,
+ [MSM_SDW_TOP_RX7_PATH_INPUT0_MUX] = 0xd,
+ [MSM_SDW_TOP_RX7_PATH_INPUT1_MUX] = 0xd,
+ [MSM_SDW_TOP_RX8_PATH_INPUT0_MUX] = 0xd,
+ [MSM_SDW_TOP_RX8_PATH_INPUT1_MUX] = 0xd,
+ [MSM_SDW_TOP_FREQ_MCLK] = 0xd,
+ [MSM_SDW_TOP_DEBUG_BUS_SEL] = 0xd,
+ [MSM_SDW_TOP_DEBUG_EN] = 0xd,
+ [MSM_SDW_TOP_I2S_RESET] = 0xd,
+ [MSM_SDW_TOP_BLOCKS_RESET] = 0xd,
+};
+
+const u8 msm_sdw_reg_readable[MSM_SDW_MAX_REGISTER] = {
+ [MSM_SDW_PAGE_REGISTER] = 1,
+ [MSM_SDW_TX9_SPKR_PROT_PATH_CTL] = 1,
+ [MSM_SDW_TX9_SPKR_PROT_PATH_CFG0] = 1,
+ [MSM_SDW_TX10_SPKR_PROT_PATH_CTL] = 1,
+ [MSM_SDW_TX10_SPKR_PROT_PATH_CFG0] = 1,
+ [MSM_SDW_TX11_SPKR_PROT_PATH_CTL] = 1,
+ [MSM_SDW_TX11_SPKR_PROT_PATH_CFG0] = 1,
+ [MSM_SDW_TX12_SPKR_PROT_PATH_CTL] = 1,
+ [MSM_SDW_TX12_SPKR_PROT_PATH_CFG0] = 1,
+ [MSM_SDW_COMPANDER7_CTL0] = 1,
+ [MSM_SDW_COMPANDER7_CTL1] = 1,
+ [MSM_SDW_COMPANDER7_CTL2] = 1,
+ [MSM_SDW_COMPANDER7_CTL3] = 1,
+ [MSM_SDW_COMPANDER7_CTL4] = 1,
+ [MSM_SDW_COMPANDER7_CTL5] = 1,
+ [MSM_SDW_COMPANDER7_CTL6] = 1,
+ [MSM_SDW_COMPANDER7_CTL7] = 1,
+ [MSM_SDW_COMPANDER8_CTL0] = 1,
+ [MSM_SDW_COMPANDER8_CTL1] = 1,
+ [MSM_SDW_COMPANDER8_CTL2] = 1,
+ [MSM_SDW_COMPANDER8_CTL3] = 1,
+ [MSM_SDW_COMPANDER8_CTL4] = 1,
+ [MSM_SDW_COMPANDER8_CTL5] = 1,
+ [MSM_SDW_COMPANDER8_CTL6] = 1,
+ [MSM_SDW_COMPANDER8_CTL7] = 1,
+ [MSM_SDW_RX7_RX_PATH_CTL] = 1,
+ [MSM_SDW_RX7_RX_PATH_CFG0] = 1,
+ [MSM_SDW_RX7_RX_PATH_CFG1] = 1,
+ [MSM_SDW_RX7_RX_PATH_CFG2] = 1,
+ [MSM_SDW_RX7_RX_VOL_CTL] = 1,
+ [MSM_SDW_RX7_RX_PATH_MIX_CTL] = 1,
+ [MSM_SDW_RX7_RX_PATH_MIX_CFG] = 1,
+ [MSM_SDW_RX7_RX_VOL_MIX_CTL] = 1,
+ [MSM_SDW_RX7_RX_PATH_SEC0] = 1,
+ [MSM_SDW_RX7_RX_PATH_SEC1] = 1,
+ [MSM_SDW_RX7_RX_PATH_SEC2] = 1,
+ [MSM_SDW_RX7_RX_PATH_SEC3] = 1,
+ [MSM_SDW_RX7_RX_PATH_SEC5] = 1,
+ [MSM_SDW_RX7_RX_PATH_SEC6] = 1,
+ [MSM_SDW_RX7_RX_PATH_SEC7] = 1,
+ [MSM_SDW_RX7_RX_PATH_MIX_SEC0] = 1,
+ [MSM_SDW_RX7_RX_PATH_MIX_SEC1] = 1,
+ [MSM_SDW_RX8_RX_PATH_CTL] = 1,
+ [MSM_SDW_RX8_RX_PATH_CFG0] = 1,
+ [MSM_SDW_RX8_RX_PATH_CFG1] = 1,
+ [MSM_SDW_RX8_RX_PATH_CFG2] = 1,
+ [MSM_SDW_RX8_RX_VOL_CTL] = 1,
+ [MSM_SDW_RX8_RX_PATH_MIX_CTL] = 1,
+ [MSM_SDW_RX8_RX_PATH_MIX_CFG] = 1,
+ [MSM_SDW_RX8_RX_VOL_MIX_CTL] = 1,
+ [MSM_SDW_RX8_RX_PATH_SEC0] = 1,
+ [MSM_SDW_RX8_RX_PATH_SEC1] = 1,
+ [MSM_SDW_RX8_RX_PATH_SEC2] = 1,
+ [MSM_SDW_RX8_RX_PATH_SEC3] = 1,
+ [MSM_SDW_RX8_RX_PATH_SEC5] = 1,
+ [MSM_SDW_RX8_RX_PATH_SEC6] = 1,
+ [MSM_SDW_RX8_RX_PATH_SEC7] = 1,
+ [MSM_SDW_RX8_RX_PATH_MIX_SEC0] = 1,
+ [MSM_SDW_RX8_RX_PATH_MIX_SEC1] = 1,
+ [MSM_SDW_BOOST0_BOOST_PATH_CTL] = 1,
+ [MSM_SDW_BOOST0_BOOST_CTL] = 1,
+ [MSM_SDW_BOOST0_BOOST_CFG1] = 1,
+ [MSM_SDW_BOOST0_BOOST_CFG2] = 1,
+ [MSM_SDW_BOOST1_BOOST_PATH_CTL] = 1,
+ [MSM_SDW_BOOST1_BOOST_CTL] = 1,
+ [MSM_SDW_BOOST1_BOOST_CFG1] = 1,
+ [MSM_SDW_BOOST1_BOOST_CFG2] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_0] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_1] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_2] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_DATA_3] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_0] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_1] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_2] = 1,
+ [MSM_SDW_AHB_BRIDGE_WR_ADDR_3] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_0] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_1] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_2] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_ADDR_3] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_0] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_1] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_2] = 1,
+ [MSM_SDW_AHB_BRIDGE_RD_DATA_3] = 1,
+ [MSM_SDW_AHB_BRIDGE_ACCESS_CFG] = 1,
+ [MSM_SDW_AHB_BRIDGE_ACCESS_STATUS] = 1,
+ [MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL] = 1,
+ [MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL] = 1,
+ [MSM_SDW_CLK_RST_CTRL_SWR_CONTROL] = 1,
+ [MSM_SDW_TOP_TOP_CFG0] = 1,
+ [MSM_SDW_TOP_TOP_CFG1] = 1,
+ [MSM_SDW_TOP_RX_I2S_CTL] = 1,
+ [MSM_SDW_TOP_TX_I2S_CTL] = 1,
+ [MSM_SDW_TOP_RX7_PATH_INPUT0_MUX] = 1,
+ [MSM_SDW_TOP_RX7_PATH_INPUT1_MUX] = 1,
+ [MSM_SDW_TOP_RX8_PATH_INPUT0_MUX] = 1,
+ [MSM_SDW_TOP_RX8_PATH_INPUT1_MUX] = 1,
+ [MSM_SDW_TOP_FREQ_MCLK] = 1,
+ [MSM_SDW_TOP_DEBUG_BUS_SEL] = 1,
+ [MSM_SDW_TOP_DEBUG_EN] = 1,
+ [MSM_SDW_TOP_I2S_RESET] = 1,
+ [MSM_SDW_TOP_BLOCKS_RESET] = 1,
+};
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw.h b/sound/soc/codecs/msm_sdw/msm_sdw.h
new file mode 100644
index 000000000000..d464c5064635
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw.h
@@ -0,0 +1,166 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_SDW_H
+#define MSM_SDW_H
+
+#include <sound/soc.h>
+#include <sound/q6afe-v2.h>
+#include "msm_sdw_registers.h"
+
+#define MSM_SDW_MAX_REGISTER 0x400
+
+extern const struct regmap_config msm_sdw_regmap_config;
+extern const u8 msm_sdw_page_map[MSM_SDW_MAX_REGISTER];
+extern const u8 msm_sdw_reg_readable[MSM_SDW_MAX_REGISTER];
+
+enum {
+ MSM_SDW_RX4 = 0,
+ MSM_SDW_RX5,
+ MSM_SDW_RX_MAX,
+};
+
+enum {
+ MSM_SDW_TX0 = 0,
+ MSM_SDW_TX1,
+ MSM_SDW_TX_MAX,
+};
+
+enum {
+ COMP1, /* SPK_L */
+ COMP2, /* SPK_R */
+ COMP_MAX
+};
+
+/*
+ * Structure used to update codec
+ * register defaults after reset
+ */
+struct msm_sdw_reg_mask_val {
+ u16 reg;
+ u8 mask;
+ u8 val;
+};
+
+/*
+ * Selects compander and smart boost settings
+ * for a given speaker mode
+ */
+enum {
+ SPKR_MODE_DEFAULT,
+ SPKR_MODE_1, /* COMP Gain = 12dB, Smartboost Max = 5.5V */
+};
+
+/* Rx path gain offsets */
+enum {
+ RX_GAIN_OFFSET_M1P5_DB,
+ RX_GAIN_OFFSET_0_DB,
+};
+
+struct msm_sdw_reg_val {
+ unsigned short reg; /* register address */
+ u8 *buf; /* buffer to be written to reg. addr */
+ int bytes; /* number of bytes to be written */
+};
+
+/* Hold instance to soundwire platform device */
+struct msm_sdw_ctrl_data {
+ struct platform_device *sdw_pdev;
+};
+
+struct wcd_sdw_ctrl_platform_data {
+ void *handle; /* holds codec private data */
+ int (*read)(void *handle, int reg);
+ int (*write)(void *handle, int reg, int val);
+ int (*bulk_write)(void *handle, u32 *reg, u32 *val, size_t len);
+ int (*clk)(void *handle, bool enable);
+ int (*handle_irq)(void *handle,
+ irqreturn_t (*swrm_irq_handler)(int irq,
+ void *data),
+ void *swrm_handle,
+ int action);
+};
+
+struct msm_sdw_priv {
+ struct device *dev;
+ struct mutex io_lock;
+
+ int (*read_dev)(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+ int bytes, void *dest);
+ int (*write_dev)(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+ int bytes, void *src);
+ int (*multi_reg_write)(struct msm_sdw_priv *msm_sdw, const void *data,
+ size_t count);
+ struct snd_soc_codec *codec;
+ /* SoundWire data structure */
+ struct msm_sdw_ctrl_data *sdw_ctrl_data;
+ int nr;
+
+ /* compander */
+ int comp_enabled[COMP_MAX];
+ int ear_spkr_gain;
+
+ /* to track the status */
+ unsigned long status_mask;
+
+ struct work_struct msm_sdw_add_child_devices_work;
+ struct wcd_sdw_ctrl_platform_data sdw_plat_data;
+
+ unsigned int vi_feed_value;
+
+ struct mutex sdw_read_lock;
+ struct mutex sdw_write_lock;
+ struct mutex sdw_clk_lock;
+ int sdw_clk_users;
+
+ int sdw_irq;
+ int int_mclk1_rsc_ref;
+ bool int_mclk1_enabled;
+ bool sdw_npl_clk_enabled;
+ struct mutex cdc_int_mclk1_mutex;
+ struct mutex sdw_npl_clk_mutex;
+ struct delayed_work disable_int_mclk1_work;
+ struct afe_clk_set sdw_cdc_core_clk;
+ struct afe_clk_set sdw_npl_clk;
+ struct notifier_block service_nb;
+ int (*sdw_cdc_gpio_fn)(bool enable, struct snd_soc_codec *codec);
+ bool dev_up;
+
+ int spkr_gain_offset;
+ int spkr_mode;
+ struct mutex codec_mutex;
+ int rx_4_count;
+ int rx_5_count;
+ u32 mclk_rate;
+ struct regmap *regmap;
+
+ bool prev_pg_valid;
+ u8 prev_pg;
+ u32 sdw_base_addr;
+ char __iomem *sdw_base;
+ u32 version;
+
+ /* Entry for version info */
+ struct snd_info_entry *entry;
+ struct snd_info_entry *version_entry;
+};
+
+extern int msm_sdw_set_spkr_mode(struct snd_soc_codec *codec, int mode);
+extern int msm_sdw_set_spkr_gain_offset(struct snd_soc_codec *codec,
+ int offset);
+extern void msm_sdw_gpio_cb(
+ int (*sdw_cdc_gpio_fn)(bool enable, struct snd_soc_codec *codec),
+ struct snd_soc_codec *codec);
+extern struct regmap *msm_sdw_regmap_init(struct device *dev,
+ const struct regmap_config *config);
+extern int msm_sdw_codec_info_create_codec_entry(struct snd_info_entry *,
+ struct snd_soc_codec *);
+#endif
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
new file mode 100644
index 000000000000..c8f7b05aef87
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -0,0 +1,1920 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/soundwire/swr-wcd.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/q6core.h>
+#include <sound/tlv.h>
+#include "msm_sdw.h"
+#include "msm_sdw_registers.h"
+
+#define MSM_SDW_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
+#define MSM_SDW_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S24_3LE)
+
+#define MSM_SDW_STRING_LEN 80
+
+#define INT_MCLK1_FREQ 9600000
+#define SDW_NPL_FREQ 153600000
+
+#define MSM_SDW_VERSION_1_0 0x0001
+#define MSM_SDW_VERSION_ENTRY_SIZE 32
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+static struct snd_soc_dai_driver msm_sdw_dai[];
+static bool initial_boot = true;
+
+static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
+ int event, int gain_reg);
+static int msm_sdw_config_compander(struct snd_soc_codec *, int, int);
+static int msm_sdw_mclk_enable(struct snd_soc_codec *codec,
+ int mclk_enable, bool dapm);
+static int msm_int_enable_sdw_cdc_clk(struct snd_soc_codec *codec,
+ int enable, bool dapm);
+
+enum {
+ VI_SENSE_1,
+ VI_SENSE_2,
+};
+
+enum {
+ AIF1_SDW_PB = 0,
+ AIF1_SDW_VIFEED,
+ NUM_CODEC_DAIS,
+};
+
+static const struct msm_sdw_reg_mask_val msm_sdw_spkr_default[] = {
+ {MSM_SDW_COMPANDER7_CTL3, 0x80, 0x80},
+ {MSM_SDW_COMPANDER8_CTL3, 0x80, 0x80},
+ {MSM_SDW_COMPANDER7_CTL7, 0x01, 0x01},
+ {MSM_SDW_COMPANDER8_CTL7, 0x01, 0x01},
+ {MSM_SDW_BOOST0_BOOST_CTL, 0x7C, 0x50},
+ {MSM_SDW_BOOST1_BOOST_CTL, 0x7C, 0x50},
+};
+
+static const struct msm_sdw_reg_mask_val msm_sdw_spkr_mode1[] = {
+ {MSM_SDW_COMPANDER7_CTL3, 0x80, 0x00},
+ {MSM_SDW_COMPANDER8_CTL3, 0x80, 0x00},
+ {MSM_SDW_COMPANDER7_CTL7, 0x01, 0x00},
+ {MSM_SDW_COMPANDER8_CTL7, 0x01, 0x00},
+ {MSM_SDW_BOOST0_BOOST_CTL, 0x7C, 0x44},
+ {MSM_SDW_BOOST1_BOOST_CTL, 0x7C, 0x44},
+};
+
+/**
+ * msm_sdw_set_spkr_gain_offset - offset the speaker path
+ * gain with the given offset value.
+ *
+ * @codec: codec instance
+ * @offset: Indicates speaker path gain offset value.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int msm_sdw_set_spkr_gain_offset(struct snd_soc_codec *codec, int offset)
+{
+ struct msm_sdw_priv *priv;
+
+ if (!codec) {
+ pr_err("%s: NULL codec pointer!\n", __func__);
+ return -EINVAL;
+ }
+
+ priv = snd_soc_codec_get_drvdata(codec);
+ if (!priv)
+ return -EINVAL;
+
+ priv->spkr_gain_offset = offset;
+ return 0;
+}
+EXPORT_SYMBOL(msm_sdw_set_spkr_gain_offset);
+
+/**
+ * msm_sdw_set_spkr_mode - Configures speaker compander and smartboost
+ * settings based on speaker mode.
+ *
+ * @codec: codec instance
+ * @mode: Indicates speaker configuration mode.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int msm_sdw_set_spkr_mode(struct snd_soc_codec *codec, int mode)
+{
+ struct msm_sdw_priv *priv;
+ int i;
+ const struct msm_sdw_reg_mask_val *regs;
+ int size;
+
+ if (!codec) {
+ pr_err("%s: NULL codec pointer!\n", __func__);
+ return -EINVAL;
+ }
+
+ priv = snd_soc_codec_get_drvdata(codec);
+ if (!priv)
+ return -EINVAL;
+
+ switch (mode) {
+ case SPKR_MODE_1:
+ regs = msm_sdw_spkr_mode1;
+ size = ARRAY_SIZE(msm_sdw_spkr_mode1);
+ break;
+ default:
+ regs = msm_sdw_spkr_default;
+ size = ARRAY_SIZE(msm_sdw_spkr_default);
+ break;
+ }
+
+ priv->spkr_mode = mode;
+ for (i = 0; i < size; i++)
+ snd_soc_update_bits(codec, regs[i].reg,
+ regs[i].mask, regs[i].val);
+ return 0;
+}
+EXPORT_SYMBOL(msm_sdw_set_spkr_mode);
+
+static int msm_enable_sdw_npl_clk(struct msm_sdw_priv *msm_sdw, int enable)
+{
+ int ret = 0;
+
+ dev_dbg(msm_sdw->dev, "%s: enable %d\n", __func__, enable);
+
+ mutex_lock(&msm_sdw->sdw_npl_clk_mutex);
+ if (enable) {
+ if (msm_sdw->sdw_npl_clk_enabled == false) {
+ msm_sdw->sdw_npl_clk.enable = 1;
+ ret = afe_set_lpass_clock_v2(
+ AFE_PORT_ID_INT4_MI2S_RX,
+ &msm_sdw->sdw_npl_clk);
+ if (ret < 0) {
+ dev_err(msm_sdw->dev,
+ "%s: failed to enable SDW NPL CLK\n",
+ __func__);
+ mutex_unlock(&msm_sdw->sdw_npl_clk_mutex);
+ return ret;
+ }
+ dev_dbg(msm_sdw->dev, "enabled sdw npl clk\n");
+ msm_sdw->sdw_npl_clk_enabled = true;
+ }
+ } else {
+ if (msm_sdw->sdw_npl_clk_enabled == true) {
+ msm_sdw->sdw_npl_clk.enable = 0;
+ ret = afe_set_lpass_clock_v2(
+ AFE_PORT_ID_INT4_MI2S_RX,
+ &msm_sdw->sdw_npl_clk);
+ if (ret < 0)
+ dev_err(msm_sdw->dev,
+ "%s: failed to disable SDW NPL CLK\n",
+ __func__);
+ msm_sdw->sdw_npl_clk_enabled = false;
+ }
+ }
+ mutex_unlock(&msm_sdw->sdw_npl_clk_mutex);
+ return ret;
+}
+
+/**
+ * msm_sdw_gpio_cb - Register callback by machine driver for sdw gpio.
+ *
+ * @sdw_cdc_gpio_fn: Function pointer to trigger for enable/disable sdw gpios.
+ * @codec: sdw codec instance.
+ *
+ */
+void msm_sdw_gpio_cb(
+ int (*sdw_cdc_gpio_fn)(bool enable, struct snd_soc_codec *codec),
+ struct snd_soc_codec *codec)
+{
+ struct msm_sdw_priv *msm_sdw;
+
+ if (!codec) {
+ pr_err("%s:NULL codec pointer!\n", __func__);
+ return;
+ }
+ msm_sdw = snd_soc_codec_get_drvdata(codec);
+ msm_sdw->sdw_cdc_gpio_fn = sdw_cdc_gpio_fn;
+}
+EXPORT_SYMBOL(msm_sdw_gpio_cb);
+
+static int msm_int_enable_sdw_cdc_clk(struct snd_soc_codec *codec,
+ int enable, bool dapm)
+{
+ int ret = 0;
+ struct msm_sdw_priv *msm_sdw;
+
+ if (!codec) {
+ pr_err("%s:NULL codec pointer\n", __func__);
+ return -EINVAL;
+ }
+ msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+ mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+ dev_dbg(msm_sdw->dev, "%s: enable %d mclk1 ref counter %d\n",
+ __func__, enable, msm_sdw->int_mclk1_rsc_ref);
+ if (enable) {
+ if (msm_sdw->int_mclk1_rsc_ref == 0) {
+ cancel_delayed_work_sync(
+ &msm_sdw->disable_int_mclk1_work);
+ if (msm_sdw->int_mclk1_enabled == false) {
+ msm_sdw->sdw_cdc_core_clk.enable = 1;
+ ret = afe_set_lpass_clock_v2(
+ AFE_PORT_ID_INT4_MI2S_RX,
+ &msm_sdw->sdw_cdc_core_clk);
+ if (ret < 0) {
+ dev_err(msm_sdw->dev,
+ "%s: failed to enable SDW MCLK\n",
+ __func__);
+ goto rtn;
+ }
+ dev_dbg(msm_sdw->dev,
+ "enabled sdw codec core mclk\n");
+ msm_sdw->int_mclk1_enabled = true;
+ }
+ }
+ msm_sdw->int_mclk1_rsc_ref++;
+ } else {
+ cancel_delayed_work_sync(&msm_sdw->disable_int_mclk1_work);
+ if (msm_sdw->int_mclk1_rsc_ref > 0) {
+ msm_sdw->int_mclk1_rsc_ref--;
+ dev_dbg(msm_sdw->dev,
+ "%s: decrementing mclk_res_ref %d\n",
+ __func__, msm_sdw->int_mclk1_rsc_ref);
+ }
+ if (msm_sdw->int_mclk1_enabled == true &&
+ msm_sdw->int_mclk1_rsc_ref == 0) {
+ msm_sdw->sdw_cdc_core_clk.enable = 0;
+ ret = afe_set_lpass_clock_v2(
+ AFE_PORT_ID_INT4_MI2S_RX,
+ &msm_sdw->sdw_cdc_core_clk);
+ if (ret < 0)
+ dev_err(msm_sdw->dev,
+ "%s: failed to disable SDW MCLK\n",
+ __func__);
+ msm_sdw->int_mclk1_enabled = false;
+ }
+ }
+ mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+rtn:
+ return ret;
+}
+EXPORT_SYMBOL(msm_int_enable_sdw_cdc_clk);
+
+static void msm_disable_int_mclk1(struct work_struct *work)
+{
+ struct msm_sdw_priv *msm_sdw = NULL;
+ struct delayed_work *dwork;
+ int ret = 0;
+
+ dwork = to_delayed_work(work);
+ msm_sdw = container_of(dwork, struct msm_sdw_priv,
+ disable_int_mclk1_work);
+ mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+ dev_dbg(msm_sdw->dev, "%s: mclk1_enabled %d mclk1_rsc_ref %d\n",
+ __func__, msm_sdw->int_mclk1_enabled,
+ msm_sdw->int_mclk1_rsc_ref);
+
+ if (msm_sdw->int_mclk1_enabled == true
+ && msm_sdw->int_mclk1_rsc_ref == 0) {
+ dev_dbg(msm_sdw->dev, "Disable the mclk1\n");
+ msm_sdw->sdw_cdc_core_clk.enable = 0;
+ ret = afe_set_lpass_clock_v2(
+ AFE_PORT_ID_INT4_MI2S_RX,
+ &msm_sdw->sdw_cdc_core_clk);
+ if (ret < 0)
+ dev_err(msm_sdw->dev,
+ "%s failed to disable the MCLK1\n",
+ __func__);
+ msm_sdw->int_mclk1_enabled = false;
+ }
+ mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+}
+
+static int msm_int_mclk1_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+ dev_dbg(msm_sdw->dev, "%s: event = %d\n", __func__, event);
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* enable the codec mclk config */
+ msm_int_enable_sdw_cdc_clk(codec, 1, true);
+ msm_sdw_mclk_enable(codec, 1, true);
+ if (msm_sdw->sdw_cdc_gpio_fn)
+ msm_sdw->sdw_cdc_gpio_fn(true, codec);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* disable the codec mclk config */
+ if (msm_sdw->sdw_cdc_gpio_fn)
+ msm_sdw->sdw_cdc_gpio_fn(false, codec);
+ msm_sdw_mclk_enable(codec, 0, true);
+ msm_int_enable_sdw_cdc_clk(codec, 0, true);
+ break;
+ default:
+ dev_err(msm_sdw->dev,
+ "%s: invalid DAPM event %d\n", __func__, event);
+ ret = -EINVAL;
+ }
+ mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+ return ret;
+}
+
+static int msm_sdw_ahb_write_device(struct msm_sdw_priv *msm_sdw,
+ u16 reg, u8 *value)
+{
+ u32 temp = (u32)(*value) & 0x000000FF;
+
+ if (!msm_sdw->dev_up) {
+ dev_dbg(msm_sdw->dev, "%s: q6 not ready\n", __func__);
+ return 0;
+ }
+
+ iowrite32(temp, msm_sdw->sdw_base + reg);
+ return 0;
+}
+
+static int msm_sdw_ahb_read_device(struct msm_sdw_priv *msm_sdw,
+ u16 reg, u8 *value)
+{
+ u32 temp;
+
+ if (!msm_sdw->dev_up) {
+ dev_dbg(msm_sdw->dev, "%s: q6 not ready\n", __func__);
+ return 0;
+ }
+
+ temp = ioread32(msm_sdw->sdw_base + reg);
+ *value = (u8)temp;
+ return 0;
+}
+
+static int __msm_sdw_reg_read(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+ int bytes, void *dest)
+{
+ int ret = -EINVAL, i;
+ u8 temp = 0;
+
+ dev_dbg(msm_sdw->dev, "%s reg = %x\n", __func__, reg);
+ mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+ if (msm_sdw->int_mclk1_enabled == false) {
+ msm_sdw->sdw_cdc_core_clk.enable = 1;
+ ret = afe_set_lpass_clock_v2(
+ AFE_PORT_ID_INT4_MI2S_RX,
+ &msm_sdw->sdw_cdc_core_clk);
+ if (ret < 0) {
+ dev_err(msm_sdw->dev,
+ "%s:failed to enable the INT_MCLK1\n",
+ __func__);
+ goto unlock_exit;
+ }
+ dev_dbg(msm_sdw->dev, "%s:enabled sdw codec core clk\n",
+ __func__);
+ for (i = 0; i < bytes; i++) {
+ ret = msm_sdw_ahb_read_device(
+ msm_sdw, reg + (4 * i), &temp);
+ ((u8 *)dest)[i] = temp;
+ }
+ msm_sdw->int_mclk1_enabled = true;
+ schedule_delayed_work(&msm_sdw->disable_int_mclk1_work, 50);
+ goto unlock_exit;
+ }
+ for (i = 0; i < bytes; i++) {
+ ret = msm_sdw_ahb_read_device(
+ msm_sdw, reg + (4 * i), &temp);
+ ((u8 *)dest)[i] = temp;
+ }
+unlock_exit:
+ mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+ if (ret < 0) {
+ dev_err_ratelimited(msm_sdw->dev,
+ "%s: codec read failed for reg 0x%x\n",
+ __func__, reg);
+ return ret;
+ }
+ dev_dbg(msm_sdw->dev, "Read 0x%02x from 0x%x\n", temp, reg);
+
+ return 0;
+}
+
+static int __msm_sdw_reg_write(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+ int bytes, void *src)
+{
+ int ret = -EINVAL, i;
+
+ mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+ if (msm_sdw->int_mclk1_enabled == false) {
+ msm_sdw->sdw_cdc_core_clk.enable = 1;
+ ret = afe_set_lpass_clock_v2(AFE_PORT_ID_INT4_MI2S_RX,
+ &msm_sdw->sdw_cdc_core_clk);
+ if (ret < 0) {
+ dev_err(msm_sdw->dev,
+ "%s: failed to enable the INT_MCLK1\n",
+ __func__);
+ ret = 0;
+ goto unlock_exit;
+ }
+ dev_dbg(msm_sdw->dev, "%s: enabled INT_MCLK1\n", __func__);
+ for (i = 0; i < bytes; i++)
+ ret = msm_sdw_ahb_write_device(msm_sdw, reg + (4 * i),
+ &((u8 *)src)[i]);
+ msm_sdw->int_mclk1_enabled = true;
+ schedule_delayed_work(&msm_sdw->disable_int_mclk1_work, 50);
+ goto unlock_exit;
+ }
+ for (i = 0; i < bytes; i++)
+ ret = msm_sdw_ahb_write_device(msm_sdw, reg + (4 * i),
+ &((u8 *)src)[i]);
+unlock_exit:
+ mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+
+ return ret;
+}
+
+static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = NULL;
+ struct msm_sdw_priv *msm_sdw_p = NULL;
+ int ret = 0;
+
+ if (!w) {
+ pr_err("%s invalid params\n", __func__);
+ return -EINVAL;
+ }
+ codec = snd_soc_dapm_to_codec(w->dapm);
+ msm_sdw_p = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s: num_dai %d stream name %s\n",
+ __func__, codec->component.num_dai, w->sname);
+
+ dev_dbg(codec->dev, "%s(): w->name %s event %d w->shift %d\n",
+ __func__, w->name, event, w->shift);
+ if (w->shift != AIF1_SDW_VIFEED) {
+ dev_err(codec->dev,
+ "%s:Error in enabling the vi feedback path\n",
+ __func__);
+ ret = -EINVAL;
+ goto out_vi;
+ }
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (test_bit(VI_SENSE_1, &msm_sdw_p->status_mask)) {
+ dev_dbg(codec->dev, "%s: spkr1 enabled\n", __func__);
+ /* Enable V&I sensing */
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
+ 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F,
+ 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x10);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x10,
+ 0x10);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
+ 0x00);
+ }
+ if (test_bit(VI_SENSE_2, &msm_sdw_p->status_mask)) {
+ dev_dbg(codec->dev, "%s: spkr2 enabled\n", __func__);
+ /* Enable V&I sensing */
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x20,
+ 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x20,
+ 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x0F,
+ 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x0F,
+ 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x10,
+ 0x10);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x10,
+ 0x10);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x20,
+ 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x20,
+ 0x00);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (test_bit(VI_SENSE_1, &msm_sdw_p->status_mask)) {
+ /* Disable V&I sensing */
+ dev_dbg(codec->dev, "%s: spkr1 disabled\n", __func__);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
+ 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x10,
+ 0x00);
+ }
+ if (test_bit(VI_SENSE_2, &msm_sdw_p->status_mask)) {
+ /* Disable V&I sensing */
+ dev_dbg(codec->dev, "%s: spkr2 disabled\n", __func__);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x20,
+ 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x20,
+ 0x20);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x10,
+ 0x00);
+ snd_soc_update_bits(codec,
+ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x10,
+ 0x00);
+ }
+ break;
+ }
+out_vi:
+ return ret;
+}
+
+static int msm_sdwm_handle_irq(void *handle,
+ irqreturn_t (*swrm_irq_handler)(int irq,
+ void *data),
+ void *swrm_handle,
+ int action)
+{
+ struct msm_sdw_priv *msm_sdw;
+ int ret = 0;
+
+ if (!handle) {
+ pr_err("%s: null handle received\n", __func__);
+ return -EINVAL;
+ }
+ msm_sdw = (struct msm_sdw_priv *) handle;
+
+ if (action) {
+ ret = request_threaded_irq(msm_sdw->sdw_irq, NULL,
+ swrm_irq_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "swr_master_irq", swrm_handle);
+ if (ret)
+ dev_err(msm_sdw->dev, "%s: Failed to request irq %d\n",
+ __func__, ret);
+ } else
+ free_irq(msm_sdw->sdw_irq, swrm_handle);
+
+ return ret;
+}
+
+static void msm_sdw_codec_hd2_control(struct snd_soc_codec *codec,
+ u16 reg, int event)
+{
+ u16 hd2_scale_reg;
+ u16 hd2_enable_reg = 0;
+
+ if (reg == MSM_SDW_RX7_RX_PATH_CTL) {
+ hd2_scale_reg = MSM_SDW_RX7_RX_PATH_SEC3;
+ hd2_enable_reg = MSM_SDW_RX7_RX_PATH_CFG0;
+ }
+ if (reg == MSM_SDW_RX8_RX_PATH_CTL) {
+ hd2_scale_reg = MSM_SDW_RX8_RX_PATH_SEC3;
+ hd2_enable_reg = MSM_SDW_RX8_RX_PATH_CFG0;
+ }
+
+ if (hd2_enable_reg && SND_SOC_DAPM_EVENT_ON(event)) {
+ snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x10);
+ snd_soc_update_bits(codec, hd2_scale_reg, 0x03, 0x01);
+ snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x04);
+ }
+
+ if (hd2_enable_reg && SND_SOC_DAPM_EVENT_OFF(event)) {
+ snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x00);
+ snd_soc_update_bits(codec, hd2_scale_reg, 0x03, 0x00);
+ snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x00);
+ }
+}
+
+/*
+ * msm_sdw_codec_enable_interpolator - DAPM event handler for the
+ * "RX INT4 INTERP" / "RX INT5 INTERP" mixer widgets.
+ *
+ * PRE_PMU:  ungates the RX path clock/reset bits, applies HD2 config,
+ *           bumps the per-path use count and publishes the total active
+ *           RX channel count to every soundwire master.
+ * POST_PMU: configures the compander and applies digital gain; a -2
+ *           step offset compensates compander gain in the
+ *           RX_GAIN_OFFSET_M1P5_DB mode.
+ * POST_PMD: mirror image of the above, including a path soft reset.
+ *
+ * Returns 0, or -EINVAL if the widget name is not recognised.
+ */
+static int msm_sdw_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	u16 gain_reg;
+	u16 reg;
+	int val;
+	int i, ch_cnt;
+	int offset_val = 0;
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+
+	/* Map the widget name to its RX path control/volume registers. */
+	if (!(strcmp(w->name, "RX INT4 INTERP"))) {
+		reg = MSM_SDW_RX7_RX_PATH_CTL;
+		gain_reg = MSM_SDW_RX7_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT5 INTERP"))) {
+		reg = MSM_SDW_RX8_RX_PATH_CTL;
+		gain_reg = MSM_SDW_RX8_RX_VOL_CTL;
+	} else {
+		dev_err(codec->dev, "%s: Interpolator reg not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, reg, 0x10, 0x10);
+		msm_sdw_codec_hd2_control(codec, reg, event);
+		snd_soc_update_bits(codec, reg, 1 << 0x5, 1 << 0x5);
+		/* Reset if needed */
+		/* Count each path at most once, then tell each soundwire
+		 * master the new active RX channel count.
+		 */
+		if (!(strcmp(w->name, "RX INT4 INTERP")) &&
+		    !msm_sdw->rx_4_count)
+			msm_sdw->rx_4_count++;
+		if (!(strcmp(w->name, "RX INT5 INTERP")) &&
+		    !msm_sdw->rx_5_count)
+			msm_sdw->rx_5_count++;
+		ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
+
+		for (i = 0; i < msm_sdw->nr; i++) {
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_DEVICE_UP, NULL);
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_SET_NUM_RX_CH, &ch_cnt);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		msm_sdw_config_compander(codec, w->shift, event);
+		/* apply gain after int clk is enabled */
+		if ((msm_sdw->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (msm_sdw->comp_enabled[COMP1] ||
+		     msm_sdw->comp_enabled[COMP2]) &&
+		    (gain_reg == MSM_SDW_RX7_RX_VOL_CTL ||
+		     gain_reg == MSM_SDW_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, MSM_SDW_RX7_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec, MSM_SDW_RX8_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			offset_val = -2;
+		}
+		val = snd_soc_read(codec, gain_reg);
+		val += offset_val;
+		snd_soc_write(codec, gain_reg, val);
+		msm_sdw_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Disable the path, pulse the soft reset bit, then undo
+		 * HD2/compander config in reverse of the PMU sequence.
+		 */
+		snd_soc_update_bits(codec, reg, 1 << 0x5, 0 << 0x5);
+		snd_soc_update_bits(codec, reg, 0x40, 0x40);
+		snd_soc_update_bits(codec, reg, 0x40, 0x00);
+		msm_sdw_codec_hd2_control(codec, reg, event);
+		msm_sdw_config_compander(codec, w->shift, event);
+		if ((msm_sdw->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (msm_sdw->comp_enabled[COMP1] ||
+		     msm_sdw->comp_enabled[COMP2]) &&
+		    (gain_reg == MSM_SDW_RX7_RX_VOL_CTL ||
+		     gain_reg == MSM_SDW_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, MSM_SDW_RX7_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec, MSM_SDW_RX8_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			offset_val = 2;
+			val = snd_soc_read(codec, gain_reg);
+			val += offset_val;
+			snd_soc_write(codec, gain_reg, val);
+		}
+		msm_sdw_config_ear_spkr_gain(codec, event, gain_reg);
+		if (!(strcmp(w->name, "RX INT4 INTERP")) &&
+		    msm_sdw->rx_4_count)
+			msm_sdw->rx_4_count--;
+		if (!(strcmp(w->name, "RX INT5 INTERP")) &&
+		    msm_sdw->rx_5_count)
+			msm_sdw->rx_5_count--;
+		ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
+
+		for (i = 0; i < msm_sdw->nr; i++)
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_SET_NUM_RX_CH, &ch_cnt);
+		break;
+	};
+
+	return 0;
+}
+
+/*
+ * msm_sdw_config_ear_spkr_gain - adjust RX4 digital volume to compensate
+ * for compander gain when a non-zero ear speaker gain is selected.
+ *
+ * POST_PMU applies the compensated volume; POST_PMD restores 0 dB.
+ * Always returns 0.
+ */
+static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
+					int event, int gain_reg)
+{
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	int comp_gain_offset;
+	int val;
+
+	/* Compander gain is 12 dB in SPKR_MODE1, 15 dB otherwise. */
+	comp_gain_offset = (msm_sdw->spkr_mode == SPKR_MODE_1) ? -12 : -15;
+
+	/* Only act on the RX4 path, with COMP1 enabled and a non-zero
+	 * ear speaker gain requested.
+	 */
+	if (!msm_sdw->comp_enabled[COMP1] ||
+	    gain_reg != MSM_SDW_RX7_RX_VOL_CTL ||
+	    msm_sdw->ear_spkr_gain == 0)
+		return 0;
+
+	if (event == SND_SOC_DAPM_POST_PMU) {
+		/* For example, val is -8(-12+5-1) for 4dB of gain */
+		val = comp_gain_offset + msm_sdw->ear_spkr_gain - 1;
+		snd_soc_write(codec, gain_reg, val);
+
+		dev_dbg(codec->dev, "%s: RX4 Volume %d dB\n",
+			__func__, val);
+	} else if (event == SND_SOC_DAPM_POST_PMD) {
+		/* Reset RX4 volume back to 0 dB. */
+		snd_soc_write(codec, gain_reg, 0x0);
+
+		dev_dbg(codec->dev, "%s: Reset RX4 Volume to 0 dB\n",
+			__func__);
+	}
+
+	return 0;
+}
+
+/*
+ * msm_sdw_codec_spk_boost_event - DAPM handler for the "RX INT4 CHAIN" /
+ * "RX INT5 CHAIN" widgets; sequences the speaker boost block around the
+ * RX path.
+ *
+ * PRE_PMU enables the boost path and selects boost in the RX path
+ * config; POST_PMD reverses both. Returns 0, or -EINVAL for an unknown
+ * widget name.
+ */
+static int msm_sdw_codec_spk_boost_event(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 boost_path_ctl, boost_path_cfg1;
+	u16 reg;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	if (!strcmp(w->name, "RX INT4 CHAIN")) {
+		boost_path_ctl = MSM_SDW_BOOST0_BOOST_PATH_CTL;
+		boost_path_cfg1 = MSM_SDW_RX7_RX_PATH_CFG1;
+		reg = MSM_SDW_RX7_RX_PATH_CTL;
+	} else if (!strcmp(w->name, "RX INT5 CHAIN")) {
+		boost_path_ctl = MSM_SDW_BOOST1_BOOST_PATH_CTL;
+		boost_path_cfg1 = MSM_SDW_RX8_RX_PATH_CFG1;
+		reg = MSM_SDW_RX8_RX_PATH_CTL;
+	} else {
+		dev_err(codec->dev, "%s: boost reg not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x10);
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x01);
+		snd_soc_update_bits(codec, reg, 0x10, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x00);
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x00);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_sdw_config_compander - program the COMP7/COMP8 compander for an
+ * interpolator path when that compander has been enabled via kcontrol.
+ * @codec: codec instance
+ * @comp: compander index (COMP1 or COMP2; anything else is a no-op)
+ * @event: DAPM event; ON enables clock + soft reset pulse + RX path
+ *         compander select, OFF halts and fully resets the block.
+ *
+ * The register write order below is a hardware bring-up/teardown
+ * sequence and must not be reordered. Always returns 0.
+ */
+static int msm_sdw_config_compander(struct snd_soc_codec *codec, int comp,
+				    int event)
+{
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	u16 comp_ctl0_reg, rx_path_cfg0_reg;
+
+	if (comp < COMP1 || comp >= COMP_MAX)
+		return 0;
+
+	dev_dbg(codec->dev, "%s: event %d compander %d, enabled %d\n",
+		__func__, event, comp + 1, msm_sdw->comp_enabled[comp]);
+
+	if (!msm_sdw->comp_enabled[comp])
+		return 0;
+
+	/* COMP7/COMP8 register banks are 8 apart; RX7/RX8 paths 20 apart. */
+	comp_ctl0_reg = MSM_SDW_COMPANDER7_CTL0 + (comp * 8);
+	rx_path_cfg0_reg = MSM_SDW_RX7_RX_PATH_CFG0 + (comp * 20);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		/* Enable Compander Clock */
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x01);
+		/* Pulse the soft reset bit before enabling in the path. */
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x02);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		/* Halt, deselect from the RX path, reset, then gate clock. */
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x04);
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x00);
+	}
+
+	return 0;
+}
+
+/* Report whether the compander selected by the control's shift is
+ * currently enabled.
+ */
+static int msm_sdw_get_compander(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	struct soc_multi_mixer_control *mc =
+		(struct soc_multi_mixer_control *)kcontrol->private_value;
+
+	ucontrol->value.integer.value[0] = msm_sdw->comp_enabled[mc->shift];
+
+	return 0;
+}
+
+/* Cache the requested enable state for the selected compander; the
+ * hardware is programmed later from the interpolator DAPM sequence.
+ */
+static int msm_sdw_set_compander(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	struct soc_multi_mixer_control *mc =
+		(struct soc_multi_mixer_control *)kcontrol->private_value;
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: Compander %d enable current %d, new %d\n",
+		__func__, mc->shift + 1, msm_sdw->comp_enabled[mc->shift],
+		value);
+	msm_sdw->comp_enabled[mc->shift] = value;
+
+	return 0;
+}
+
+/* Read back the cached EAR SPKR PA gain selection. */
+static int msm_sdw_ear_spkr_pa_gain_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_sdw_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = priv->ear_spkr_gain;
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+/* Cache the EAR SPKR PA gain selection; it is applied from
+ * msm_sdw_config_ear_spkr_gain() during DAPM power-up.
+ */
+static int msm_sdw_ear_spkr_pa_gain_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_sdw_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	priv->ear_spkr_gain = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: gain = %d\n", __func__,
+		priv->ear_spkr_gain);
+
+	return 0;
+}
+
+/* Report the cached VI feedback mixer switch state. */
+static int msm_sdw_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_codec *codec =
+			snd_soc_dapm_to_codec(wlist->widgets[0]->dapm);
+	struct msm_sdw_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = priv->vi_feed_value;
+
+	return 0;
+}
+
+/*
+ * msm_sdw_vi_feed_mixer_put - enable/disable a VI-sense feedback channel
+ * in the "AIF1_VI_SDW Mixer".
+ *
+ * Tracks which VI sense ports (TX0 -> VI_SENSE_1, TX1 -> VI_SENSE_2) are
+ * active in status_mask (under codec_mutex), then lets DAPM re-evaluate
+ * the mixer path power state. Always returns 0.
+ */
+static int msm_sdw_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct msm_sdw_priv *msm_sdw_p = snd_soc_codec_get_drvdata(codec);
+	struct soc_multi_mixer_control *mixer =
+		((struct soc_multi_mixer_control *)kcontrol->private_value);
+	u32 dai_id = widget->shift;
+	u32 port_id = mixer->shift;
+	u32 enable = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: enable: %d, port_id:%d, dai_id: %d\n",
+		__func__, enable, port_id, dai_id);
+
+	msm_sdw_p->vi_feed_value = ucontrol->value.integer.value[0];
+
+	mutex_lock(&msm_sdw_p->codec_mutex);
+	if (enable) {
+		if (port_id == MSM_SDW_TX0 && !test_bit(VI_SENSE_1,
+						&msm_sdw_p->status_mask))
+			set_bit(VI_SENSE_1, &msm_sdw_p->status_mask);
+		if (port_id == MSM_SDW_TX1 && !test_bit(VI_SENSE_2,
+						&msm_sdw_p->status_mask))
+			set_bit(VI_SENSE_2, &msm_sdw_p->status_mask);
+	} else {
+		if (port_id == MSM_SDW_TX0 && test_bit(VI_SENSE_1,
+						&msm_sdw_p->status_mask))
+			clear_bit(VI_SENSE_1, &msm_sdw_p->status_mask);
+		if (port_id == MSM_SDW_TX1 && test_bit(VI_SENSE_2,
+						&msm_sdw_p->status_mask))
+			clear_bit(VI_SENSE_2, &msm_sdw_p->status_mask);
+	}
+	mutex_unlock(&msm_sdw_p->codec_mutex);
+	/* Propagate the new switch state into the DAPM graph. */
+	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, NULL);
+
+	return 0;
+}
+
+/*
+ * msm_sdw_mclk_enable - enable/disable the SDW codec internal MCLK1.
+ * @codec: codec instance (NULL-checked)
+ * @mclk_enable: non-zero to enable the FS counter + MCLK control bits,
+ *               zero to disable them
+ * @dapm: true when called from a DAPM context (logged only)
+ *
+ * Serialized by cdc_int_mclk1_mutex. Returns 0 on success or -EINVAL
+ * when @codec is NULL.
+ *
+ * EXPORT_SYMBOL requires external linkage, so this function must not be
+ * declared static (machine drivers call it via the msm-sdw header).
+ */
+int msm_sdw_mclk_enable(struct snd_soc_codec *codec,
+			int mclk_enable, bool dapm)
+{
+	struct msm_sdw_priv *msm_sdw;
+
+	if (!codec) {
+		pr_err("%s:NULL codec pointer\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+	mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+	dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
+		__func__, mclk_enable, dapm);
+	if (mclk_enable) {
+		snd_soc_update_bits(codec,
+				MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
+				0x01, 0x01);
+		snd_soc_update_bits(codec,
+				MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL,
+				0x01, 0x01);
+		/* 9.6MHz MCLK, set value 0x00 if other frequency */
+		snd_soc_update_bits(codec,
+				MSM_SDW_TOP_FREQ_MCLK, 0x01, 0x01);
+	} else {
+		snd_soc_update_bits(codec,
+				MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
+				0x01, 0x00);
+		snd_soc_update_bits(codec,
+				MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL,
+				0x01, 0x00);
+	}
+	mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(msm_sdw_mclk_enable);
+
+/*
+ * msm_sdw_swrm_read - soundwire master callback: read a soundwire
+ * register through the AHB read bridge.
+ * @handle: msm_sdw_priv pointer registered with the swr master
+ * @reg: soundwire register address
+ *
+ * Writes the address to the RD_ADDR bank, then reads the 4-byte result
+ * from the RD_DATA bank, all under sdw_read_lock.
+ * Returns the register value, or a negative error code.
+ *
+ * NOTE(review): the (u8 *)&reg / (u8 *)&val casts treat the ints as raw
+ * 4-byte buffers - presumably relies on a little-endian CPU matching
+ * the bridge's byte order; confirm against the platform.
+ */
+static int msm_sdw_swrm_read(void *handle, int reg)
+{
+	struct msm_sdw_priv *msm_sdw;
+	unsigned short sdw_rd_addr_base;
+	unsigned short sdw_rd_data_base;
+	int val, ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw = (struct msm_sdw_priv *)handle;
+
+	dev_dbg(msm_sdw->dev, "%s: Reading soundwire register, 0x%x\n",
+		__func__, reg);
+	sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
+	sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
+	/* read_lock */
+	mutex_lock(&msm_sdw->sdw_read_lock);
+	ret = regmap_bulk_write(msm_sdw->regmap, sdw_rd_addr_base,
+				(u8 *)&reg, 4);
+	if (ret < 0) {
+		dev_err(msm_sdw->dev, "%s: RD Addr Failure\n", __func__);
+		goto err;
+	}
+	/* Check for RD value */
+	ret = regmap_bulk_read(msm_sdw->regmap, sdw_rd_data_base,
+			       (u8 *)&val, 4);
+	if (ret < 0) {
+		dev_err(msm_sdw->dev, "%s: RD Data Failure\n", __func__);
+		goto err;
+	}
+	ret = val;
+err:
+	/* read_unlock */
+	mutex_unlock(&msm_sdw->sdw_read_lock);
+	return ret;
+}
+
+/*
+ * msm_sdw_bulk_write - push (data, address) register pairs to the AHB
+ * write bridge.
+ * @msm_sdw: private driver data
+ * @bulk_reg: descriptor array; entries come in pairs - data word first,
+ *            address word second - each carrying a 4-byte buffer
+ * @len: number of entries in @bulk_reg (must be even)
+ *
+ * Caller must hold sdw_write_lock. Returns 0 on success or the regmap
+ * error code of the first failing write.
+ */
+static int msm_sdw_bulk_write(struct msm_sdw_priv *msm_sdw,
+			      struct msm_sdw_reg_val *bulk_reg,
+			      size_t len)
+{
+	/* size_t index avoids the signed/unsigned comparison against len */
+	size_t i;
+	int ret = 0;
+	unsigned short sdw_wr_addr_base;
+	unsigned short sdw_wr_data_base;
+
+	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
+	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
+
+	for (i = 0; i < len; i += 2) {
+		/* First Write the Data to register */
+		ret = regmap_bulk_write(msm_sdw->regmap,
+			sdw_wr_data_base, bulk_reg[i].buf, 4);
+		if (ret < 0) {
+			dev_err(msm_sdw->dev, "%s: WR Data Failure\n",
+				__func__);
+			break;
+		}
+		/* Next Write Address */
+		ret = regmap_bulk_write(msm_sdw->regmap,
+			sdw_wr_addr_base, bulk_reg[i + 1].buf, 4);
+		if (ret < 0) {
+			dev_err(msm_sdw->dev,
+				"%s: WR Addr Failure: 0x%x\n",
+				__func__, (u32)(bulk_reg[i + 1].buf[0]));
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * msm_sdw_swrm_bulk_write - soundwire master callback: bulk register
+ * write via the AHB bridge.
+ * @handle: msm_sdw_priv pointer registered with the swr master
+ * @reg: array of @len register addresses
+ * @val: array of @len values
+ * @len: number of reg/val pairs; must be non-zero
+ *
+ * Builds an interleaved (data, address) descriptor list and hands it to
+ * msm_sdw_bulk_write() under sdw_write_lock.
+ * Returns 0 on success or a negative error code.
+ */
+static int msm_sdw_swrm_bulk_write(void *handle, u32 *reg, u32 *val, size_t len)
+{
+	struct msm_sdw_priv *msm_sdw;
+	struct msm_sdw_reg_val *bulk_reg;
+	unsigned short sdw_wr_addr_base;
+	unsigned short sdw_wr_data_base;
+	size_t i, j;
+	int ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+
+	msm_sdw = (struct msm_sdw_priv *)handle;
+	/* len is unsigned, so "len <= 0" could only ever catch zero */
+	if (!len) {
+		dev_err(msm_sdw->dev,
+			"%s: Invalid size: %zu\n", __func__, len);
+		return -EINVAL;
+	}
+
+	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
+	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
+
+	/* kcalloc checks the n * size multiplication for overflow */
+	bulk_reg = kcalloc(len, 2 * sizeof(struct msm_sdw_reg_val),
+			   GFP_KERNEL);
+	if (!bulk_reg)
+		return -ENOMEM;
+
+	/* Interleave: even slots carry data words, odd slots addresses. */
+	for (i = 0, j = 0; i < (len * 2); i += 2, j++) {
+		bulk_reg[i].reg = sdw_wr_data_base;
+		bulk_reg[i].buf = (u8 *)(&val[j]);
+		bulk_reg[i].bytes = 4;
+		bulk_reg[i + 1].reg = sdw_wr_addr_base;
+		bulk_reg[i + 1].buf = (u8 *)(&reg[j]);
+		bulk_reg[i + 1].bytes = 4;
+	}
+	mutex_lock(&msm_sdw->sdw_write_lock);
+
+	ret = msm_sdw_bulk_write(msm_sdw, bulk_reg, (len * 2));
+	if (ret)
+		dev_err(msm_sdw->dev, "%s: swrm bulk write failed, ret: %d\n",
+			__func__, ret);
+
+	mutex_unlock(&msm_sdw->sdw_write_lock);
+	kfree(bulk_reg);
+
+	return ret;
+}
+
+/*
+ * msm_sdw_swrm_write - soundwire master callback: single register write
+ * via the AHB bridge.
+ * @handle: msm_sdw_priv pointer registered with the swr master
+ * @reg: soundwire register address
+ * @val: value to write
+ *
+ * Builds one (data, address) pair on the stack and writes it under
+ * sdw_write_lock. Returns 0 on success or a negative error code.
+ *
+ * NOTE(review): as in msm_sdw_swrm_read(), the (u8 *) casts assume the
+ * CPU byte order matches the bridge - confirm against the platform.
+ */
+static int msm_sdw_swrm_write(void *handle, int reg, int val)
+{
+	struct msm_sdw_priv *msm_sdw;
+	unsigned short sdw_wr_addr_base;
+	unsigned short sdw_wr_data_base;
+	struct msm_sdw_reg_val bulk_reg[2];
+	int ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw = (struct msm_sdw_priv *)handle;
+
+	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
+	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
+
+	/* First Write the Data to register */
+	bulk_reg[0].reg = sdw_wr_data_base;
+	bulk_reg[0].buf = (u8 *)(&val);
+	bulk_reg[0].bytes = 4;
+	bulk_reg[1].reg = sdw_wr_addr_base;
+	bulk_reg[1].buf = (u8 *)(&reg);
+	bulk_reg[1].bytes = 4;
+
+	mutex_lock(&msm_sdw->sdw_write_lock);
+
+	ret = msm_sdw_bulk_write(msm_sdw, bulk_reg, 2);
+	if (ret < 0)
+		dev_err(msm_sdw->dev, "%s: WR Data Failure\n", __func__);
+
+	mutex_unlock(&msm_sdw->sdw_write_lock);
+	return ret;
+}
+
+/*
+ * msm_sdw_swrm_clock - reference-counted enable/disable of the soundwire
+ * master clock (NPL clock + SWR control bit).
+ * @handle: msm_sdw_priv pointer
+ * @enable: true to take a reference, false to drop one
+ *
+ * The hardware is only touched on the 0->1 and 1->0 transitions, all
+ * under sdw_clk_lock. Always returns 0.
+ *
+ * NOTE(review): there is no guard against an unbalanced disable driving
+ * sdw_clk_users negative - callers must keep enable/disable paired.
+ */
+static int msm_sdw_swrm_clock(void *handle, bool enable)
+{
+	struct msm_sdw_priv *msm_sdw = (struct msm_sdw_priv *) handle;
+
+	mutex_lock(&msm_sdw->sdw_clk_lock);
+
+	dev_dbg(msm_sdw->dev, "%s: swrm clock %s\n",
+		__func__, (enable ? "enable" : "disable"));
+	if (enable) {
+		msm_sdw->sdw_clk_users++;
+		if (msm_sdw->sdw_clk_users == 1) {
+			msm_enable_sdw_npl_clk(msm_sdw, true);
+			regmap_update_bits(msm_sdw->regmap,
+				MSM_SDW_CLK_RST_CTRL_SWR_CONTROL,
+				0x01, 0x01);
+		}
+	} else {
+		msm_sdw->sdw_clk_users--;
+		if (msm_sdw->sdw_clk_users == 0) {
+			regmap_update_bits(msm_sdw->regmap,
+				MSM_SDW_CLK_RST_CTRL_SWR_CONTROL,
+				0x01, 0x00);
+			msm_enable_sdw_npl_clk(msm_sdw, false);
+		}
+	}
+	dev_dbg(msm_sdw->dev, "%s: swrm clock users %d\n",
+		__func__, msm_sdw->sdw_clk_users);
+	mutex_unlock(&msm_sdw->sdw_clk_lock);
+	return 0;
+}
+
+/* DAI startup callback - nothing to configure; log for debugging only. */
+static int msm_sdw_startup(struct snd_pcm_substream *substream,
+			   struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
+		__func__,
+		substream->name, substream->stream);
+
+	return 0;
+}
+
+/*
+ * msm_sdw_hw_params - DAI hw_params callback: program the RX I2S clock
+ * divider, RX path sample-rate fields and the bit-width for a stream.
+ *
+ * Supported rates: 8/16/32/48/96/192 kHz; supported formats: S16_LE
+ * (16-bit path) and S24_LE/S24_3LE (wide path).
+ * Returns 0 on success or -EINVAL for unsupported rate/format.
+ */
+static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params,
+			     struct snd_soc_dai *dai)
+{
+	/* rx_clk_fs_rate: TOP_RX_I2S_CTL clock divider code;
+	 * rx_fs_rate: RX path control sample-rate code.
+	 */
+	u8 rx_clk_fs_rate, rx_fs_rate;
+
+	dev_dbg(dai->codec->dev,
+		"%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
+		__func__, dai->name, dai->id, params_rate(params),
+		params_channels(params), params_format(params));
+
+	switch (params_rate(params)) {
+	case 8000:
+		rx_clk_fs_rate = 0x00;
+		rx_fs_rate = 0x00;
+		break;
+	case 16000:
+		rx_clk_fs_rate = 0x01;
+		rx_fs_rate = 0x01;
+		break;
+	case 32000:
+		rx_clk_fs_rate = 0x02;
+		rx_fs_rate = 0x03;
+		break;
+	case 48000:
+		rx_clk_fs_rate = 0x03;
+		rx_fs_rate = 0x04;
+		break;
+	case 96000:
+		rx_clk_fs_rate = 0x04;
+		rx_fs_rate = 0x05;
+		break;
+	case 192000:
+		rx_clk_fs_rate = 0x05;
+		rx_fs_rate = 0x06;
+		break;
+	default:
+		dev_err(dai->codec->dev,
+			"%s: Invalid sampling rate %d\n", __func__,
+			params_rate(params));
+		return -EINVAL;
+	}
+
+	/* Apply the rate codes to the clock control and both RX paths. */
+	snd_soc_update_bits(dai->codec,
+			MSM_SDW_TOP_RX_I2S_CTL, 0x1C, (rx_clk_fs_rate << 2));
+	snd_soc_update_bits(dai->codec,
+			MSM_SDW_RX7_RX_PATH_CTL, 0x0F, rx_fs_rate);
+	snd_soc_update_bits(dai->codec,
+			MSM_SDW_RX8_RX_PATH_CTL, 0x0F, rx_fs_rate);
+
+	/* Bit 0x20 of TOP_RX_I2S_CTL selects 16-bit (1) vs wide (0). */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		snd_soc_update_bits(dai->codec,
+				MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		snd_soc_update_bits(dai->codec,
+				MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
+		break;
+	default:
+		dev_err(dai->codec->dev, "%s: wrong format selected\n",
+			__func__);
+		return -EINVAL;
+	}
+	/* TX (VI feedback) side is always 16-bit. */
+	snd_soc_update_bits(dai->codec,
+			MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
+
+	return 0;
+}
+
+/* DAI shutdown callback - nothing to tear down; log for debugging only. */
+static void msm_sdw_shutdown(struct snd_pcm_substream *substream,
+			     struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev,
+		"%s(): substream = %s stream = %d\n", __func__,
+		substream->name, substream->stream);
+}
+
+/*
+ * msm_sdw_codec_version_read - procfs read handler for the codec's
+ * "version" snd_info entry.
+ *
+ * Formats the codec version string into a small stack buffer and copies
+ * the requested window of it to userspace.
+ * Returns the number of bytes copied or a negative error code.
+ */
+static ssize_t msm_sdw_codec_version_read(struct snd_info_entry *entry,
+					  void *file_private_data,
+					  struct file *file,
+					  char __user *buf, size_t count,
+					  loff_t pos)
+{
+	struct msm_sdw_priv *msm_sdw;
+	char buffer[MSM_SDW_VERSION_ENTRY_SIZE];
+	int len = 0;
+
+	msm_sdw = (struct msm_sdw_priv *) entry->private_data;
+	if (!msm_sdw) {
+		pr_err("%s: msm_sdw priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (msm_sdw->version) {
+	case MSM_SDW_VERSION_1_0:
+		len = snprintf(buffer, sizeof(buffer), "MSM_SDW_CDC_1_0\n");
+		break;
+	default:
+		len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	}
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+/* snd_info ops exposing the codec version string via procfs. */
+static struct snd_info_entry_ops msm_sdw_codec_info_ops = {
+	.read = msm_sdw_codec_version_read,
+};
+
+/*
+ * msm_sdw_codec_info_create_codec_entry - creates msm_sdw module
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates msm_sdw module and version entry under the given
+ * parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int msm_sdw_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					  struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct msm_sdw_priv *msm_sdw;
+	struct snd_soc_card *card;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	msm_sdw = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	msm_sdw->entry = snd_register_module_info(codec_root->module,
+						  "msm_sdw",
+						  codec_root);
+	if (!msm_sdw->entry) {
+		dev_dbg(codec->dev, "%s: failed to create msm_sdw entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   msm_sdw->entry);
+	if (!version_entry) {
+		/* NOTE(review): msm_sdw->entry stays registered on this
+		 * error path - presumably freed with the parent hierarchy;
+		 * confirm against the snd_info lifecycle.
+		 */
+		dev_dbg(codec->dev, "%s: failed to create msm_sdw version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = msm_sdw;
+	version_entry->size = MSM_SDW_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &msm_sdw_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	msm_sdw->version_entry = version_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_sdw_codec_info_create_codec_entry);
+
+/* DAI callbacks shared by both msm_sdw DAIs. */
+static struct snd_soc_dai_ops msm_sdw_dai_ops = {
+	.startup = msm_sdw_startup,
+	.shutdown = msm_sdw_shutdown,
+	.hw_params = msm_sdw_hw_params,
+};
+
+/*
+ * DAIs exposed by this codec: an I2S playback DAI feeding the RX paths
+ * and a capture DAI for speaker VI (voltage/current) sense feedback.
+ */
+static struct snd_soc_dai_driver msm_sdw_dai[] = {
+	{
+		.name = "msm_sdw_i2s_rx1",
+		.id = AIF1_SDW_PB,
+		.playback = {
+			.stream_name = "AIF1_SDW Playback",
+			.rates = MSM_SDW_RATES,
+			.formats = MSM_SDW_FORMATS,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &msm_sdw_dai_ops,
+	},
+	{
+		.name = "msm_sdw_vifeedback",
+		.id = AIF1_SDW_VIFEED,
+		.capture = {
+			.stream_name = "VIfeed_SDW",
+			.rates = SNDRV_PCM_RATE_8000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_max = 8000,
+			.rate_min = 8000,
+			.channels_min = 2,
+			.channels_max = 4,
+		},
+		.ops = &msm_sdw_dai_ops,
+	},
+};
+
+/* Input choices for the RX4/RX5 MIX1 input muxes. */
+static const char * const rx_mix1_text[] = {
+	"ZERO", "RX4", "RX5"
+};
+
+/* Selections for the "EAR SPKR PA Gain" enum control. */
+static const char * const msm_sdw_ear_spkr_pa_gain_text[] = {
+	"G_DEFAULT", "G_0_DB", "G_1_DB", "G_2_DB", "G_3_DB",
+	"G_4_DB", "G_5_DB", "G_6_DB"
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(msm_sdw_ear_spkr_pa_gain_enum,
+				msm_sdw_ear_spkr_pa_gain_text);
+/* RX4 MIX1 */
+static const struct soc_enum rx4_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX7_PATH_INPUT0_MUX,
+		0, 3, rx_mix1_text);
+
+static const struct soc_enum rx4_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX7_PATH_INPUT1_MUX,
+		0, 3, rx_mix1_text);
+
+/* RX5 MIX1 */
+static const struct soc_enum rx5_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX8_PATH_INPUT0_MUX,
+		0, 3, rx_mix1_text);
+
+static const struct soc_enum rx5_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX8_PATH_INPUT1_MUX,
+		0, 3, rx_mix1_text);
+
+static const struct snd_kcontrol_new rx4_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX4 MIX1 INP1 Mux", rx4_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx4_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX4 MIX1 INP2 Mux", rx4_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx5_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX5 MIX1 INP1 Mux", rx5_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx5_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX5 MIX1 INP2 Mux", rx5_mix1_inp2_chain_enum);
+
+/* Switches routing the VI sense ports into the AIF1 VI capture mixer. */
+static const struct snd_kcontrol_new aif1_vi_mixer[] = {
+	SOC_SINGLE_EXT("SPKR_VI_1", SND_SOC_NOPM, MSM_SDW_TX0, 1, 0,
+			msm_sdw_vi_feed_mixer_get, msm_sdw_vi_feed_mixer_put),
+	SOC_SINGLE_EXT("SPKR_VI_2", SND_SOC_NOPM, MSM_SDW_TX1, 1, 0,
+			msm_sdw_vi_feed_mixer_get, msm_sdw_vi_feed_mixer_put),
+};
+
+/*
+ * DAPM graph widgets: I2S AIF inputs, RX4/RX5 mixers and muxes,
+ * interpolator + boost chain stages, VI feedback path and the clock
+ * supply widgets that gate the whole graph.
+ */
+static const struct snd_soc_dapm_widget msm_sdw_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN("I2S RX4", "AIF1_SDW Playback", 0,
+		SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("I2S RX5", "AIF1_SDW Playback", 0,
+		SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF1_SDW VI", "VIfeed_SDW", 0, SND_SOC_NOPM,
+		AIF1_SDW_VIFEED, 0, msm_sdw_codec_enable_vi_feedback,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("AIF1_VI_SDW Mixer", SND_SOC_NOPM, AIF1_SDW_VIFEED,
+		0, aif1_vi_mixer, ARRAY_SIZE(aif1_vi_mixer)),
+
+	SND_SOC_DAPM_MUX("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx4_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx4_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX5 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx5_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX5 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx5_mix1_inp2_mux),
+	SND_SOC_DAPM_MIXER("RX4 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX5 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	/* shift carries the compander index for the event handler */
+	SND_SOC_DAPM_MIXER_E("RX INT4 INTERP", SND_SOC_NOPM,
+		COMP1, 0, NULL, 0, msm_sdw_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT5 INTERP", SND_SOC_NOPM,
+		COMP2, 0, NULL, 0, msm_sdw_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER_E("RX INT4 CHAIN", SND_SOC_NOPM, 0, 0,
+		NULL, 0, msm_sdw_codec_spk_boost_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT5 CHAIN", SND_SOC_NOPM, 0, 0,
+		NULL, 0, msm_sdw_codec_spk_boost_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_INPUT("VIINPUT_SDW"),
+
+	SND_SOC_DAPM_OUTPUT("SPK1 OUT"),
+	SND_SOC_DAPM_OUTPUT("SPK2 OUT"),
+
+	SND_SOC_DAPM_SUPPLY_S("SDW_CONN", -1, MSM_SDW_TOP_I2S_CLK,
+		0, 0, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY_S("INT_MCLK1", -2, SND_SOC_NOPM, 0, 0,
+	msm_int_mclk1_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("SDW_RX_I2S_CLK",
+		MSM_SDW_TOP_RX_I2S_CTL, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("SDW_TX_I2S_CLK",
+		MSM_SDW_TOP_TX_I2S_CTL, 0, 0, NULL, 0),
+};
+
+/* ALSA mixer controls: ear speaker gain, RX digital volumes (in 0.5 dB
+ * steps, -84..+40) and the per-path compander switches.
+ */
+static const struct snd_kcontrol_new msm_sdw_snd_controls[] = {
+	SOC_ENUM_EXT("EAR SPKR PA Gain", msm_sdw_ear_spkr_pa_gain_enum,
+		     msm_sdw_ear_spkr_pa_gain_get,
+		     msm_sdw_ear_spkr_pa_gain_put),
+	SOC_SINGLE_SX_TLV("RX4 Digital Volume", MSM_SDW_RX7_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX5 Digital Volume", MSM_SDW_RX8_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMP1, 1, 0,
+		msm_sdw_get_compander, msm_sdw_set_compander),
+	SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMP2, 1, 0,
+		msm_sdw_get_compander, msm_sdw_set_compander),
+};
+
+/* DAPM routing: clock supplies -> AIFs -> mixers -> interpolators ->
+ * boost chains -> speaker outputs, plus the VI feedback capture path.
+ */
+static const struct snd_soc_dapm_route audio_map[] = {
+
+	{"AIF1_SDW VI", NULL, "SDW_TX_I2S_CLK"},
+	{"SDW_TX_I2S_CLK", NULL, "INT_MCLK1"},
+	{"SDW_TX_I2S_CLK", NULL, "SDW_CONN"},
+
+	/* VI Feedback */
+	{"AIF1_VI_SDW Mixer", "SPKR_VI_1", "VIINPUT_SDW"},
+	{"AIF1_VI_SDW Mixer", "SPKR_VI_2", "VIINPUT_SDW"},
+	{"AIF1_SDW VI", NULL, "AIF1_VI_SDW Mixer"},
+
+	{"SDW_RX_I2S_CLK", NULL, "INT_MCLK1"},
+	{"SDW_RX_I2S_CLK", NULL, "SDW_CONN"},
+	{"I2S RX4", NULL, "SDW_RX_I2S_CLK"},
+	{"I2S RX5", NULL, "SDW_RX_I2S_CLK"},
+
+	{"RX4 MIX1 INP1", "RX4", "I2S RX4"},
+	{"RX4 MIX1 INP1", "RX5", "I2S RX5"},
+	{"RX4 MIX1 INP2", "RX4", "I2S RX4"},
+	{"RX4 MIX1 INP2", "RX5", "I2S RX5"},
+	{"RX5 MIX1 INP1", "RX4", "I2S RX4"},
+	{"RX5 MIX1 INP1", "RX5", "I2S RX5"},
+	{"RX5 MIX1 INP2", "RX4", "I2S RX4"},
+	{"RX5 MIX1 INP2", "RX5", "I2S RX5"},
+
+	{"RX4 MIX1", NULL, "RX4 MIX1 INP1"},
+	{"RX4 MIX1", NULL, "RX4 MIX1 INP2"},
+	{"RX5 MIX1", NULL, "RX5 MIX1 INP1"},
+	{"RX5 MIX1", NULL, "RX5 MIX1 INP2"},
+
+	{"RX INT4 INTERP", NULL, "RX4 MIX1"},
+	{"RX INT4 CHAIN", NULL, "RX INT4 INTERP"},
+	{"SPK1 OUT", NULL, "RX INT4 CHAIN"},
+
+	{"RX INT5 INTERP", NULL, "RX5 MIX1"},
+	{"RX INT5 CHAIN", NULL, "RX INT5 INTERP"},
+	{"SPK2 OUT", NULL, "RX INT5 CHAIN"},
+};
+
+/* One-time register defaults applied at codec probe and after an ADSP
+ * restart: boost configuration and compander control for both paths.
+ */
+static const struct msm_sdw_reg_mask_val msm_sdw_reg_init[] = {
+	{MSM_SDW_BOOST0_BOOST_CFG1, 0x3F, 0x12},
+	{MSM_SDW_BOOST0_BOOST_CFG2, 0x1C, 0x08},
+	{MSM_SDW_COMPANDER7_CTL7, 0x1E, 0x18},
+	{MSM_SDW_BOOST1_BOOST_CFG1, 0x3F, 0x12},
+	{MSM_SDW_BOOST1_BOOST_CFG2, 0x1C, 0x08},
+	{MSM_SDW_COMPANDER8_CTL7, 0x1E, 0x18},
+};
+
+/* Apply every entry of the msm_sdw_reg_init[] default table. */
+static void msm_sdw_init_reg(struct snd_soc_codec *codec)
+{
+	const struct msm_sdw_reg_mask_val *rv;
+
+	for (rv = msm_sdw_reg_init;
+	     rv < msm_sdw_reg_init + ARRAY_SIZE(msm_sdw_reg_init); rv++)
+		snd_soc_update_bits(codec, rv->reg, rv->mask, rv->val);
+}
+
+/*
+ * msm_sdw_notifier_service_cb - audio (ADSP) service state notifier.
+ *
+ * On SERVICE_DOWN, marks the device down and tells every soundwire
+ * master. On SERVICE_UP (after the initial boot notification, which is
+ * skipped), re-applies register defaults, resyncs the regcache and
+ * restores the speaker mode. Serialized by codec_mutex.
+ */
+static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
+				       unsigned long opcode, void *ptr)
+{
+	int i;
+	struct msm_sdw_priv *msm_sdw = container_of(nb,
+						    struct msm_sdw_priv,
+						    service_nb);
+
+	pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
+
+	mutex_lock(&msm_sdw->codec_mutex);
+	switch (opcode) {
+	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		msm_sdw->dev_up = false;
+		for (i = 0; i < msm_sdw->nr; i++)
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_DEVICE_DOWN, NULL);
+		break;
+	case AUDIO_NOTIFIER_SERVICE_UP:
+		/* The first UP arrives at boot before anything was lost. */
+		if (initial_boot) {
+			initial_boot = false;
+			break;
+		}
+		msm_sdw->dev_up = true;
+		msm_sdw_init_reg(msm_sdw->codec);
+		regcache_mark_dirty(msm_sdw->regmap);
+		regcache_sync(msm_sdw->regmap);
+		msm_sdw_set_spkr_mode(msm_sdw->codec, msm_sdw->spkr_mode);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&msm_sdw->codec_mutex);
+	return NOTIFY_OK;
+}
+
+/*
+ * msm_sdw_codec_probe - snd_soc codec probe: initialize driver state,
+ * apply register defaults and register for ADSP service notifications.
+ *
+ * Notifier registration failure is logged but deliberately non-fatal;
+ * the codec still probes. Returns 0, or -EINVAL if drvdata is missing.
+ */
+static int msm_sdw_codec_probe(struct snd_soc_codec *codec)
+{
+	struct msm_sdw_priv *msm_sdw;
+	int i, ret;
+
+	msm_sdw = snd_soc_codec_get_drvdata(codec);
+	if (!msm_sdw) {
+		pr_err("%s:SDW priv data null\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw->codec = codec;
+	for (i = 0; i < COMP_MAX; i++)
+		msm_sdw->comp_enabled[i] = 0;
+
+	msm_sdw->spkr_gain_offset = RX_GAIN_OFFSET_0_DB;
+	msm_sdw_init_reg(codec);
+	msm_sdw->version = MSM_SDW_VERSION_1_0;
+	msm_sdw->dev_up = true;
+	msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
+	ret = audio_notifier_register("msm_sdw", AUDIO_NOTIFIER_ADSP_DOMAIN,
+				      &msm_sdw->service_nb);
+	if (ret < 0)
+		dev_err(msm_sdw->dev,
+			"%s: Audio notifier register failed ret = %d\n",
+			__func__, ret);
+
+	return 0;
+}
+
+/*
+ * msm_sdw_codec_remove - snd_soc codec remove callback.
+ *
+ * NOTE(review): the audio notifier registered in probe is not
+ * deregistered here - confirm whether the notifier framework tolerates
+ * callbacks outliving the codec, or add a deregister call.
+ */
+static int msm_sdw_codec_remove(struct snd_soc_codec *codec)
+{
+	return 0;
+}
+
+/* Hand the driver's regmap to the ASoC core (codec .get_regmap hook). */
+static struct regmap *msm_sdw_get_regmap(struct device *dev)
+{
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+
+	return msm_sdw->regmap;
+}
+
+/* Codec driver descriptor tying together controls, widgets and routes. */
+static struct snd_soc_codec_driver soc_codec_dev_msm_sdw = {
+	.probe = msm_sdw_codec_probe,
+	.remove = msm_sdw_codec_remove,
+	.controls = msm_sdw_snd_controls,
+	.num_controls = ARRAY_SIZE(msm_sdw_snd_controls),
+	.dapm_widgets = msm_sdw_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(msm_sdw_dapm_widgets),
+	.dapm_routes = audio_map,
+	.num_dapm_routes = ARRAY_SIZE(audio_map),
+	.get_regmap = msm_sdw_get_regmap,
+};
+
+/*
+ * msm_sdw_add_child_devices - deferred work: create platform devices for
+ * the DT children of this node (the soundwire master and any codec
+ * pinctrl nodes), passing the swr control platform data to the master.
+ *
+ * Records each registered soundwire master pdev in sdw_ctrl_data and
+ * updates msm_sdw->nr. Errors abort the scan; devices already added
+ * stay registered.
+ *
+ * NOTE(review): strlcpy() is given MSM_SDW_STRING_LEN - 1 rather than
+ * the full buffer size - safe, but wastes the last byte; and a krealloc
+ * failure leaves the just-added pdev registered without a ctrl_data
+ * entry. Presumably acceptable for this one-shot probe path - confirm.
+ */
+static void msm_sdw_add_child_devices(struct work_struct *work)
+{
+	struct msm_sdw_priv *msm_sdw;
+	struct platform_device *pdev;
+	struct device_node *node;
+	struct msm_sdw_ctrl_data *sdw_ctrl_data = NULL, *temp;
+	int ret, ctrl_num = 0;
+	struct wcd_sdw_ctrl_platform_data *platdata;
+	char plat_dev_name[MSM_SDW_STRING_LEN];
+
+	msm_sdw = container_of(work, struct msm_sdw_priv,
+			       msm_sdw_add_child_devices_work);
+	if (!msm_sdw) {
+		pr_err("%s: Memory for msm_sdw does not exist\n",
+		       __func__);
+		return;
+	}
+	if (!msm_sdw->dev->of_node) {
+		dev_err(msm_sdw->dev,
+			"%s: DT node for msm_sdw does not exist\n", __func__);
+		return;
+	}
+
+	platdata = &msm_sdw->sdw_plat_data;
+
+	for_each_available_child_of_node(msm_sdw->dev->of_node, node) {
+		/* Map the DT child to the platform driver name to bind. */
+		if (!strcmp(node->name, "swr_master"))
+			strlcpy(plat_dev_name, "msm_sdw_swr_ctrl",
+				(MSM_SDW_STRING_LEN - 1));
+		else if (strnstr(node->name, "msm_cdc_pinctrl",
+				 strlen("msm_cdc_pinctrl")) != NULL)
+			strlcpy(plat_dev_name, node->name,
+				(MSM_SDW_STRING_LEN - 1));
+		else
+			continue;
+
+		pdev = platform_device_alloc(plat_dev_name, -1);
+		if (!pdev) {
+			dev_err(msm_sdw->dev, "%s: pdev memory alloc failed\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		pdev->dev.parent = msm_sdw->dev;
+		pdev->dev.of_node = node;
+
+		if (!strcmp(node->name, "swr_master")) {
+			ret = platform_device_add_data(pdev, platdata,
+						       sizeof(*platdata));
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: cannot add plat data ctrl:%d\n",
+					__func__, ctrl_num);
+				goto fail_pdev_add;
+			}
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: Cannot add platform device\n",
+				__func__);
+			goto fail_pdev_add;
+		}
+
+		if (!strcmp(node->name, "swr_master")) {
+			/* Grow the ctrl_data array by one and record pdev. */
+			temp = krealloc(sdw_ctrl_data,
+					(ctrl_num + 1) * sizeof(
+					struct msm_sdw_ctrl_data),
+					GFP_KERNEL);
+			if (!temp) {
+				dev_err(&pdev->dev, "out of memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+			sdw_ctrl_data = temp;
+			sdw_ctrl_data[ctrl_num].sdw_pdev = pdev;
+			ctrl_num++;
+			dev_dbg(&pdev->dev,
+				"%s: Added soundwire ctrl device(s)\n",
+				__func__);
+			msm_sdw->nr = ctrl_num;
+			msm_sdw->sdw_ctrl_data = sdw_ctrl_data;
+		}
+	}
+
+	return;
+fail_pdev_add:
+	platform_device_put(pdev);
+err:
+	return;
+}
+
+/*
+ * msm_sdw_probe:
+ * Platform probe for the MSM SoundWire codec block. Defers until the
+ * ADSP is up, maps the codec register space, creates the regmap, looks
+ * up the soundwire master IRQ, registers the ASoC codec and schedules
+ * child (soundwire master / pinctrl) device creation.
+ *
+ * Returns 0 on success or a negative errno (-EPROBE_DEFER while the
+ * ADSP is not yet loaded).
+ */
+static int msm_sdw_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_sdw_priv *msm_sdw;
+	int adsp_state;
+
+	adsp_state = apr_get_subsys_state();
+	if (adsp_state != APR_SUBSYS_LOADED) {
+		dev_err(&pdev->dev, "Adsp is not loaded yet %d\n",
+			adsp_state);
+		return -EPROBE_DEFER;
+	}
+
+	/* devm allocation: freed automatically on probe failure/unbind. */
+	msm_sdw = devm_kzalloc(&pdev->dev, sizeof(struct msm_sdw_priv),
+			       GFP_KERNEL);
+	if (!msm_sdw)
+		return -ENOMEM;
+	dev_set_drvdata(&pdev->dev, msm_sdw);
+
+	msm_sdw->dev = &pdev->dev;
+	INIT_WORK(&msm_sdw->msm_sdw_add_child_devices_work,
+		  msm_sdw_add_child_devices);
+	/*
+	 * Fix: io_lock is taken by the regmap bus callbacks and destroyed
+	 * in msm_sdw_remove(), but was never initialized here.
+	 */
+	mutex_init(&msm_sdw->io_lock);
+	mutex_init(&msm_sdw->sdw_read_lock);
+	mutex_init(&msm_sdw->sdw_write_lock);
+	mutex_init(&msm_sdw->sdw_clk_lock);
+	mutex_init(&msm_sdw->codec_mutex);
+
+	/* I/O and clock callbacks handed to the soundwire master child. */
+	msm_sdw->sdw_plat_data.handle = (void *) msm_sdw;
+	msm_sdw->sdw_plat_data.read = msm_sdw_swrm_read;
+	msm_sdw->sdw_plat_data.write = msm_sdw_swrm_write;
+	msm_sdw->sdw_plat_data.bulk_write = msm_sdw_swrm_bulk_write;
+	msm_sdw->sdw_plat_data.clk = msm_sdw_swrm_clock;
+	msm_sdw->sdw_plat_data.handle_irq = msm_sdwm_handle_irq;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "reg",
+				   &msm_sdw->sdw_base_addr);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: could not find %s entry in dt\n",
+			__func__, "reg");
+		return ret;
+	}
+
+	msm_sdw->sdw_base = ioremap(msm_sdw->sdw_base_addr,
+				    MSM_SDW_MAX_REGISTER);
+	if (!msm_sdw->sdw_base) {
+		/* Fix: the mapping was previously used without a check. */
+		dev_err(&pdev->dev, "%s: ioremap failed for addr 0x%x\n",
+			__func__, msm_sdw->sdw_base_addr);
+		return -ENOMEM;
+	}
+	msm_sdw->read_dev = __msm_sdw_reg_read;
+	msm_sdw->write_dev = __msm_sdw_reg_write;
+
+	msm_sdw->regmap = msm_sdw_regmap_init(msm_sdw->dev,
+					      &msm_sdw_regmap_config);
+	if (IS_ERR_OR_NULL(msm_sdw->regmap)) {
+		/* Fix: regmap creation failure was previously ignored. */
+		dev_err(&pdev->dev, "%s: regmap init failed\n", __func__);
+		ret = msm_sdw->regmap ? PTR_ERR(msm_sdw->regmap) : -EINVAL;
+		goto err_io_unmap;
+	}
+	msm_sdw->sdw_irq = platform_get_irq_byname(pdev, "swr_master_irq");
+	if (msm_sdw->sdw_irq < 0) {
+		dev_err(msm_sdw->dev, "%s() error getting irq handle: %d\n",
+			__func__, msm_sdw->sdw_irq);
+		ret = -ENODEV;
+		goto err_io_unmap;
+	}
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_msm_sdw,
+				     msm_sdw_dai, ARRAY_SIZE(msm_sdw_dai));
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Codec registration failed, ret = %d\n",
+			__func__, ret);
+		goto err_io_unmap;
+	}
+	/* initialize the int_mclk1 (codec core) AFE clock descriptor */
+	msm_sdw->sdw_cdc_core_clk.clk_set_minor_version =
+				AFE_API_VERSION_I2S_CONFIG;
+	msm_sdw->sdw_cdc_core_clk.clk_id =
+				Q6AFE_LPASS_CLK_ID_INT_MCLK_1;
+	msm_sdw->sdw_cdc_core_clk.clk_freq_in_hz =
+				INT_MCLK1_FREQ;
+	msm_sdw->sdw_cdc_core_clk.clk_attri =
+				Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO;
+	msm_sdw->sdw_cdc_core_clk.clk_root =
+				Q6AFE_LPASS_CLK_ROOT_DEFAULT;
+	msm_sdw->sdw_cdc_core_clk.enable = 0;
+
+	/* initialize the sdw_npl_clk AFE clock descriptor */
+	msm_sdw->sdw_npl_clk.clk_set_minor_version =
+			AFE_API_VERSION_I2S_CONFIG;
+	msm_sdw->sdw_npl_clk.clk_id =
+			AFE_CLOCK_SET_CLOCK_ID_SWR_NPL_CLK;
+	msm_sdw->sdw_npl_clk.clk_freq_in_hz = SDW_NPL_FREQ;
+	msm_sdw->sdw_npl_clk.clk_attri =
+			Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO;
+	msm_sdw->sdw_npl_clk.clk_root =
+			Q6AFE_LPASS_CLK_ROOT_DEFAULT;
+	msm_sdw->sdw_npl_clk.enable = 0;
+
+	INIT_DELAYED_WORK(&msm_sdw->disable_int_mclk1_work,
+			  msm_disable_int_mclk1);
+	mutex_init(&msm_sdw->cdc_int_mclk1_mutex);
+	mutex_init(&msm_sdw->sdw_npl_clk_mutex);
+	schedule_work(&msm_sdw->msm_sdw_add_child_devices_work);
+
+	dev_dbg(&pdev->dev, "%s: msm_sdw driver probe done\n", __func__);
+	return 0;
+
+err_io_unmap:
+	/* Fix: the old error path leaked the ioremap()ed region. */
+	iounmap(msm_sdw->sdw_base);
+	return ret;
+}
+
+/*
+ * msm_sdw_remove:
+ * Platform remove: unregisters the ASoC codec first (while the private
+ * data its callbacks use is still valid), then releases the register
+ * mapping and the locks.
+ */
+static int msm_sdw_remove(struct platform_device *pdev)
+{
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(&pdev->dev);
+
+	/*
+	 * Fix: the old code called devm_kfree(msm_sdw) and destroyed the
+	 * mutexes *before* snd_soc_unregister_codec(), leaving the codec
+	 * briefly registered with freed driver data (use-after-free).
+	 */
+	snd_soc_unregister_codec(&pdev->dev);
+
+	/* Fix: release the mapping created by ioremap() in probe. */
+	if (msm_sdw->sdw_base)
+		iounmap(msm_sdw->sdw_base);
+
+	mutex_destroy(&msm_sdw->io_lock);
+	mutex_destroy(&msm_sdw->sdw_read_lock);
+	mutex_destroy(&msm_sdw->sdw_write_lock);
+	mutex_destroy(&msm_sdw->sdw_clk_lock);
+	mutex_destroy(&msm_sdw->codec_mutex);
+	mutex_destroy(&msm_sdw->cdc_int_mclk1_mutex);
+
+	/* msm_sdw itself is devm-allocated; the driver core frees it. */
+	return 0;
+}
+
+/* Device-tree match: binds this driver to "qcom,msm-sdw-codec" nodes. */
+static const struct of_device_id msm_sdw_codec_dt_match[] = {
+	{ .compatible = "qcom,msm-sdw-codec", },
+	{}
+};
+
+static struct platform_driver msm_sdw_codec_driver = {
+	.probe = msm_sdw_probe,
+	.remove = msm_sdw_remove,
+	.driver = {
+		.name = "msm_sdw_codec",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_sdw_codec_dt_match,
+	},
+};
+/* Registers the driver and generates module init/exit boilerplate. */
+module_platform_driver(msm_sdw_codec_driver);
+
+MODULE_DESCRIPTION("MSM Soundwire Codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc_utils.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc_utils.c
new file mode 100644
index 000000000000..59d0cae3893e
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc_utils.c
@@ -0,0 +1,211 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include "msm_sdw.h"
+
+#define REG_BYTES 2
+#define VAL_BYTES 1
+/*
+ * Page Register Address that APP Proc uses to
+ * access WCD9335 Codec registers is identified
+ * as 0x00
+ */
+#define PAGE_REG_ADDR 0x00
+
+/*
+ * msm_sdw_page_write:
+ * Retrieve page number from register and
+ * write that page number to the page address.
+ * Called under io_lock acquisition.
+ *
+ * @msm_sdw: pointer to msm_sdw
+ * @reg: Register address from which page number is retrieved
+ *
+ * Returns 0 for success and negative error code for failure.
+ */
+/*
+ * Look up the page for @reg and, unless the page register already holds
+ * it, write it to PAGE_REG_ADDR. Caches the last page written so
+ * consecutive accesses to the same page skip the bus write.
+ * Called with io_lock held. Returns 0 on success or a negative error.
+ *
+ * Fix: the original duplicated the whole write/log sequence in both the
+ * prev_pg_valid and first-write branches; consolidated into one path
+ * (setting prev_pg_valid when it is already true is a no-op).
+ */
+int msm_sdw_page_write(struct msm_sdw_priv *msm_sdw, unsigned short reg)
+{
+	int ret = 0;
+	u8 pg_num;
+
+	pg_num = msm_sdw_page_map[reg];
+	/* Page already selected: nothing to do. */
+	if (msm_sdw->prev_pg_valid && msm_sdw->prev_pg == pg_num)
+		return 0;
+
+	ret = msm_sdw->write_dev(msm_sdw, PAGE_REG_ADDR, 1,
+				 (void *) &pg_num);
+	if (ret < 0) {
+		dev_err(msm_sdw->dev,
+			"page write error, pg_num: 0x%x\n", pg_num);
+	} else {
+		msm_sdw->prev_pg = pg_num;
+		msm_sdw->prev_pg_valid = true;
+		dev_dbg(msm_sdw->dev, "%s: Page 0x%x Write to 0x00\n",
+			__func__, pg_num);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(msm_sdw_page_write);
+EXPORT_SYMBOL(msm_sdw_page_write);
+
+/*
+ * regmap bus .read: selects the page for @reg (paged register map, see
+ * msm_sdw_page_write()) and reads @val_size bytes through the codec's
+ * read_dev hook. All bus access is serialized by io_lock.
+ */
+static int regmap_bus_read(void *context, const void *reg, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+	unsigned short c_reg;
+	int ret, i;
+
+	if (!msm_sdw) {
+		dev_err(dev, "%s: msm_sdw is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (!reg || !val) {
+		dev_err(dev, "%s: reg or val is NULL\n", __func__);
+		return -EINVAL;
+	}
+	/* Register addresses on this bus are always REG_BYTES (2) wide. */
+	if (reg_size != REG_BYTES) {
+		dev_err(dev, "%s: register size %zd bytes, not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+	/* Device down (e.g. subsystem restart): drop the access quietly. */
+	if (!msm_sdw->dev_up) {
+		dev_dbg_ratelimited(dev, "%s: No read allowed. dev_up = %d\n",
+				    __func__, msm_sdw->dev_up);
+		return 0;
+	}
+
+	mutex_lock(&msm_sdw->io_lock);
+	c_reg = *(u16 *)reg;
+	ret = msm_sdw_page_write(msm_sdw, c_reg);
+	if (ret)
+		goto err;
+	ret = msm_sdw->read_dev(msm_sdw, c_reg, val_size, val);
+	if (ret < 0)
+		dev_err(dev, "%s: Codec read failed (%d), reg: 0x%x, size:%zd\n",
+			__func__, ret, c_reg, val_size);
+	else {
+		for (i = 0; i < val_size; i++)
+			dev_dbg(dev, "%s: Read 0x%02x from 0x%x\n",
+				__func__, ((u8 *)val)[i], c_reg + i);
+	}
+	/*
+	 * NOTE(review): a positive ret from read_dev would propagate to
+	 * regmap here — confirm read_dev returns 0 (not a byte count) on
+	 * success.
+	 */
+err:
+	mutex_unlock(&msm_sdw->io_lock);
+
+	return ret;
+}
+
+/*
+ * regmap bus .gather_write: selects the page for @reg and writes
+ * @val_size payload bytes through the codec's write_dev hook,
+ * serialized by io_lock.
+ */
+static int regmap_bus_gather_write(void *context,
+				   const void *reg, size_t reg_size,
+				   const void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+	unsigned short c_reg;
+	int ret, i;
+
+	if (!msm_sdw) {
+		dev_err(dev, "%s: msm_sdw is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (!reg || !val) {
+		dev_err(dev, "%s: reg or val is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (reg_size != REG_BYTES) {
+		dev_err(dev, "%s: register size %zd bytes, not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+	/* Device down (e.g. subsystem restart): drop the access quietly. */
+	if (!msm_sdw->dev_up) {
+		dev_dbg_ratelimited(dev, "%s: No write allowed. dev_up = %d\n",
+				    __func__, msm_sdw->dev_up);
+		return 0;
+	}
+
+	mutex_lock(&msm_sdw->io_lock);
+	c_reg = *(u16 *)reg;
+	ret = msm_sdw_page_write(msm_sdw, c_reg);
+	if (ret)
+		goto err;
+
+	/*
+	 * NOTE(review): this debug log strides addresses by 4 while the
+	 * read path logs c_reg + i — one of the two is inconsistent
+	 * (log-only, no functional impact).
+	 */
+	for (i = 0; i < val_size; i++)
+		dev_dbg(dev, "Write %02x to 0x%x\n", ((u8 *)val)[i],
+			c_reg + i*4);
+
+	ret = msm_sdw->write_dev(msm_sdw, c_reg, val_size, (void *) val);
+	if (ret < 0)
+		dev_err(dev,
+			"%s: Codec write failed (%d), reg:0x%x, size:%zd\n",
+			__func__, ret, c_reg, val_size);
+
+err:
+	mutex_unlock(&msm_sdw->io_lock);
+	return ret;
+}
+
+/*
+ * regmap bus .write: @data is a packed buffer whose first REG_BYTES
+ * bytes hold the register address and whose remainder is the payload;
+ * split it and delegate to the gather-write path.
+ */
+static int regmap_bus_write(void *context, const void *data, size_t count)
+{
+	struct device *dev = context;
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+	const u8 *buf = data;
+
+	if (!msm_sdw)
+		return -EINVAL;
+
+	WARN_ON(count < REG_BYTES);
+
+	return regmap_bus_gather_write(context, buf, REG_BYTES,
+				       buf + REG_BYTES, count - REG_BYTES);
+}
+
+/* regmap bus glue: routes regmap I/O through the paged helpers above. */
+static struct regmap_bus regmap_bus_config = {
+	.write = regmap_bus_write,
+	.gather_write = regmap_bus_gather_write,
+	.read = regmap_bus_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/*
+ * msm_sdw_regmap_init:
+ * Initialize msm_sdw register map
+ *
+ * @dev: pointer to wcd device
+ * @config: pointer to register map config
+ *
+ * Returns pointer to regmap structure for success
+ * or an ERR_PTR in case of failure (devm_regmap_init() semantics).
+ */
+struct regmap *msm_sdw_regmap_init(struct device *dev,
+				   const struct regmap_config *config)
+{
+	/* devm-managed: the regmap is released automatically on unbind. */
+	return devm_regmap_init(dev, &regmap_bus_config, dev, config);
+}
+EXPORT_SYMBOL(msm_sdw_regmap_init);
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_registers.h b/sound/soc/codecs/msm_sdw/msm_sdw_registers.h
new file mode 100644
index 000000000000..f2302ef21e13
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_registers.h
@@ -0,0 +1,126 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_SDW_REGISTERS_H
+#define MSM_SDW_REGISTERS_H
+
+/*
+ * Register offsets for the MSM SoundWire (SDW) codec block. Offsets
+ * are byte addresses with a stride of 4 (see msm_sdw_regmap_config)
+ * and are grouped by the hardware page they live on; the page byte is
+ * programmed through MSM_SDW_PAGE_REGISTER (msm_sdw_page_write()).
+ */
+#define MSM_SDW_PAGE_REGISTER 0x0000
+
+/* Page-A Registers */
+#define MSM_SDW_TX9_SPKR_PROT_PATH_CTL 0x0308
+#define MSM_SDW_TX9_SPKR_PROT_PATH_CFG0 0x030c
+#define MSM_SDW_TX10_SPKR_PROT_PATH_CTL 0x0318
+#define MSM_SDW_TX10_SPKR_PROT_PATH_CFG0 0x031c
+#define MSM_SDW_TX11_SPKR_PROT_PATH_CTL 0x0328
+#define MSM_SDW_TX11_SPKR_PROT_PATH_CFG0 0x032c
+#define MSM_SDW_TX12_SPKR_PROT_PATH_CTL 0x0338
+#define MSM_SDW_TX12_SPKR_PROT_PATH_CFG0 0x033c
+
+/* Page-B Registers */
+#define MSM_SDW_COMPANDER7_CTL0 0x0024
+#define MSM_SDW_COMPANDER7_CTL1 0x0028
+#define MSM_SDW_COMPANDER7_CTL2 0x002c
+#define MSM_SDW_COMPANDER7_CTL3 0x0030
+#define MSM_SDW_COMPANDER7_CTL4 0x0034
+#define MSM_SDW_COMPANDER7_CTL5 0x0038
+#define MSM_SDW_COMPANDER7_CTL6 0x003c
+#define MSM_SDW_COMPANDER7_CTL7 0x0040
+#define MSM_SDW_COMPANDER8_CTL0 0x0044
+#define MSM_SDW_COMPANDER8_CTL1 0x0048
+#define MSM_SDW_COMPANDER8_CTL2 0x004c
+#define MSM_SDW_COMPANDER8_CTL3 0x0050
+#define MSM_SDW_COMPANDER8_CTL4 0x0054
+#define MSM_SDW_COMPANDER8_CTL5 0x0058
+#define MSM_SDW_COMPANDER8_CTL6 0x005c
+#define MSM_SDW_COMPANDER8_CTL7 0x0060
+#define MSM_SDW_RX7_RX_PATH_CTL 0x01a4
+#define MSM_SDW_RX7_RX_PATH_CFG0 0x01a8
+#define MSM_SDW_RX7_RX_PATH_CFG1 0x01ac
+#define MSM_SDW_RX7_RX_PATH_CFG2 0x01b0
+#define MSM_SDW_RX7_RX_VOL_CTL 0x01b4
+#define MSM_SDW_RX7_RX_PATH_MIX_CTL 0x01b8
+#define MSM_SDW_RX7_RX_PATH_MIX_CFG 0x01bc
+#define MSM_SDW_RX7_RX_VOL_MIX_CTL 0x01c0
+#define MSM_SDW_RX7_RX_PATH_SEC0 0x01c4
+#define MSM_SDW_RX7_RX_PATH_SEC1 0x01c8
+#define MSM_SDW_RX7_RX_PATH_SEC2 0x01cc
+#define MSM_SDW_RX7_RX_PATH_SEC3 0x01d0
+#define MSM_SDW_RX7_RX_PATH_SEC5 0x01d8
+#define MSM_SDW_RX7_RX_PATH_SEC6 0x01dc
+#define MSM_SDW_RX7_RX_PATH_SEC7 0x01e0
+#define MSM_SDW_RX7_RX_PATH_MIX_SEC0 0x01e4
+#define MSM_SDW_RX7_RX_PATH_MIX_SEC1 0x01e8
+#define MSM_SDW_RX8_RX_PATH_CTL 0x0384
+#define MSM_SDW_RX8_RX_PATH_CFG0 0x0388
+#define MSM_SDW_RX8_RX_PATH_CFG1 0x038c
+#define MSM_SDW_RX8_RX_PATH_CFG2 0x0390
+#define MSM_SDW_RX8_RX_VOL_CTL 0x0394
+#define MSM_SDW_RX8_RX_PATH_MIX_CTL 0x0398
+#define MSM_SDW_RX8_RX_PATH_MIX_CFG 0x039c
+#define MSM_SDW_RX8_RX_VOL_MIX_CTL 0x03a0
+#define MSM_SDW_RX8_RX_PATH_SEC0 0x03a4
+#define MSM_SDW_RX8_RX_PATH_SEC1 0x03a8
+#define MSM_SDW_RX8_RX_PATH_SEC2 0x03ac
+#define MSM_SDW_RX8_RX_PATH_SEC3 0x03b0
+#define MSM_SDW_RX8_RX_PATH_SEC5 0x03b8
+#define MSM_SDW_RX8_RX_PATH_SEC6 0x03bc
+#define MSM_SDW_RX8_RX_PATH_SEC7 0x03c0
+#define MSM_SDW_RX8_RX_PATH_MIX_SEC0 0x03c4
+#define MSM_SDW_RX8_RX_PATH_MIX_SEC1 0x03c8
+
+/* Page-C Registers */
+#define MSM_SDW_BOOST0_BOOST_PATH_CTL 0x0064
+#define MSM_SDW_BOOST0_BOOST_CTL 0x0068
+#define MSM_SDW_BOOST0_BOOST_CFG1 0x006c
+#define MSM_SDW_BOOST0_BOOST_CFG2 0x0070
+#define MSM_SDW_BOOST1_BOOST_PATH_CTL 0x0084
+#define MSM_SDW_BOOST1_BOOST_CTL 0x0088
+#define MSM_SDW_BOOST1_BOOST_CFG1 0x008c
+#define MSM_SDW_BOOST1_BOOST_CFG2 0x0090
+/* AHB bridge window: treated as volatile by the regmap (never cached). */
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_0 0x00a4
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_1 0x00a8
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_2 0x00ac
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_3 0x00b0
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_0 0x00b4
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_1 0x00b8
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_2 0x00bc
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_3 0x00c0
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_0 0x00c4
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_1 0x00c8
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_2 0x00cc
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_3 0x00d0
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_0 0x00d4
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_1 0x00d8
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_2 0x00dc
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_3 0x00e0
+#define MSM_SDW_AHB_BRIDGE_ACCESS_CFG 0x00e4
+#define MSM_SDW_AHB_BRIDGE_ACCESS_STATUS 0x00e8
+
+/* Page-D Registers */
+#define MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL 0x0104
+#define MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL 0x0108
+#define MSM_SDW_CLK_RST_CTRL_SWR_CONTROL 0x010c
+#define MSM_SDW_TOP_TOP_CFG0 0x0204
+#define MSM_SDW_TOP_TOP_CFG1 0x0208
+#define MSM_SDW_TOP_RX_I2S_CTL 0x020c
+#define MSM_SDW_TOP_TX_I2S_CTL 0x0210
+#define MSM_SDW_TOP_I2S_CLK 0x0214
+#define MSM_SDW_TOP_RX7_PATH_INPUT0_MUX 0x0218
+#define MSM_SDW_TOP_RX7_PATH_INPUT1_MUX 0x021c
+#define MSM_SDW_TOP_RX8_PATH_INPUT0_MUX 0x0220
+#define MSM_SDW_TOP_RX8_PATH_INPUT1_MUX 0x0224
+#define MSM_SDW_TOP_FREQ_MCLK 0x0228
+#define MSM_SDW_TOP_DEBUG_BUS_SEL 0x022c
+#define MSM_SDW_TOP_DEBUG_EN 0x0230
+#define MSM_SDW_TOP_I2S_RESET 0x0234
+#define MSM_SDW_TOP_BLOCKS_RESET 0x0238
+
+#endif
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c b/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c
new file mode 100644
index 000000000000..e79db0ab17be
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/regmap.h>
+#include "msm_sdw.h"
+
+/*
+ * Power-on reset values seeded into the regmap cache. The "Page #10"
+ * .. "Page #13" labels presumably correspond to the Page-A..Page-D
+ * groups in msm_sdw_registers.h — TODO confirm. Registers without an
+ * entry here have no cached default.
+ */
+static const struct reg_default msm_sdw_defaults[] = {
+	/* Page #10 registers */
+	{ MSM_SDW_PAGE_REGISTER, 0x00 },
+	{ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX9_SPKR_PROT_PATH_CFG0, 0x00 },
+	{ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX10_SPKR_PROT_PATH_CFG0, 0x00 },
+	{ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX11_SPKR_PROT_PATH_CFG0, 0x00 },
+	{ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX12_SPKR_PROT_PATH_CFG0, 0x00 },
+	/* Page #11 registers */
+	{ MSM_SDW_COMPANDER7_CTL0, 0x60 },
+	{ MSM_SDW_COMPANDER7_CTL1, 0xdb },
+	{ MSM_SDW_COMPANDER7_CTL2, 0xff },
+	{ MSM_SDW_COMPANDER7_CTL3, 0x35 },
+	{ MSM_SDW_COMPANDER7_CTL4, 0xff },
+	{ MSM_SDW_COMPANDER7_CTL5, 0x00 },
+	{ MSM_SDW_COMPANDER7_CTL6, 0x01 },
+	{ MSM_SDW_COMPANDER8_CTL0, 0x60 },
+	{ MSM_SDW_COMPANDER8_CTL1, 0xdb },
+	{ MSM_SDW_COMPANDER8_CTL2, 0xff },
+	{ MSM_SDW_COMPANDER8_CTL3, 0x35 },
+	{ MSM_SDW_COMPANDER8_CTL4, 0xff },
+	{ MSM_SDW_COMPANDER8_CTL5, 0x00 },
+	{ MSM_SDW_COMPANDER8_CTL6, 0x01 },
+	{ MSM_SDW_RX7_RX_PATH_CTL, 0x04 },
+	{ MSM_SDW_RX7_RX_PATH_CFG0, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_CFG2, 0x8f },
+	{ MSM_SDW_RX7_RX_VOL_CTL, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_MIX_CTL, 0x04 },
+	{ MSM_SDW_RX7_RX_VOL_MIX_CTL, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC2, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC3, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC5, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC6, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC7, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_MIX_SEC1, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_CTL, 0x04 },
+	{ MSM_SDW_RX8_RX_PATH_CFG0, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_CFG2, 0x8f },
+	{ MSM_SDW_RX8_RX_VOL_CTL, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_MIX_CTL, 0x04 },
+	{ MSM_SDW_RX8_RX_VOL_MIX_CTL, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC2, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC3, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC5, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC6, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC7, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_MIX_SEC1, 0x00 },
+	/* Page #12 registers */
+	{ MSM_SDW_BOOST0_BOOST_PATH_CTL, 0x00 },
+	{ MSM_SDW_BOOST0_BOOST_CTL, 0xb2 },
+	{ MSM_SDW_BOOST0_BOOST_CFG1, 0x00 },
+	{ MSM_SDW_BOOST0_BOOST_CFG2, 0x00 },
+	{ MSM_SDW_BOOST1_BOOST_PATH_CTL, 0x00 },
+	{ MSM_SDW_BOOST1_BOOST_CTL, 0xb2 },
+	{ MSM_SDW_BOOST1_BOOST_CFG1, 0x00 },
+	{ MSM_SDW_BOOST1_BOOST_CFG2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_ACCESS_CFG, 0x0f },
+	{ MSM_SDW_AHB_BRIDGE_ACCESS_STATUS, 0x03 },
+	/* Page #13 registers */
+	{ MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL, 0x00 },
+	{ MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL, 0x00 },
+	{ MSM_SDW_CLK_RST_CTRL_SWR_CONTROL, 0x00 },
+	{ MSM_SDW_TOP_TOP_CFG0, 0x00 },
+	{ MSM_SDW_TOP_TOP_CFG1, 0x00 },
+	{ MSM_SDW_TOP_RX_I2S_CTL, 0x0C },
+	{ MSM_SDW_TOP_TX_I2S_CTL, 0x00 },
+	{ MSM_SDW_TOP_I2S_CLK, 0x00 },
+	{ MSM_SDW_TOP_RX7_PATH_INPUT0_MUX, 0x00 },
+	{ MSM_SDW_TOP_RX7_PATH_INPUT1_MUX, 0x00 },
+	{ MSM_SDW_TOP_RX8_PATH_INPUT0_MUX, 0x00 },
+	{ MSM_SDW_TOP_RX8_PATH_INPUT1_MUX, 0x00 },
+	{ MSM_SDW_TOP_FREQ_MCLK, 0x00 },
+	{ MSM_SDW_TOP_DEBUG_BUS_SEL, 0x00 },
+	{ MSM_SDW_TOP_DEBUG_EN, 0x00 },
+	{ MSM_SDW_TOP_I2S_RESET, 0x00 },
+	{ MSM_SDW_TOP_BLOCKS_RESET, 0x00 },
+};
+
+/*
+ * regmap .readable_reg callback: consults the generated lookup table.
+ * Fix: guard the table index — the original indexed
+ * msm_sdw_reg_readable[] with an unchecked register number.
+ */
+static bool msm_sdw_is_readable_register(struct device *dev, unsigned int reg)
+{
+	if (reg > MSM_SDW_MAX_REGISTER)
+		return false;
+	return msm_sdw_reg_readable[reg];
+}
+
+/*
+ * regmap .volatile_reg callback. Volatile (never cached) registers:
+ *  - the AHB bridge WR/RD data+address window, which is contiguous
+ *    from MSM_SDW_AHB_BRIDGE_WR_DATA_0 (0x00a4) through
+ *    MSM_SDW_AHB_BRIDGE_RD_DATA_3 (0x00e0) at the map's 4-byte stride
+ *    (same sixteen registers the per-register switch listed);
+ *  - the MCLK and FS_CNT clock/reset control registers.
+ */
+static bool msm_sdw_is_volatile_register(struct device *dev, unsigned int reg)
+{
+	if (reg >= MSM_SDW_AHB_BRIDGE_WR_DATA_0 &&
+	    reg <= MSM_SDW_AHB_BRIDGE_RD_DATA_3)
+		return true;
+
+	return reg == MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL ||
+	       reg == MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL;
+}
+
+/*
+ * regmap description for the SDW codec block: 16-bit register
+ * addresses at a 4-byte stride, 8-bit values, rbtree register cache
+ * seeded from msm_sdw_defaults.
+ */
+const struct regmap_config msm_sdw_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.reg_stride = 4,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = msm_sdw_defaults,
+	.num_reg_defaults = ARRAY_SIZE(msm_sdw_defaults),
+	.max_register = MSM_SDW_MAX_REGISTER,
+	.volatile_reg = msm_sdw_is_volatile_register,
+	.readable_reg = msm_sdw_is_readable_register,
+};
diff --git a/sound/soc/codecs/msm8x16/Kconfig b/sound/soc/codecs/msmfalcon_cdc/Kconfig
index d225b7a56925..dc461a619781 100644
--- a/sound/soc/codecs/msm8x16/Kconfig
+++ b/sound/soc/codecs/msmfalcon_cdc/Kconfig
@@ -1,3 +1,3 @@
-config SND_SOC_MSM8X16_WCD
+config SND_SOC_MSMFALCON_CDC
tristate "MSM Internal PMIC based codec"
diff --git a/sound/soc/codecs/msmfalcon_cdc/Makefile b/sound/soc/codecs/msmfalcon_cdc/Makefile
new file mode 100644
index 000000000000..814308d9f5b0
--- /dev/null
+++ b/sound/soc/codecs/msmfalcon_cdc/Makefile
@@ -0,0 +1,2 @@
+snd-soc-msmfalcon-cdc-objs := msm-analog-cdc.o msm-digital-cdc.o msmfalcon-regmap.o
+obj-$(CONFIG_SND_SOC_MSMFALCON_CDC) += snd-soc-msmfalcon-cdc.o msmfalcon-cdc-irq.o
diff --git a/sound/soc/codecs/msmfalcon_cdc/msm-analog-cdc.c b/sound/soc/codecs/msmfalcon_cdc/msm-analog-cdc.c
new file mode 100644
index 000000000000..33c5e103dfe7
--- /dev/null
+++ b/sound/soc/codecs/msmfalcon_cdc/msm-analog-cdc.c
@@ -0,0 +1,4645 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/workqueue.h>
+#include <linux/regmap.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <sound/q6afe-v2.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/q6core.h>
+#include "msm-analog-cdc.h"
+#include "msmfalcon-cdc-irq.h"
+#include "msmfalcon-cdc-registers.h"
+#include "msm-cdc-common.h"
+#include "../../msm/msmfalcon-common.h"
+#include "../wcd-mbhc-v2.h"
+
+#define DRV_NAME "pmic_analog_codec"
+/* PCM rates/formats advertised by the analog codec DAIs. */
+#define MSMFALCON_CDC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
+			SNDRV_PCM_RATE_48000)
+#define MSMFALCON_CDC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+		SNDRV_PCM_FMTBIT_S24_LE)
+#define MSM_DIG_CDC_STRING_LEN 80
+#define MSM_ANLG_CDC_VERSION_ENTRY_SIZE 32
+
+#define CODEC_DT_MAX_PROP_SIZE 40
+#define MAX_ON_DEMAND_SUPPLY_NAME_LENGTH 64
+#define BUS_DOWN 1
+
+/*
+ * 50 milliseconds is sufficient for DSP bring-up in the modem
+ * after a Sub System Restart.
+ */
+#define ADSP_STATE_READY_TIMEOUT_MS 50
+
+/* Speaker boost operating modes. */
+enum {
+	BOOST_SWITCH = 0,
+	BOOST_ALWAYS,
+	BYPASS_ALWAYS,
+	BOOST_ON_FOREVER,
+};
+
+/* EAR/SPK power-up (PMU) and power-down (PMD) event IDs — TODO confirm */
+#define EAR_PMD 0
+#define EAR_PMU 1
+#define SPK_PMD 2
+#define SPK_PMU 3
+
+/* Micbias voltage range/step — values suggest microvolts; confirm. */
+#define MICBIAS_DEFAULT_VAL 1800000
+#define MICBIAS_MIN_VAL 1600000
+#define MICBIAS_STEP_SIZE 50000
+
+/* Speaker boost voltage range/step — values suggest millivolts; confirm. */
+#define DEFAULT_BOOST_VOLTAGE 5000
+#define MIN_BOOST_VOLTAGE 4000
+#define MAX_BOOST_VOLTAGE 5550
+#define BOOST_VOLTAGE_STEP 50
+
+#define MSMFALCON_CDC_MBHC_BTN_COARSE_ADJ 100 /* in mV */
+#define MSMFALCON_CDC_MBHC_BTN_FINE_ADJ 12 /* in mV */
+
+/* Integer conversion of a value into step units above a minimum. */
+#define VOLTAGE_CONVERTER(value, min_value, step_size)\
+	((value - min_value)/step_size)
+
+/* RX mixer-1 input mux selections. */
+enum {
+	RX_MIX1_INP_SEL_ZERO = 0,
+	RX_MIX1_INP_SEL_IIR1,
+	RX_MIX1_INP_SEL_IIR2,
+	RX_MIX1_INP_SEL_RX1,
+	RX_MIX1_INP_SEL_RX2,
+	RX_MIX1_INP_SEL_RX3,
+};
+
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[];
+/* By default enable the internal speaker boost */
+static bool spkr_boost_en = true;
+/* Cleared after first boot handling — presumably SSR-related; confirm. */
+static bool initial_boot = true;
+
+/* Supply names for on-demand regulators (parsed via
+ * msm_anlg_cdc_dt_parse_vreg_info() — TODO confirm).
+ */
+static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
+	"cdc-vdd-mic-bias",
+};
+
+/*
+ * MBHC (headset detect/button) register descriptors consumed by the
+ * common wcd-mbhc-v2 layer: WCD_MBHC_REGISTER(name, reg, mask, shift,
+ * ...). Entries with reg == 0 are features this codec does not expose.
+ */
+static struct wcd_mbhc_register
+	wcd_mbhc_registers[WCD_MBHC_REG_FUNC_MAX] = {
+	WCD_MBHC_REGISTER("WCD_MBHC_L_DET_EN",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_DET_EN",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MECH_DETECTION_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_CLAMP_CTL",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x18, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_DETECTION_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_CTRL",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PLUG_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_PLUG_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x08, 3, 0),
+	/* NOTE(review): same reg/mask as ELECT_DETECTION_TYPE — confirm. */
+	WCD_MBHC_REGISTER("WCD_MBHC_SW_HPH_LP_100K_TO_GND",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_SCHMT_ISRC",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x06, 1, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_EN",
+			  MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_INSREM_DBNC",
+			  MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0xF0, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_DBNC",
+			  MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0x0C, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_VREF",
+			  MSM89XX_PMIC_ANALOG_MBHC_BTN3_CTL, 0x03, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_COMP_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x01,
+			  0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_SCHMT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x02,
+			  1, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_SCHMT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x08,
+			  3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_SCHMT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x04,
+			  2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_OCP_FSM_EN",
+			  MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT, 0xFF, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_ISRC_CTL",
+			  MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x70, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0xFF,
+			  0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MICB_CTRL",
+			  MSM89XX_PMIC_ANALOG_MICB_2_EN, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_CNP_WG_TIME",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFC, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_PA_EN",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PA_EN",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_PA_EN",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x30, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_SWCH_LEVEL_REMOVE",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT,
+			  0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
+			  MSM89XX_PMIC_ANALOG_MICB_2_EN, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN", 0, 0, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS", 0, 0, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL", 0, 0, 0, 0),
+};
+
+/* Multiply gain_adj and offset by 1000 and 100 to avoid float arithmetic */
+/* Impedance calibration rows indexed by enum wcd_curr_ref (used as a
+ * direct index in msm_anlg_cdc_set_ref_current()/adj_ref_current()).
+ */
+static const struct wcd_imped_i_ref imped_i_ref[] = {
+	{I_h4_UA, 8, 800, 9000, 10000},
+	{I_pt5_UA, 10, 100, 990, 4600},
+	{I_14_UA, 17, 14, 1050, 700},
+	{I_l4_UA, 10, 4, 1165, 110},
+	{I_1_UA, 0, 1, 1200, 65},
+};
+
+/* MBHC interrupt line assignments for this codec. */
+static const struct wcd_mbhc_intr intr_ids = {
+	.mbhc_sw_intr =  MSM89XX_IRQ_MBHC_HS_DET,
+	.mbhc_btn_press_intr = MSM89XX_IRQ_MBHC_PRESS,
+	.mbhc_btn_release_intr = MSM89XX_IRQ_MBHC_RELEASE,
+	.mbhc_hs_ins_intr = MSM89XX_IRQ_MBHC_INSREM_DET1,
+	.mbhc_hs_rem_intr = MSM89XX_IRQ_MBHC_INSREM_DET,
+	.hph_left_ocp = MSM89XX_IRQ_HPHL_OCP,
+	.hph_right_ocp = MSM89XX_IRQ_HPHR_OCP,
+};
+
+/* Forward declarations for helpers defined later in this file. */
+static int msm_anlg_cdc_dt_parse_vreg_info(struct device *dev,
+					   struct msmfalcon_cdc_regulator *vreg,
+					   const char *vreg_name,
+					   bool ondemand);
+static struct msmfalcon_cdc_pdata *msm_anlg_cdc_populate_dt_pdata(
+						struct device *dev);
+static int msm_anlg_cdc_enable_ext_mb_source(struct wcd_mbhc *wcd_mbhc,
+					     bool turn_on);
+static void msm_anlg_cdc_trim_btn_reg(struct snd_soc_codec *codec);
+static void msm_anlg_cdc_set_micb_v(struct snd_soc_codec *codec);
+static void msm_anlg_cdc_set_boost_v(struct snd_soc_codec *codec);
+static void msm_anlg_cdc_set_auto_zeroing(struct snd_soc_codec *codec,
+					  bool enable);
+static void msm_anlg_cdc_configure_cap(struct snd_soc_codec *codec,
+				       bool micbias1, bool micbias2);
+static bool msm_anlg_cdc_use_mb(struct snd_soc_codec *codec);
+
+/*
+ * Resolve the effective codec version: newest codec_version ID wins,
+ * falling back to the PMIC revision for pre-CONGA parts. Logs and
+ * returns UNSUPPORTED when nothing matches. Check order matches the
+ * original if/else ladder exactly.
+ */
+static int get_codec_version(struct msmfalcon_cdc_priv *msmfalcon_cdc)
+{
+	static const int cdc_versions[] = {
+		DRAX_CDC, DIANGU, CAJON_2_0, CAJON, CONGA,
+	};
+	static const int pmic_revs[] = {
+		TOMBAK_2_0, TOMBAK_1_0,
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cdc_versions); i++)
+		if (msmfalcon_cdc->codec_version == cdc_versions[i])
+			return cdc_versions[i];
+
+	for (i = 0; i < ARRAY_SIZE(pmic_revs); i++)
+		if (msmfalcon_cdc->pmic_rev == pmic_revs[i])
+			return pmic_revs[i];
+
+	pr_err("%s: unsupported codec version\n", __func__);
+	return UNSUPPORTED;
+}
+
+/*
+ * Measure raw headset impedance through the MBHC FSM. For each leg
+ * enabled by imped_det_pin, pulse the corresponding ZDET_*_MEAS_EN bit
+ * in MBHC_FSM_CTL and read the raw register value from MBHC_BTN_RESULT
+ * into *impedance_l / *impedance_r.
+ */
+static void wcd_mbhc_meas_imped(struct snd_soc_codec *codec,
+				s16 *impedance_l, s16 *impedance_r)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	if ((msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_BOTH) ||
+	    (msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_HPHL)) {
+		/* Enable ZDET_L_MEAS_EN */
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				    0x08, 0x08);
+		/* Wait for 2ms for measurement to complete */
+		usleep_range(2000, 2100);
+		/* Read Left impedance value from Result1 */
+		*impedance_l = snd_soc_read(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+		/* Enable ZDET_R_MEAS_EN */
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				    0x08, 0x00);
+	}
+	if ((msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_BOTH) ||
+	    (msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_HPHR)) {
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				    0x04, 0x04);
+		/* Wait for 2ms for measurement to complete */
+		usleep_range(2000, 2100);
+		/* Read Right impedance value from Result1 */
+		*impedance_r = snd_soc_read(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				    0x04, 0x00);
+	}
+}
+
+/*
+ * Program the impedance-measurement reference current: caches the
+ * matching imped_i_ref row and writes the current-select field
+ * (MICB_2_EN bits 2:0) for the requested wcd_curr_ref.
+ */
+static void msm_anlg_cdc_set_ref_current(struct snd_soc_codec *codec,
+					 enum wcd_curr_ref curr_ref)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: curr_ref: %d\n", __func__, curr_ref);
+
+	/*
+	 * NOTE(review): on pre-CAJON codecs this only logs "not required"
+	 * yet still falls through and programs MICB_2_EN — confirm whether
+	 * an early return is intended (msm_anlg_cdc_adj_ref_current()
+	 * does return early for the same check).
+	 */
+	if (get_codec_version(msmfalcon_cdc) < CAJON)
+		dev_dbg(codec->dev, "%s: Setting ref current not required\n",
+			__func__);
+
+	msmfalcon_cdc->imped_i_ref = imped_i_ref[curr_ref];
+
+	switch (curr_ref) {
+	case I_h4_UA:
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MICB_2_EN,
+				    0x07, 0x01);
+		break;
+	case I_pt5_UA:
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MICB_2_EN,
+				    0x07, 0x04);
+		break;
+	case I_14_UA:
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MICB_2_EN,
+				    0x07, 0x03);
+		break;
+	/* NOTE(review): I_l4_UA programs the same value as I_h4_UA (0x01)
+	 * — confirm against the PMIC register spec.
+	 */
+	case I_l4_UA:
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MICB_2_EN,
+				    0x07, 0x01);
+		break;
+	case I_1_UA:
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_MICB_2_EN,
+				    0x07, 0x00);
+		break;
+	default:
+		pr_debug("%s: No ref current set\n", __func__);
+		break;
+	}
+}
+
+/*
+ * msm_anlg_cdc_adj_ref_current - step the reference current down until the
+ * measured impedance code reaches the current range's minimum
+ * @codec: codec instance
+ * @impedance_l: in/out raw left measurement, re-measured on each step
+ * @impedance_r: in/out raw right measurement, re-measured on each step
+ *
+ * Starts from index 2 (I_14_UA) and pre-increments before each retry, so
+ * the first adjustment uses imped_i_ref[3]; the loop stops once index
+ * I_1_UA (the last table entry) has been tried.  Returns false on
+ * pre-CAJON silicon where no adjustment is needed, true otherwise.
+ */
+static bool msm_anlg_cdc_adj_ref_current(struct snd_soc_codec *codec,
+ s16 *impedance_l, s16 *impedance_r)
+{
+ int i = 2;
+ s16 compare_imp = 0;
+ struct msmfalcon_cdc_priv *msmfalcon_cdc =
+ snd_soc_codec_get_drvdata(codec);
+
+ /* Use whichever channel is actually wired for detection */
+ if (msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_HPHR)
+ compare_imp = *impedance_r;
+ else
+ compare_imp = *impedance_l;
+
+ if (get_codec_version(msmfalcon_cdc) < CAJON) {
+ dev_dbg(codec->dev,
+ "%s: Reference current adjustment not required\n",
+ __func__);
+ return false;
+ }
+
+ while (compare_imp < imped_i_ref[i].min_val) {
+ msm_anlg_cdc_set_ref_current(codec, imped_i_ref[++i].curr_ref);
+ wcd_mbhc_meas_imped(codec, impedance_l, impedance_r);
+ compare_imp = (msmfalcon_cdc->imped_det_pin ==
+ WCD_MBHC_DET_HPHR) ? *impedance_r : *impedance_l;
+ /* I_1_UA is the last table index - do not step past it */
+ if (i >= I_1_UA)
+ break;
+ }
+ return true;
+}
+
+/*
+ * Register the machine driver's external speaker PA control callback on
+ * this codec's private data; ignored (with an error log) if codec is NULL.
+ */
+void msm_anlg_cdc_spk_ext_pa_cb(
+		int (*codec_spk_ext_pa)(struct snd_soc_codec *codec,
+			int enable), struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *priv;
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer!\n", __func__);
+		return;
+	}
+	dev_dbg(codec->dev, "%s: Enter\n", __func__);
+	priv = snd_soc_codec_get_drvdata(codec);
+	priv->codec_spk_ext_pa_cb = codec_spk_ext_pa;
+}
+
+/*
+ * msm_anlg_cdc_compute_impedance - convert raw MBHC codes to ohms
+ * @codec: codec instance
+ * @l: raw left-channel measurement code
+ * @r: raw right-channel measurement code
+ * @zl: out, computed left impedance
+ * @zr: out, computed right impedance
+ * @high: true when the plug was classified as high-range impedance
+ *
+ * Selects the conversion formula by codec generation.  For CAJON and
+ * later, the channel that was measured with the adjusted reference
+ * current uses the cached calibration row (multiplier/offset/gain_adj)
+ * while the other channel uses the DEFAULT_* constants.
+ */
+static void msm_anlg_cdc_compute_impedance(struct snd_soc_codec *codec, s16 l,
+ s16 r, uint32_t *zl, uint32_t *zr,
+ bool high)
+{
+ struct msmfalcon_cdc_priv *msmfalcon_cdc =
+ snd_soc_codec_get_drvdata(codec);
+ uint32_t rl = 0, rr = 0;
+ struct wcd_imped_i_ref R = msmfalcon_cdc->imped_i_ref;
+ int codec_ver = get_codec_version(msmfalcon_cdc);
+
+ switch (codec_ver) {
+ case TOMBAK_1_0:
+ case TOMBAK_2_0:
+ case CONGA:
+ if (high) {
+ dev_dbg(codec->dev,
+ "%s: This plug has high range impedance\n",
+ __func__);
+ rl = (uint32_t)(((100 * (l * 400 - 200))/96) - 230);
+ rr = (uint32_t)(((100 * (r * 400 - 200))/96) - 230);
+ } else {
+ dev_dbg(codec->dev,
+ "%s: This plug has low range impedance\n",
+ __func__);
+ /* NOTE(review): (13/10) is integer division and
+ * evaluates to 1, not 1.3; this matches the legacy
+ * msm8x16 driver - confirm intent before changing.
+ */
+ rl = (uint32_t)(((1000 * (l * 2 - 1))/1165) - (13/10));
+ rr = (uint32_t)(((1000 * (r * 2 - 1))/1165) - (13/10));
+ }
+ break;
+ case CAJON:
+ case CAJON_2_0:
+ case DIANGU:
+ case DRAX_CDC:
+ if (msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_HPHL) {
+ rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
+ (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+ rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
+ - R.offset * R.gain_adj)/(R.gain_adj * 100));
+ } else if (msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_HPHR) {
+ rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
+ - R.offset * R.gain_adj)/(R.gain_adj * 100));
+ rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
+ (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+ } else if (msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_NONE) {
+ rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
+ (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+ rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
+ (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+ } else {
+ rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
+ - R.offset * R.gain_adj)/(R.gain_adj * 100));
+ rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
+ - R.offset * R.gain_adj)/(R.gain_adj * 100));
+ }
+ break;
+ default:
+ dev_dbg(codec->dev, "%s: No codec mentioned\n", __func__);
+ break;
+ }
+ *zl = rl;
+ *zr = rr;
+}
+
+/*
+ * Fetch the hwdep-delivered firmware calibration blob of the requested
+ * type for this codec's MBHC; returns NULL (with an error log) when the
+ * codec pointer is missing or no calibration of that type was sent.
+ */
+static struct firmware_cal *msm_anlg_cdc_get_hwdep_fw_cal(
+				struct wcd_mbhc *wcd_mbhc,
+				enum wcd_cal_type type)
+{
+	struct snd_soc_codec *codec = wcd_mbhc->codec;
+	struct msmfalcon_cdc_priv *priv;
+	struct firmware_cal *cal;
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer\n", __func__);
+		return NULL;
+	}
+	priv = snd_soc_codec_get_drvdata(codec);
+	cal = wcdcal_get_fw_cal(priv->fw_data, type);
+	if (!cal)
+		dev_err(codec->dev, "%s: cal not sent by %d\n",
+			__func__, type);
+	return cal;
+}
+
+/* Enable or disable the given SPMI-routed codec interrupt. */
+static void wcd9xxx_spmi_irq_control(struct snd_soc_codec *codec,
+				     int irq, bool enable)
+{
+	if (!enable) {
+		wcd9xxx_spmi_disable_irq(irq);
+		return;
+	}
+	wcd9xxx_spmi_enable_irq(irq);
+}
+
+/* Gate the MBHC clock (bit 3 of CDC_DIG_CLK_CTL) on or off. */
+static void msm_anlg_cdc_mbhc_clk_setup(struct snd_soc_codec *codec,
+					bool enable)
+{
+	snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    0x08, enable ? 0x08 : 0x00);
+}
+
+/*
+ * msm_anlg_cdc_mbhc_map_btn_code_to_num - decode the MBHC button result
+ * @codec: codec instance
+ *
+ * MBHC_BTN_RESULT reports the pressed button as a thermometer code
+ * (0, 1, 3, 7, 15).  Map it to a button index 0..4; any other code is
+ * invalid and yields -EINVAL.
+ */
+static int msm_anlg_cdc_mbhc_map_btn_code_to_num(struct snd_soc_codec *codec)
+{
+	int btn_code;
+	int btn;
+
+	btn_code = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+
+	switch (btn_code) {
+	case 0:
+		btn = 0;
+		break;
+	case 1:
+		btn = 1;
+		break;
+	case 3:
+		btn = 2;
+		break;
+	case 7:
+		btn = 3;
+		break;
+	case 15:
+		btn = 4;
+		break;
+	default:
+		btn = -EINVAL;
+		break;
+	}
+
+	return btn;
+}
+
+/*
+ * msm_anlg_cdc_spmi_lock_sleep - take or release the SPMI sleep wakelock
+ * @mbhc: MBHC context (unused by this backend)
+ * @lock: true to acquire, false to release
+ *
+ * Returns the acquire result when locking; unlocking cannot fail and
+ * always returns false.
+ */
+static bool msm_anlg_cdc_spmi_lock_sleep(struct wcd_mbhc *mbhc, bool lock)
+{
+	if (lock)
+		return wcd9xxx_spmi_lock_sleep();
+	wcd9xxx_spmi_unlock_sleep();
+	return false;
+}
+
+/*
+ * Report whether the requested micbias (MIC_BIAS_1/MIC_BIAS_2) is
+ * currently enabled (bit 7 of the corresponding MICB_x_EN register).
+ */
+static bool msm_anlg_cdc_micb_en_status(struct wcd_mbhc *mbhc, int micb_num)
+{
+	u16 reg;
+
+	switch (micb_num) {
+	case MIC_BIAS_1:
+		reg = MSM89XX_PMIC_ANALOG_MICB_1_EN;
+		break;
+	case MIC_BIAS_2:
+		reg = MSM89XX_PMIC_ANALOG_MICB_2_EN;
+		break;
+	default:
+		return false;
+	}
+	return (snd_soc_read(mbhc->codec, reg) & 0x80);
+}
+
+/* Switch the analog master bias (MASTER_BIAS_CTL bits [5:4]) on or off. */
+static void msm_anlg_cdc_enable_master_bias(struct snd_soc_codec *codec,
+					    bool enable)
+{
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
+			    0x30, enable ? 0x30 : 0x00);
+}
+
+/*
+ * msm_anlg_cdc_mbhc_common_micb_ctrl - common micbias-1 control for MBHC
+ * @codec: codec instance
+ * @event: one of the MBHC_COMMON_MICB_* events
+ * @enable: true to apply the setting, false to clear it
+ *
+ * Maps the event to a micbias-1 register/mask/value triple and applies it
+ * with a single read-modify-write.  Unknown events are logged and ignored.
+ */
+static void msm_anlg_cdc_mbhc_common_micb_ctrl(struct snd_soc_codec *codec,
+					       int event, bool enable)
+{
+	u16 reg;
+	u8 mask;
+	u8 val;
+
+	switch (event) {
+	case MBHC_COMMON_MICB_PRECHARGE:
+		reg = MSM89XX_PMIC_ANALOG_MICB_1_CTL;
+		mask = 0x60;
+		val = (enable ? 0x60 : 0x00);
+		break;
+	case MBHC_COMMON_MICB_SET_VAL:
+		reg = MSM89XX_PMIC_ANALOG_MICB_1_VAL;
+		mask = 0xFF;
+		val = (enable ? 0xC0 : 0x00);
+		break;
+	case MBHC_COMMON_MICB_TAIL_CURR:
+		reg = MSM89XX_PMIC_ANALOG_MICB_1_EN;
+		mask = 0x04;
+		val = (enable ? 0x04 : 0x00);
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: Invalid event received\n", __func__);
+		return;
+	}
+	snd_soc_update_bits(codec, reg, mask, val);
+}
+
+/*
+ * Toggle the internal rbias for micbias 1 (MICB_1_INT_RBIAS bit 4).
+ * Only micbias 1 is supported; other numbers are silently ignored.
+ */
+static void msm_anlg_cdc_mbhc_internal_micbias_ctrl(struct snd_soc_codec *codec,
+						    int micbias_num,
+						    bool enable)
+{
+	if (micbias_num != 1)
+		return;
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS,
+			    0x10, enable ? 0x10 : 0x00);
+}
+
+/* True when either headphone PA (RX_HPH_CNP_EN bits [5:4]) is enabled. */
+static bool msm_anlg_cdc_mbhc_hph_pa_on_status(struct snd_soc_codec *codec)
+{
+	int val;
+
+	val = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN);
+	return (val & 0x30) != 0;
+}
+
+/*
+ * msm_anlg_cdc_mbhc_program_btn_thr - program per-button detect thresholds
+ * @codec: codec instance
+ * @btn_low: low threshold table (used when micbias is off)
+ * @btn_high: high threshold table (used when micbias is on)
+ * @num_btn: number of buttons/registers to program
+ * @is_micbias: selects which threshold table to use
+ *
+ * Splits each threshold voltage into a coarse step (bits [7:5]) and a
+ * fine step (bits [4:2]) and writes consecutive BTNx_ZDETL_CTL registers.
+ * ("course" is a historic misspelling of "coarse", kept so the variable
+ * matches the debug log text.)
+ */
+static void msm_anlg_cdc_mbhc_program_btn_thr(struct snd_soc_codec *codec,
+ s16 *btn_low, s16 *btn_high,
+ int num_btn, bool is_micbias)
+{
+ int i;
+ u32 course, fine, reg_val;
+ u16 reg_addr = MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL;
+ s16 *btn_voltage;
+
+ btn_voltage = ((is_micbias) ? btn_high : btn_low);
+
+ for (i = 0; i < num_btn; i++) {
+ course = (btn_voltage[i] / MSMFALCON_CDC_MBHC_BTN_COARSE_ADJ);
+ fine = ((btn_voltage[i] % MSMFALCON_CDC_MBHC_BTN_COARSE_ADJ) /
+ MSMFALCON_CDC_MBHC_BTN_FINE_ADJ);
+
+ /* Only bits [7:2] are written; mask 0xFC preserves [1:0] */
+ reg_val = (course << 5) | (fine << 2);
+ snd_soc_update_bits(codec, reg_addr, 0xFC, reg_val);
+ dev_dbg(codec->dev,
+ "%s: course: %d fine: %d reg_addr: %x reg_val: %x\n",
+ __func__, course, fine, reg_addr, reg_val);
+ reg_addr++;
+ }
+}
+
+/*
+ * msm_anlg_cdc_mbhc_calc_impedance - full HPHL/HPHR impedance detection
+ * @mbhc: MBHC context (codec, config, result fields)
+ * @zl: out, left impedance in ohms
+ * @zr: out, right impedance in ohms
+ *
+ * Saves the registers it clobbers, disables the MBHC FSM/micbias, runs a
+ * coarse measurement to classify high vs low range, optionally performs
+ * mono/stereo detection (per mbhc_cfg->mono_stero_detection and codec
+ * version), then re-measures with stepped-down reference currents and
+ * converts the raw codes via msm_anlg_cdc_compute_impedance().  All saved
+ * registers are restored at 'exit'.  The step ordering and delays follow
+ * the hardware sequence - do not reorder.
+ */
+static void msm_anlg_cdc_mbhc_calc_impedance(struct wcd_mbhc *mbhc,
+ uint32_t *zl, uint32_t *zr)
+{
+ struct snd_soc_codec *codec = mbhc->codec;
+ struct msmfalcon_cdc_priv *msmfalcon_cdc =
+ snd_soc_codec_get_drvdata(codec);
+ s16 impedance_l, impedance_r;
+ s16 impedance_l_fixed;
+ s16 reg0, reg1, reg2, reg3, reg4;
+ bool high = false;
+ bool min_range_used = false;
+
+ WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+ /* Snapshot every register this sequence modifies, for restore at exit */
+ reg0 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER);
+ reg1 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL);
+ reg2 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2);
+ reg3 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MICB_2_EN);
+ reg4 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL);
+
+ msmfalcon_cdc->imped_det_pin = WCD_MBHC_DET_BOTH;
+ mbhc->hph_type = WCD_MBHC_HPH_NONE;
+
+ /* disable FSM and micbias and enable pullup*/
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x80, 0x00);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MICB_2_EN,
+ 0xA5, 0x25);
+ /*
+ * Enable legacy electrical detection current sources
+ * and disable fast ramp and enable manual switching
+ * of extra capacitance
+ */
+ dev_dbg(codec->dev, "%s: Setup for impedance det\n", __func__);
+
+ msm_anlg_cdc_set_ref_current(codec, I_h4_UA);
+
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
+ 0x06, 0x02);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER,
+ 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL,
+ 0x02, 0x00);
+
+ dev_dbg(codec->dev, "%s: Start performing impedance detection\n",
+ __func__);
+
+ /* Coarse pass: raw code > 2 on either channel means high range */
+ wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
+
+ if (impedance_l > 2 || impedance_r > 2) {
+ high = true;
+ if (!mbhc->mbhc_cfg->mono_stero_detection) {
+ /* Set ZDET_CHG to 0 to discharge ramp */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x02, 0x00);
+ /* wait 40ms for the discharge ramp to complete */
+ usleep_range(40000, 40100);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+ 0x03, 0x00);
+ msmfalcon_cdc->imped_det_pin = (impedance_l > 2 &&
+ impedance_r > 2) ?
+ WCD_MBHC_DET_NONE :
+ ((impedance_l > 2) ?
+ WCD_MBHC_DET_HPHR :
+ WCD_MBHC_DET_HPHL);
+ if (msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_NONE)
+ goto exit;
+ } else {
+ /* Mono/stereo classification: code 63 = floating pin */
+ if (get_codec_version(msmfalcon_cdc) >= CAJON) {
+ if (impedance_l == 63 && impedance_r == 63) {
+ dev_dbg(codec->dev,
+ "%s: HPHL and HPHR are floating\n",
+ __func__);
+ msmfalcon_cdc->imped_det_pin =
+ WCD_MBHC_DET_NONE;
+ mbhc->hph_type = WCD_MBHC_HPH_NONE;
+ } else if (impedance_l == 63
+ && impedance_r < 63) {
+ dev_dbg(codec->dev,
+ "%s: Mono HS with HPHL floating\n",
+ __func__);
+ msmfalcon_cdc->imped_det_pin =
+ WCD_MBHC_DET_HPHR;
+ mbhc->hph_type = WCD_MBHC_HPH_MONO;
+ } else if (impedance_r == 63 &&
+ impedance_l < 63) {
+ dev_dbg(codec->dev,
+ "%s: Mono HS with HPHR floating\n",
+ __func__);
+ msmfalcon_cdc->imped_det_pin =
+ WCD_MBHC_DET_HPHL;
+ mbhc->hph_type = WCD_MBHC_HPH_MONO;
+ } else if (impedance_l > 3 && impedance_r > 3 &&
+ (impedance_l == impedance_r)) {
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
+ 0x06, 0x06);
+ wcd_mbhc_meas_imped(codec, &impedance_l,
+ &impedance_r);
+ if (impedance_r == impedance_l)
+ dev_dbg(codec->dev,
+ "%s: Mono Headset\n",
+ __func__);
+ msmfalcon_cdc->imped_det_pin =
+ WCD_MBHC_DET_NONE;
+ mbhc->hph_type =
+ WCD_MBHC_HPH_MONO;
+ } else {
+ dev_dbg(codec->dev,
+ "%s: STEREO headset is found\n",
+ __func__);
+ msmfalcon_cdc->imped_det_pin =
+ WCD_MBHC_DET_BOTH;
+ mbhc->hph_type = WCD_MBHC_HPH_STEREO;
+ }
+ }
+ }
+ }
+
+ msm_anlg_cdc_set_ref_current(codec, I_pt5_UA);
+ msm_anlg_cdc_set_ref_current(codec, I_14_UA);
+
+ /* Enable RAMP_L , RAMP_R & ZDET_CHG*/
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+ 0x03, 0x03);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x02, 0x02);
+ /* wait for 50msec for the HW to apply ramp on HPHL and HPHR */
+ usleep_range(50000, 50100);
+ /* Enable ZDET_DISCHG_CAP_CTL to add extra capacitance */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x01, 0x01);
+ /* wait for 5msec for the voltage to get stable */
+ usleep_range(5000, 5100);
+
+ wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
+
+ min_range_used = msm_anlg_cdc_adj_ref_current(codec,
+ &impedance_l, &impedance_r);
+ if (!mbhc->mbhc_cfg->mono_stero_detection) {
+ /* Set ZDET_CHG to 0 to discharge ramp */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x02, 0x00);
+ /* wait for 40msec for the capacitor to discharge */
+ usleep_range(40000, 40100);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+ 0x03, 0x00);
+ goto exit;
+ }
+
+ /* we are setting ref current to the minimun range or the measured
+ * value larger than the minimum value, so min_range_used is true.
+ * If the headset is mono headset with either HPHL or HPHR floating
+ * then we have already done the mono stereo detection and do not
+ * need to continue further.
+ */
+
+ if (!min_range_used ||
+ msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_HPHL ||
+ msmfalcon_cdc->imped_det_pin == WCD_MBHC_DET_HPHR)
+ goto exit;
+
+
+ /* Disable Set ZDET_CONN_RAMP_L and enable ZDET_CONN_FIXED_L */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+ 0x02, 0x00);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
+ 0x02, 0x02);
+ /* Set ZDET_CHG to 0 */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x02, 0x00);
+ /* wait for 40msec for the capacitor to discharge */
+ usleep_range(40000, 40100);
+
+ /* Set ZDET_CONN_RAMP_R to 0 */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+ 0x01, 0x00);
+ /* Enable ZDET_L_MEAS_EN */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x08, 0x08);
+ /* wait for 2msec for the HW to compute left inpedance value */
+ usleep_range(2000, 2100);
+ /* Read Left impedance value from Result1 */
+ impedance_l_fixed = snd_soc_read(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+ /* Disable ZDET_L_MEAS_EN */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x08, 0x00);
+ /*
+ * Assume impedance_l is L1, impedance_l_fixed is L2.
+ * If the following condition is met, we can take this
+ * headset as mono one with impedance of L2.
+ * Otherwise, take it as stereo with impedance of L1.
+ * Condition:
+ * abs[(L2-0.5L1)/(L2+0.5L1)] < abs [(L2-L1)/(L2+L1)]
+ */
+ if ((abs(impedance_l_fixed - impedance_l/2) *
+ (impedance_l_fixed + impedance_l)) >=
+ (abs(impedance_l_fixed - impedance_l) *
+ (impedance_l_fixed + impedance_l/2))) {
+ dev_dbg(codec->dev,
+ "%s: STEREO plug type detected\n",
+ __func__);
+ mbhc->hph_type = WCD_MBHC_HPH_STEREO;
+ } else {
+ dev_dbg(codec->dev,
+ "%s: MONO plug type detected\n",
+ __func__);
+ mbhc->hph_type = WCD_MBHC_HPH_MONO;
+ impedance_l = impedance_l_fixed;
+ }
+ /* Enable ZDET_CHG */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x02, 0x02);
+ /* wait for 10msec for the capacitor to charge */
+ usleep_range(10000, 10100);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+ 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
+ 0x02, 0x00);
+ /* Set ZDET_CHG to 0 to discharge HPHL */
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+ 0x02, 0x00);
+ /* wait for 40msec for the capacitor to discharge */
+ usleep_range(40000, 40100);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+ 0x02, 0x00);
+
+exit:
+ /* Restore the saved register snapshot before converting to ohms */
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, reg4);
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MICB_2_EN, reg3);
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL, reg1);
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, reg0);
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, reg2);
+ msm_anlg_cdc_compute_impedance(codec, impedance_l, impedance_r,
+ zl, zr, high);
+
+ dev_dbg(codec->dev, "%s: RL %d ohm, RR %d ohm\n", __func__, *zl, *zr);
+ dev_dbg(codec->dev, "%s: Impedance detection completed\n", __func__);
+}
+
+/*
+ * Register or unregister a digital-codec notifier block on the shared
+ * msmfalcon_cdc notifier chain; returns the chain helper's result.
+ */
+static int msm_anlg_cdc_dig_register_notifier(void *handle,
+					      struct notifier_block *nblock,
+					      bool enable)
+{
+	struct msmfalcon_cdc *cdc = handle;
+
+	if (!enable)
+		return blocking_notifier_chain_unregister(&cdc->notifier,
+							  nblock);
+	return blocking_notifier_chain_register(&cdc->notifier, nblock);
+}
+
+/*
+ * Register or unregister an MBHC notifier block on this codec's analog
+ * notifier chain; returns the chain helper's result.
+ */
+static int msm_anlg_cdc_mbhc_register_notifier(struct wcd_mbhc *wcd_mbhc,
+					       struct notifier_block *nblock,
+					       bool enable)
+{
+	struct msmfalcon_cdc_priv *priv =
+		snd_soc_codec_get_drvdata(wcd_mbhc->codec);
+
+	if (!enable)
+		return blocking_notifier_chain_unregister(&priv->notifier,
+							  nblock);
+	return blocking_notifier_chain_register(&priv->notifier, nblock);
+}
+
+/* Thin adapter: route MBHC irq requests to the SPMI irq backend. */
+static int msm_anlg_cdc_request_irq(struct snd_soc_codec *codec,
+ int irq, irq_handler_t handler,
+ const char *name, void *data)
+{
+ return wcd9xxx_spmi_request_irq(irq, handler, name, data);
+}
+
+/* Thin adapter: release an MBHC irq via the SPMI irq backend. */
+static int msm_anlg_cdc_free_irq(struct snd_soc_codec *codec,
+ int irq, void *data)
+{
+ return wcd9xxx_spmi_free_irq(irq, data);
+}
+
+/* Callback table handed to the common WCD MBHC core; binds its generic
+ * headset-detect state machine to this analog codec's register backend.
+ */
+static const struct wcd_mbhc_cb mbhc_cb = {
+ .enable_mb_source = msm_anlg_cdc_enable_ext_mb_source,
+ .trim_btn_reg = msm_anlg_cdc_trim_btn_reg,
+ .compute_impedance = msm_anlg_cdc_mbhc_calc_impedance,
+ .set_micbias_value = msm_anlg_cdc_set_micb_v,
+ .set_auto_zeroing = msm_anlg_cdc_set_auto_zeroing,
+ .get_hwdep_fw_cal = msm_anlg_cdc_get_hwdep_fw_cal,
+ .set_cap_mode = msm_anlg_cdc_configure_cap,
+ .register_notifier = msm_anlg_cdc_mbhc_register_notifier,
+ .request_irq = msm_anlg_cdc_request_irq,
+ .irq_control = wcd9xxx_spmi_irq_control,
+ .free_irq = msm_anlg_cdc_free_irq,
+ .clk_setup = msm_anlg_cdc_mbhc_clk_setup,
+ .map_btn_code_to_num = msm_anlg_cdc_mbhc_map_btn_code_to_num,
+ .lock_sleep = msm_anlg_cdc_spmi_lock_sleep,
+ .micbias_enable_status = msm_anlg_cdc_micb_en_status,
+ .mbhc_bias = msm_anlg_cdc_enable_master_bias,
+ .mbhc_common_micb_ctrl = msm_anlg_cdc_mbhc_common_micb_ctrl,
+ .micb_internal = msm_anlg_cdc_mbhc_internal_micbias_ctrl,
+ .hph_pa_on_status = msm_anlg_cdc_mbhc_hph_pa_on_status,
+ .set_btn_thr = msm_anlg_cdc_mbhc_program_btn_thr,
+ .extn_use_mb = msm_anlg_cdc_use_mb,
+};
+
+/* Supported impedance steps (ohms) used to snap measured values. */
+static const uint32_t wcd_imped_val[] = {4, 8, 12, 13, 16,
+ 20, 24, 28, 32,
+ 36, 40, 44, 48};
+
+/* Broadcast a digital-codec event on the shared msmfalcon notifier chain. */
+static void msm_anlg_cdc_dig_notifier_call(struct snd_soc_codec *codec,
+					const enum dig_cdc_notify_event event)
+{
+	struct msmfalcon_cdc *cdc = codec->control_data;
+
+	pr_debug("%s: notifier call event %d\n", __func__, event);
+	blocking_notifier_call_chain(&cdc->notifier, event, NULL);
+}
+
+/* Broadcast an MBHC event on this codec's analog notifier chain. */
+static void msm_anlg_cdc_notifier_call(struct snd_soc_codec *codec,
+				       const enum wcd_notify_event event)
+{
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: notifier call event %d\n", __func__, event);
+	blocking_notifier_call_chain(&priv->notifier, event, &priv->mbhc);
+}
+
+/*
+ * msm_anlg_cdc_boost_on - power-up sequence for the speaker boost supply
+ * @codec: codec instance
+ *
+ * Unlocks the secure register page (SEC_ACCESS 0xA5), releases the analog
+ * reset, raises the master bias and programs the current limit (value
+ * depends on codec version), then enables the boost with the per-version
+ * clock/enable ordering and settle delays.  Order and delays are part of
+ * the hardware sequence - do not reorder.
+ */
+static void msm_anlg_cdc_boost_on(struct snd_soc_codec *codec)
+{
+ struct msmfalcon_cdc_priv *msmfalcon_cdc =
+ snd_soc_codec_get_drvdata(codec);
+
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F, 0x0F);
+ /* SEC_ACCESS magic 0xA5 unlocks the following protected write */
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5);
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F);
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
+ if (get_codec_version(msmfalcon_cdc) < CAJON_2_0)
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82);
+ else
+ snd_soc_write(codec, MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0xA2);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+ 0x69, 0x69);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG,
+ 0x01, 0x01);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO,
+ 0x88, 0x88);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+ 0x03, 0x03);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL,
+ 0xE1, 0xE1);
+ if (get_codec_version(msmfalcon_cdc) < CAJON_2_0) {
+ snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x20);
+ /* Wait for 1ms after clock ctl enable */
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0xDF);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ } else {
+ /* CAJON_2_0+: enable boost before pulse-skip, with settle
+ * delays between the steps
+ */
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+ 0x40, 0x00);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x20);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+ 0x80, 0x80);
+ /* Wait for 500us after BOOST_EN to happen */
+ usleep_range(500, 510);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+ 0x40, 0x40);
+ /* Wait for 500us after BOOST pulse_skip */
+ usleep_range(500, 510);
+ }
+}
+
+/*
+ * msm_anlg_cdc_boost_off - disable the speaker boost, then gate its clock.
+ * The boost enable bits are cleared before the digital clock is removed;
+ * keep this ordering.
+ */
+static void msm_anlg_cdc_boost_off(struct snd_soc_codec *codec)
+{
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0x5F);
+ snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x00);
+}
+
+/*
+ * msm_anlg_cdc_bypass_on - route the speaker PA through the boost-bypass
+ * path.  Pre-CAJON_2_0 parts need a secure-page unlock and a multi-bit
+ * BYPASS_MODE sequence plus an explicit boost enable; newer parts only
+ * need the clock gate and a single bypass bit.
+ */
+static void msm_anlg_cdc_bypass_on(struct snd_soc_codec *codec)
+{
+ struct msmfalcon_cdc_priv *msmfalcon_cdc =
+ snd_soc_codec_get_drvdata(codec);
+
+ if (get_codec_version(msmfalcon_cdc) < CAJON_2_0) {
+ /* SEC_ACCESS magic 0xA5 unlocks the protected reset write */
+ snd_soc_write(codec,
+ MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+ 0xA5);
+ snd_soc_write(codec,
+ MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3,
+ 0x07);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x01, 0x00);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x40, 0x40);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x80, 0x80);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0xDF);
+ } else {
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x20);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x20, 0x20);
+ }
+}
+
+/*
+ * msm_anlg_cdc_bypass_off - undo msm_anlg_cdc_bypass_on for the current
+ * codec version, clearing the bypass bits (and, pre-CAJON_2_0, the boost
+ * enable) in the reverse style of the enable sequence.
+ */
+static void msm_anlg_cdc_bypass_off(struct snd_soc_codec *codec)
+{
+ struct msmfalcon_cdc_priv *msmfalcon_cdc =
+ snd_soc_codec_get_drvdata(codec);
+
+ if (get_codec_version(msmfalcon_cdc) < CAJON_2_0) {
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+ 0x80, 0x00);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x80, 0x00);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x02, 0x00);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x40, 0x00);
+ } else {
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+ 0x20, 0x00);
+ snd_soc_update_bits(codec,
+ MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x00);
+ }
+}
+
+/*
+ * msm_anlg_cdc_boost_mode_sequence - run the boost/bypass transition that
+ * matches a PA power event (EAR/SPK power-up or power-down) under the
+ * configured boost_option policy.  Settle delays follow the hardware's
+ * requirements.
+ */
+static void msm_anlg_cdc_boost_mode_sequence(struct snd_soc_codec *codec,
+					     int flag)
+{
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (flag) {
+	case EAR_PMU:
+		switch (priv->boost_option) {
+		case BOOST_SWITCH:
+			if (priv->ear_pa_boost_set) {
+				msm_anlg_cdc_boost_off(codec);
+				msm_anlg_cdc_bypass_on(codec);
+			}
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			msm_anlg_cdc_boost_on(codec);
+			break;
+		case BYPASS_ALWAYS:
+			msm_anlg_cdc_bypass_on(codec);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				priv->boost_option);
+			break;
+		}
+		break;
+	case EAR_PMD:
+		switch (priv->boost_option) {
+		case BOOST_SWITCH:
+			if (priv->ear_pa_boost_set)
+				msm_anlg_cdc_bypass_off(codec);
+			break;
+		case BOOST_ALWAYS:
+			msm_anlg_cdc_boost_off(codec);
+			/* 80ms for EAR boost to settle down */
+			msleep(80);
+			break;
+		case BYPASS_ALWAYS:
+			/* nothing to do as bypass on always */
+			break;
+		case BOOST_ON_FOREVER:
+			/* nothing to do as boost on forever */
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				priv->boost_option);
+			break;
+		}
+		break;
+	case SPK_PMU:
+		switch (priv->boost_option) {
+		case BOOST_SWITCH:
+			if (priv->spk_boost_set) {
+				msm_anlg_cdc_bypass_off(codec);
+				msm_anlg_cdc_boost_on(codec);
+			}
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			msm_anlg_cdc_boost_on(codec);
+			break;
+		case BYPASS_ALWAYS:
+			msm_anlg_cdc_bypass_on(codec);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				priv->boost_option);
+			break;
+		}
+		break;
+	case SPK_PMD:
+		switch (priv->boost_option) {
+		case BOOST_SWITCH:
+			if (!priv->spk_boost_set)
+				break;
+			/* fall through to the common boost-off + settle */
+		case BOOST_ALWAYS:
+			msm_anlg_cdc_boost_off(codec);
+			/*
+			 * Add 40 ms sleep for the spk
+			 * boost to settle down
+			 */
+			msleep(40);
+			break;
+		case BYPASS_ALWAYS:
+			/* nothing to do as bypass on always */
+			break;
+		case BOOST_ON_FOREVER:
+			/* nothing to do as boost on forever */
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				priv->boost_option);
+			break;
+		}
+		break;
+	default:
+		/* other flags require no boost sequencing */
+		break;
+	}
+}
+
+/*
+ * msm_anlg_cdc_dt_parse_vreg_info - fill one regulator descriptor from DT
+ * @dev: codec device (its of_node is queried)
+ * @vreg: descriptor to populate (name, ondemand, min/max uV, optimum uA)
+ * @vreg_name: base supply name, e.g. "cdc-vdda"
+ * @ondemand: whether the supply is an on-demand one
+ *
+ * Verifies "<name>-supply" exists, then reads "qcom,<name>-voltage"
+ * (a <min max> pair in uV) and "qcom,<name>-current" (uA).
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_anlg_cdc_dt_parse_vreg_info(struct device *dev,
+	struct msmfalcon_cdc_regulator *vreg, const char *vreg_name,
+	bool ondemand)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[CODEC_DT_MAX_PROP_SIZE];
+	struct device_node *regnode = NULL;
+	u32 prop_val;
+
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "%s-supply",
+		vreg_name);
+	regnode = of_parse_phandle(dev->of_node, prop_name, 0);
+
+	if (!regnode) {
+		dev_err(dev, "Looking up %s property in node %s failed\n",
+			prop_name, dev->of_node->full_name);
+		return -ENODEV;
+	}
+	/*
+	 * The phandle is only resolved to confirm the supply exists;
+	 * drop the node reference taken by of_parse_phandle().
+	 */
+	of_node_put(regnode);
+
+	dev_dbg(dev, "Looking up %s property in node %s\n",
+		prop_name, dev->of_node->full_name);
+
+	vreg->name = vreg_name;
+	vreg->ondemand = ondemand;
+
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
+		"qcom,%s-voltage", vreg_name);
+	prop = of_get_property(dev->of_node, prop_name, &len);
+
+	/* Expect exactly two cells: <min_uV max_uV> */
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_err(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+		return -EINVAL;
+	}
+	vreg->min_uv = be32_to_cpup(&prop[0]);
+	vreg->max_uv = be32_to_cpup(&prop[1]);
+
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
+		"qcom,%s-current", vreg_name);
+
+	ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
+	if (ret) {
+		dev_err(dev, "Looking up %s property in node %s failed",
+			prop_name, dev->of_node->full_name);
+		return -EFAULT;
+	}
+	vreg->optimum_ua = prop_val;
+
+	dev_dbg(dev, "%s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n\n", vreg->name,
+		vreg->min_uv, vreg->max_uv, vreg->optimum_ua, vreg->ondemand);
+	return 0;
+}
+
+/*
+ * Read qcom,cdc-boost-voltage from DT, clamp it to the supported range
+ * (falling back to DEFAULT_BOOST_VOLTAGE) and cache the converted
+ * register setting in the codec private data.
+ */
+static void msm_anlg_cdc_dt_parse_boost_info(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+	const char *prop_name = "qcom,cdc-boost-voltage";
+	int boost_voltage;
+
+	if (of_property_read_u32(codec->dev->of_node, prop_name,
+				 &boost_voltage)) {
+		dev_dbg(codec->dev, "Looking up %s property in node %s failed\n",
+			prop_name, codec->dev->of_node->full_name);
+		boost_voltage = DEFAULT_BOOST_VOLTAGE;
+	}
+	if (boost_voltage < MIN_BOOST_VOLTAGE ||
+	    boost_voltage > MAX_BOOST_VOLTAGE) {
+		dev_err(codec->dev,
+			"Incorrect boost voltage. Reverting to default\n");
+		boost_voltage = DEFAULT_BOOST_VOLTAGE;
+	}
+
+	priv->boost_voltage = VOLTAGE_CONVERTER(boost_voltage,
+						MIN_BOOST_VOLTAGE,
+						BOOST_VOLTAGE_STEP);
+	dev_dbg(codec->dev, "Boost voltage value is: %d\n",
+		boost_voltage);
+}
+
+/*
+ * Read the micbias cfilt voltage (mV) from DT into the micbias settings,
+ * falling back to MICBIAS_DEFAULT_VAL when the property is absent.
+ */
+static void msm_anlg_cdc_dt_parse_micbias_info(struct device *dev,
+					struct wcd_micbias_setting *micbias)
+{
+	const char *prop_name = "qcom,cdc-micbias-cfilt-mv";
+
+	if (of_property_read_u32(dev->of_node, prop_name,
+				 &micbias->cfilt1_mv)) {
+		dev_dbg(dev, "Looking up %s property in node %s failed",
+			prop_name, dev->of_node->full_name);
+		micbias->cfilt1_mv = MICBIAS_DEFAULT_VAL;
+	}
+}
+
+/*
+ * msm_anlg_cdc_populate_dt_pdata - build the codec pdata from device tree
+ * @dev: codec device
+ *
+ * Allocates the pdata, parses the mandatory static supply list and the
+ * optional on-demand supply list (each entry via
+ * msm_anlg_cdc_dt_parse_vreg_info), then the micbias settings.
+ * Returns the populated pdata, or NULL on any parse failure (the
+ * allocation is freed on the error path).
+ * NOTE(review): IS_ERR_VALUE() on a plain int count relies on negative
+ * errnos mapping into the error range - matches the era's kernel usage.
+ */
+static struct msmfalcon_cdc_pdata *msm_anlg_cdc_populate_dt_pdata(
+ struct device *dev)
+{
+ struct msmfalcon_cdc_pdata *pdata;
+ int ret, static_cnt, ond_cnt, idx, i;
+ const char *name = NULL;
+ const char *static_prop_name = "qcom,cdc-static-supplies";
+ const char *ond_prop_name = "qcom,cdc-on-demand-supplies";
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ static_cnt = of_property_count_strings(dev->of_node, static_prop_name);
+ if (IS_ERR_VALUE(static_cnt)) {
+ dev_err(dev, "%s: Failed to get static supplies %d\n", __func__,
+ static_cnt);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* On-demand supply list is an optional property */
+ ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
+ if (IS_ERR_VALUE(ond_cnt))
+ ond_cnt = 0;
+
+ WARN_ON(static_cnt <= 0 || ond_cnt < 0);
+ if ((static_cnt + ond_cnt) > ARRAY_SIZE(pdata->regulator)) {
+ dev_err(dev, "%s: Num of supplies %u > max supported %zd\n",
+ __func__, (static_cnt + ond_cnt),
+ ARRAY_SIZE(pdata->regulator));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Static supplies fill regulator[0..static_cnt) */
+ for (idx = 0; idx < static_cnt; idx++) {
+ ret = of_property_read_string_index(dev->of_node,
+ static_prop_name, idx,
+ &name);
+ if (ret) {
+ dev_err(dev, "%s: of read string %s idx %d error %d\n",
+ __func__, static_prop_name, idx, ret);
+ goto err;
+ }
+
+ dev_dbg(dev, "%s: Found static cdc supply %s\n", __func__,
+ name);
+ ret = msm_anlg_cdc_dt_parse_vreg_info(dev,
+ &pdata->regulator[idx],
+ name, false);
+ if (ret) {
+ dev_err(dev, "%s:err parsing vreg for %s idx %d\n",
+ __func__, name, idx);
+ goto err;
+ }
+ }
+
+ /* On-demand supplies continue at regulator[idx] */
+ for (i = 0; i < ond_cnt; i++, idx++) {
+ ret = of_property_read_string_index(dev->of_node, ond_prop_name,
+ i, &name);
+ if (ret) {
+ dev_err(dev, "%s: err parsing on_demand for %s idx %d\n",
+ __func__, ond_prop_name, i);
+ goto err;
+ }
+
+ dev_dbg(dev, "%s: Found on-demand cdc supply %s\n", __func__,
+ name);
+ ret = msm_anlg_cdc_dt_parse_vreg_info(dev,
+ &pdata->regulator[idx],
+ name, true);
+ if (ret) {
+ dev_err(dev, "%s: err parsing vreg on_demand for %s idx %d\n",
+ __func__, name, idx);
+ goto err;
+ }
+ }
+ msm_anlg_cdc_dt_parse_micbias_info(dev, &pdata->micbias);
+
+ return pdata;
+err:
+ devm_kfree(dev, pdata);
+ dev_err(dev, "%s: Failed to populate DT data ret = %d\n",
+ __func__, ret);
+ return NULL;
+}
+
+/*
+ * msm_anlg_cdc_codec_enable_on_demand_supply - DAPM handler that reference-
+ * counts an on-demand regulator selected by the widget shift
+ * @w: DAPM widget; w->shift indexes on_demand_list[]
+ * @kcontrol: unused
+ * @event: DAPM event (PRE_PMU enables, POST_PMD disables)
+ *
+ * The regulator is physically enabled only on the 0->1 ref transition and
+ * disabled on the 1->0 transition.  Returns 0 or a regulator errno.
+ * NOTE(review): if regulator_enable() fails the ref count stays bumped,
+ * so a later disable rebalances it - mirrors the legacy driver; confirm
+ * before changing.
+ */
+static int msm_anlg_cdc_codec_enable_on_demand_supply(
+ struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ int ret = 0;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct msmfalcon_cdc_priv *msmfalcon_cdc =
+ snd_soc_codec_get_drvdata(codec);
+ struct on_demand_supply *supply;
+
+ if (w->shift >= ON_DEMAND_SUPPLIES_MAX) {
+ dev_err(codec->dev, "%s: error index > MAX Demand supplies",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ dev_dbg(codec->dev, "%s: supply: %s event: %d ref: %d\n",
+ __func__, on_demand_supply_name[w->shift], event,
+ atomic_read(&msmfalcon_cdc->on_demand_list[w->shift].ref));
+
+ supply = &msmfalcon_cdc->on_demand_list[w->shift];
+ WARN_ONCE(!supply->supply, "%s isn't defined\n",
+ on_demand_supply_name[w->shift]);
+ if (!supply->supply) {
+ dev_err(codec->dev, "%s: err supply not present ond for %d",
+ __func__, w->shift);
+ goto out;
+ }
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Only the first user actually turns the regulator on */
+ if (atomic_inc_return(&supply->ref) == 1)
+ ret = regulator_enable(supply->supply);
+ if (ret)
+ dev_err(codec->dev, "%s: Failed to enable %s\n",
+ __func__,
+ on_demand_supply_name[w->shift]);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* Guard against unbalanced disables */
+ if (atomic_read(&supply->ref) == 0) {
+ dev_dbg(codec->dev, "%s: %s supply has been disabled.\n",
+ __func__, on_demand_supply_name[w->shift]);
+ goto out;
+ }
+ /* Only the last user actually turns the regulator off */
+ if (atomic_dec_return(&supply->ref) == 0)
+ ret = regulator_disable(supply->supply);
+ if (ret)
+ dev_err(codec->dev, "%s: Failed to disable %s\n",
+ __func__,
+ on_demand_supply_name[w->shift]);
+ break;
+ default:
+ break;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Enable or disable the analog codec's master clock block.
+ * On enable: master bias, digital reset release, then top-level clocks,
+ * followed by a CLK_ON notification to the digital codec; on disable the
+ * top-level clocks are gated and CLK_OFF is broadcast. Always returns 0.
+ *
+ * Fix: drop the 'pdata' local that was fetched from the card drvdata but
+ * never used.
+ */
+static int msm_anlg_cdc_codec_enable_clock_block(struct snd_soc_codec *codec,
+						 int enable)
+{
+	if (enable) {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30, 0x30);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x0C);
+		msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_ON);
+	} else {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
+		msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_OFF);
+	}
+	return 0;
+}
+
+/*
+ * DAPM event handler for the headphone/ear charge pump. The register
+ * sequence (and its ordering) differs for the "EAR CP" widget vs. the
+ * HPH path, and by codec version (DIANGU and later use a narrower
+ * clock-enable mask). Always returns 0.
+ */
+static int msm_anlg_cdc_codec_enable_charge_pump(struct snd_soc_dapm_widget *w,
+						 struct snd_kcontrol *kcontrol,
+						 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: event = %d\n", __func__, event);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Clocks must be up before touching the charge pump */
+		msm_anlg_cdc_codec_enable_clock_block(codec, 1);
+		if (!(strcmp(w->name, "EAR CP"))) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0x80, 0x80);
+			msm_anlg_cdc_boost_mode_sequence(codec, EAR_PMU);
+		} else if (get_codec_version(msmfalcon_cdc) >= DIANGU) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0x80, 0x80);
+		} else {
+			/* Older codecs need the extra 0x40 clock bit too */
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0xC0, 0xC0);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Wait for 1ms post powerup of chargepump */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Wait for 1ms post powerdown of chargepump */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		if (!(strcmp(w->name, "EAR CP"))) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0x80, 0x00);
+			if (msmfalcon_cdc->boost_option != BOOST_ALWAYS) {
+				dev_dbg(codec->dev,
+					"%s: boost_option:%d, tear down ear\n",
+					__func__, msmfalcon_cdc->boost_option);
+				msm_anlg_cdc_boost_mode_sequence(codec,
+								 EAR_PMD);
+			}
+			/*
+			 * Reset pa select bit from ear to hph after ear pa
+			 * is disabled and HPH DAC disable to reduce ear
+			 * turn off pop and avoid HPH pop in concurrency
+			 */
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x80, 0x00);
+		} else {
+			if (get_codec_version(msmfalcon_cdc) < DIANGU)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x40, 0x00);
+			/* Keep the 0x80 clock on while RX bias users remain */
+			if (msmfalcon_cdc->rx_bias_count == 0)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x80, 0x00);
+			dev_dbg(codec->dev, "%s: rx_bias_count = %d\n",
+				__func__, msmfalcon_cdc->rx_bias_count);
+		}
+		break;
+	}
+	return 0;
+}
+
+/* Mixer-control get: report the EAR PA boost state as 0 or 1. */
+static int msm_anlg_cdc_ear_pa_boost_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	if (priv->ear_pa_boost_set)
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->ear_pa_boost_set = %d\n",
+		__func__, priv->ear_pa_boost_set);
+	return 0;
+}
+
+/* Mixer-control put: latch the EAR PA boost request (applied later). */
+static int msm_anlg_cdc_ear_pa_boost_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+	priv->ear_pa_boost_set = !!ucontrol->value.integer.value[0];
+	return 0;
+}
+
+/*
+ * Mixer-control get for "LOOPBACK Mode".
+ *
+ * Fix: the original returned pdata->lb_mode as the function's return
+ * value instead of filling ucontrol->value.integer.value[0]; ALSA treats
+ * a nonzero return from a get handler as an error and the control value
+ * was never reported. Report the mode in the control value and return 0.
+ */
+static int msm_anlg_cdc_loopback_mode_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	ucontrol->value.integer.value[0] = pdata->lb_mode;
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+/*
+ * Mixer-control put for "LOOPBACK Mode": accepts 0 (disable) or
+ * 1 (enable); any other value is rejected with -EINVAL.
+ */
+static int msm_anlg_cdc_loopback_mode_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_asoc_mach_data *pdata = NULL;
+	long val = ucontrol->value.integer.value[0];
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	pdata->lb_mode = (val == 1);
+	return 0;
+}
+
+/*
+ * Mixer-control get for "EAR PA Gain".
+ *
+ * Fixes: the original wrote the mapped value into value[1]/value[2]/
+ * value[3] for a single-value control (only value[0] is meaningful), and
+ * then unconditionally overwrote value[0] with the raw register field at
+ * the end, clobbering the mapped value on every path. Now each branch
+ * fills value[0] and the final overwrite is removed.
+ *
+ * DIANGU+: register field bits[2:1] (0..3) maps inversely to control
+ * values 3..0. Older codecs: bit[5] 0 -> control 0, 1 -> control 3.
+ */
+static int msm_anlg_cdc_pa_gain_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(msmfalcon_cdc) >= DIANGU) {
+		ear_pa_gain = snd_soc_read(codec,
+					   MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC);
+		ear_pa_gain = (ear_pa_gain >> 1) & 0x3;
+		/* Field 0..3 maps inversely onto control values 3..0 */
+		ucontrol->value.integer.value[0] = 0x3 - ear_pa_gain;
+	} else {
+		ear_pa_gain = snd_soc_read(codec,
+					   MSM89XX_PMIC_ANALOG_RX_EAR_CTL);
+		ear_pa_gain = (ear_pa_gain >> 5) & 0x1;
+		if (ear_pa_gain == 0x00) {
+			ucontrol->value.integer.value[0] = 0;
+		} else if (ear_pa_gain == 0x01) {
+			ucontrol->value.integer.value[0] = 3;
+		} else {
+			dev_err(codec->dev,
+				"%s: ERROR: Unsupported Ear Gain = 0x%x\n",
+				__func__, ear_pa_gain);
+			return -EINVAL;
+		}
+	}
+	dev_dbg(codec->dev, "%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
+	return 0;
+}
+
+/*
+ * Mixer-control put for "EAR PA Gain".
+ * DIANGU+: control 0..3 maps inversely onto register bits[2:1]
+ * (0x06, 0x04, 0x02, 0x00). Older codecs support only control values
+ * 0 (0x00) and 3 (0x20) on bit[5]; everything else is -EINVAL.
+ */
+static int msm_anlg_cdc_pa_gain_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	long val;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	val = ucontrol->value.integer.value[0];
+	if (get_codec_version(msmfalcon_cdc) >= DIANGU) {
+		if (val < 0 || val > 3)
+			return -EINVAL;
+		/* control 0..3 -> 0x06, 0x04, 0x02, 0x00 */
+		ear_pa_gain = (u8)((3 - val) << 1);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+				    0x06, ear_pa_gain);
+	} else {
+		if (val == 0)
+			ear_pa_gain = 0x00;
+		else if (val == 3)
+			ear_pa_gain = 0x20;
+		else
+			return -EINVAL;
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+				    0x20, ear_pa_gain);
+	}
+	return 0;
+}
+
+/*
+ * Mixer-control get for "RX HPH Mode": NORMAL -> 0, HD2 -> 1.
+ * An unexpected mode is logged but still returns 0 (control value left
+ * untouched), matching the original behavior.
+ */
+static int msm_anlg_cdc_hph_mode_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (priv->hph_mode) {
+	case NORMAL_MODE:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	case HD2_MODE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	default:
+		dev_err(codec->dev, "%s: ERROR: Default HPH Mode= %d\n",
+			__func__, priv->hph_mode);
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->hph_mode = %d\n", __func__,
+		priv->hph_mode);
+	return 0;
+}
+
+/*
+ * Mixer-control put for "RX HPH Mode": 0 -> NORMAL, 1 -> HD2.
+ * HD2 is only honored on DIANGU and later; on older codecs a request for
+ * HD2 silently leaves the current mode unchanged (no error is returned).
+ * Any other value falls back to NORMAL. Always returns 0.
+ */
+static int msm_anlg_cdc_hph_mode_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msmfalcon_cdc->hph_mode = NORMAL_MODE;
+		break;
+	case 1:
+		/* HD2 requires codec version >= DIANGU; otherwise no-op */
+		if (get_codec_version(msmfalcon_cdc) >= DIANGU)
+			msmfalcon_cdc->hph_mode = HD2_MODE;
+		break;
+	default:
+		msmfalcon_cdc->hph_mode = NORMAL_MODE;
+		break;
+	}
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->hph_mode_set = %d\n",
+		__func__, msmfalcon_cdc->hph_mode);
+	return 0;
+}
+
+/*
+ * Mixer-control get for "Boost Option":
+ * BOOST_SWITCH -> 0, BOOST_ALWAYS -> 1, BYPASS_ALWAYS -> 2,
+ * BOOST_ON_FOREVER -> 3; anything else is an error.
+ */
+static int msm_anlg_cdc_boost_option_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (priv->boost_option) {
+	case BOOST_SWITCH:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	case BOOST_ALWAYS:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case BYPASS_ALWAYS:
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case BOOST_ON_FOREVER:
+		ucontrol->value.integer.value[0] = 3;
+		break;
+	default:
+		dev_err(codec->dev, "%s: ERROR: Unsupported Boost option= %d\n",
+			__func__, priv->boost_option);
+		return -EINVAL;
+	}
+
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->boost_option = %d\n", __func__,
+		priv->boost_option);
+	return 0;
+}
+
+/*
+ * Mixer-control put for "Boost Option". Values 2 (BYPASS_ALWAYS) and
+ * 3 (BOOST_ON_FOREVER) immediately program the corresponding hardware
+ * sequence; 0/1 only latch the policy.
+ *
+ * Fix: use dev_err (like every other handler in this file) instead of
+ * pr_err so the error is attributed to the codec device.
+ */
+static int msm_anlg_cdc_boost_option_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msmfalcon_cdc->boost_option = BOOST_SWITCH;
+		break;
+	case 1:
+		msmfalcon_cdc->boost_option = BOOST_ALWAYS;
+		break;
+	case 2:
+		msmfalcon_cdc->boost_option = BYPASS_ALWAYS;
+		msm_anlg_cdc_bypass_on(codec);
+		break;
+	case 3:
+		msmfalcon_cdc->boost_option = BOOST_ON_FOREVER;
+		msm_anlg_cdc_boost_on(codec);
+		break;
+	default:
+		dev_err(codec->dev, "%s: invalid boost option: %d\n", __func__,
+			msmfalcon_cdc->boost_option);
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->boost_option_set = %d\n",
+		__func__, msmfalcon_cdc->boost_option);
+	return 0;
+}
+
+/*
+ * Mixer-control get for "Speaker Boost": report spk_boost_set as 0/1.
+ *
+ * Fix: spk_boost_set is a bool, so the original's third branch (error +
+ * -EINVAL) was unreachable dead code; removed.
+ */
+static int msm_anlg_cdc_spk_boost_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] =
+			msmfalcon_cdc->spk_boost_set ? 1 : 0;
+
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->spk_boost_set = %d\n", __func__,
+		msmfalcon_cdc->spk_boost_set);
+	return 0;
+}
+
+/*
+ * Mixer-control put for "Speaker Boost": 0 disables, 1 enables,
+ * anything else is rejected with -EINVAL.
+ */
+static int msm_anlg_cdc_spk_boost_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+	long val = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	priv->spk_boost_set = (val == 1);
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->spk_boost_set = %d\n",
+		__func__, priv->spk_boost_set);
+	return 0;
+}
+
+/* Mixer-control get for "Ext Spk Boost": report ext_spk_boost_set as 0/1. */
+static int msm_anlg_cdc_ext_spk_boost_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = priv->ext_spk_boost_set ? 1 : 0;
+
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->ext_spk_boost_set = %d\n",
+		__func__, priv->ext_spk_boost_set);
+	return 0;
+}
+
+/*
+ * Mixer-control put for "Ext Spk Boost": 0 disables, 1 enables,
+ * anything else is rejected with -EINVAL.
+ *
+ * Fix: the trailing debug print logged spk_boost_set (copy-paste from
+ * the internal-boost handler); it now logs the field this handler
+ * actually writes, ext_spk_boost_set.
+ */
+static int msm_anlg_cdc_ext_spk_boost_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msmfalcon_cdc->ext_spk_boost_set = false;
+		break;
+	case 1:
+		msmfalcon_cdc->ext_spk_boost_set = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: msmfalcon_cdc->ext_spk_boost_set = %d\n",
+		__func__, msmfalcon_cdc->ext_spk_boost_set);
+	return 0;
+}
+
+
+/* Enum texts and soc_enum tables backing the mixer controls below. */
+static const char * const msm_anlg_cdc_loopback_mode_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_loopback_mode_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_loopback_mode_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_ear_pa_boost_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_ear_pa_boost_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_ear_pa_boost_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_ear_pa_gain_text[] = {
+		"POS_1P5_DB", "POS_3_DB", "POS_4P5_DB", "POS_6_DB"};
+static const struct soc_enum msm_anlg_cdc_ear_pa_gain_enum[] = {
+		SOC_ENUM_SINGLE_EXT(4, msm_anlg_cdc_ear_pa_gain_text),
+};
+
+static const char * const msm_anlg_cdc_boost_option_ctrl_text[] = {
+		"BOOST_SWITCH", "BOOST_ALWAYS", "BYPASS_ALWAYS",
+		"BOOST_ON_FOREVER"};
+static const struct soc_enum msm_anlg_cdc_boost_option_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(4, msm_anlg_cdc_boost_option_ctrl_text),
+};
+static const char * const msm_anlg_cdc_spk_boost_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_spk_boost_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_spk_boost_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_ext_spk_boost_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_ext_spk_boost_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_ext_spk_boost_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_hph_mode_ctrl_text[] = {
+		"NORMAL", "HD2"};
+static const struct soc_enum msm_anlg_cdc_hph_mode_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(msm_anlg_cdc_hph_mode_ctrl_text),
+			msm_anlg_cdc_hph_mode_ctrl_text),
+};
+
+/*cut of frequency for high pass filter*/
+static const char * const cf_text[] = {
+	"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
+};
+
+
+/*
+ * Userspace-visible mixer controls for the analog codec: enum controls
+ * wired to the get/set handlers above, plus TLV volume controls for the
+ * three analog TX (ADC) gain fields.
+ */
+static const struct snd_kcontrol_new msm_anlg_cdc_snd_controls[] = {
+
+	SOC_ENUM_EXT("RX HPH Mode", msm_anlg_cdc_hph_mode_ctl_enum[0],
+		msm_anlg_cdc_hph_mode_get, msm_anlg_cdc_hph_mode_set),
+
+	SOC_ENUM_EXT("Boost Option", msm_anlg_cdc_boost_option_ctl_enum[0],
+		msm_anlg_cdc_boost_option_get, msm_anlg_cdc_boost_option_set),
+
+	SOC_ENUM_EXT("EAR PA Boost", msm_anlg_cdc_ear_pa_boost_ctl_enum[0],
+		msm_anlg_cdc_ear_pa_boost_get, msm_anlg_cdc_ear_pa_boost_set),
+
+	SOC_ENUM_EXT("EAR PA Gain", msm_anlg_cdc_ear_pa_gain_enum[0],
+		msm_anlg_cdc_pa_gain_get, msm_anlg_cdc_pa_gain_put),
+
+	SOC_ENUM_EXT("Speaker Boost", msm_anlg_cdc_spk_boost_ctl_enum[0],
+		msm_anlg_cdc_spk_boost_get, msm_anlg_cdc_spk_boost_set),
+
+	SOC_ENUM_EXT("Ext Spk Boost", msm_anlg_cdc_ext_spk_boost_ctl_enum[0],
+		msm_anlg_cdc_ext_spk_boost_get, msm_anlg_cdc_ext_spk_boost_set),
+
+	SOC_ENUM_EXT("LOOPBACK Mode", msm_anlg_cdc_loopback_mode_ctl_enum[0],
+		msm_anlg_cdc_loopback_mode_get, msm_anlg_cdc_loopback_mode_put),
+	SOC_SINGLE_TLV("ADC1 Volume", MSM89XX_PMIC_ANALOG_TX_1_EN, 3,
+					8, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC2 Volume", MSM89XX_PMIC_ANALOG_TX_2_EN, 3,
+					8, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC3 Volume", MSM89XX_PMIC_ANALOG_TX_3_EN, 3,
+					8, 0, analog_gain),
+
+
+};
+
+/*
+ * Mixer-control get for "HPHL/HPHR Impedance": report the MBHC-measured
+ * headphone impedance; mc->shift selects left (0) or right (1).
+ *
+ * Fix: zl/zr were read uninitialized when wcd_mbhc_get_impedance()
+ * failed (the error was only logged); initialize both to 0 so a failure
+ * reports 0 instead of stack garbage.
+ */
+static int tombak_hph_impedance_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	int ret;
+	uint32_t zl = 0, zr = 0;
+	bool hphr;
+	struct soc_multi_mixer_control *mc;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
+
+	hphr = mc->shift;
+	ret = wcd_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
+	if (ret)
+		dev_dbg(codec->dev, "%s: Failed to get mbhc imped", __func__);
+	dev_dbg(codec->dev, "%s: zl %u, zr %u\n", __func__, zl, zr);
+	ucontrol->value.integer.value[0] = hphr ? zr : zl;
+
+	return 0;
+}
+
+/* Read-only controls exposing the MBHC headphone impedance per channel. */
+static const struct snd_kcontrol_new impedance_detect_controls[] = {
+	SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
+		       tombak_hph_impedance_get, NULL),
+	SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
+		       tombak_hph_impedance_get, NULL),
+};
+
+/*
+ * Mixer-control get for "HPH Type": report the headphone type detected
+ * by MBHC.
+ *
+ * Fix: the "!mbhc" check was dead code — mbhc was assigned &priv->mbhc,
+ * the address of an embedded struct member, which can never be NULL once
+ * priv itself has been validated. Removed.
+ */
+static int tombak_get_hph_type(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct wcd_mbhc *mbhc;
+
+	if (!priv) {
+		dev_err(codec->dev,
+			"%s: msmfalcon_cdc-wcd private data is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mbhc = &priv->mbhc;
+
+	ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
+	dev_dbg(codec->dev, "%s: hph_type = %u\n", __func__, mbhc->hph_type);
+
+	return 0;
+}
+
+/* Read-only control exposing the MBHC-detected headphone type. */
+static const struct snd_kcontrol_new hph_type_detect_controls[] = {
+	SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
+		       tombak_get_hph_type, NULL),
+};
+
+/*
+ * DAPM routing tables: enum texts, soc_enum descriptors and kcontrols
+ * for the RDAC2/ADC2 muxes and the EAR/HPH/SPK/WSA path switches.
+ * Register-backed enums (e.g. rdac2) write the hardware mux directly;
+ * SND_SOC_NOPM enums are virtual routing switches.
+ */
+static const char * const rdac2_mux_text[] = {
+	"ZERO", "RX2", "RX1"
+};
+
+static const struct soc_enum rdac2_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
+		0, 3, rdac2_mux_text);
+
+static const char * const adc2_mux_text[] = {
+	"ZERO", "INP2", "INP3"
+};
+
+static const char * const ext_spk_text[] = {
+	"Off", "On"
+};
+
+static const char * const wsa_spk_text[] = {
+	"ZERO", "WSA"
+};
+
+
+
+static const char * const iir_inp1_text[] = {
+	"ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
+};
+
+static const struct soc_enum adc2_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
+		ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
+
+static const struct soc_enum ext_spk_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
+		ARRAY_SIZE(ext_spk_text), ext_spk_text);
+
+static const struct soc_enum wsa_spk_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
+		ARRAY_SIZE(wsa_spk_text), wsa_spk_text);
+
+
+
+static const struct snd_kcontrol_new ext_spk_mux =
+	SOC_DAPM_ENUM("Ext Spk Switch Mux", ext_spk_enum);
+
+
+
+static const struct snd_kcontrol_new tx_adc2_mux =
+	SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);
+
+
+static const struct snd_kcontrol_new rdac2_mux =
+	SOC_DAPM_ENUM("RDAC2 MUX Mux", rdac2_mux_enum);
+
+static const char * const ear_text[] = {
+	"ZERO", "Switch",
+};
+
+static const struct soc_enum ear_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ear_text), ear_text);
+
+static const struct snd_kcontrol_new ear_pa_mux[] = {
+	SOC_DAPM_ENUM("EAR_S", ear_enum)
+};
+
+static const struct snd_kcontrol_new wsa_spk_mux[] = {
+	SOC_DAPM_ENUM("WSA Spk Switch", wsa_spk_enum)
+};
+
+
+
+static const char * const hph_text[] = {
+	"ZERO", "Switch",
+};
+
+static const struct soc_enum hph_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(hph_text), hph_text);
+
+static const struct snd_kcontrol_new hphl_mux[] = {
+	SOC_DAPM_ENUM("HPHL", hph_enum)
+};
+
+static const struct snd_kcontrol_new hphr_mux[] = {
+	SOC_DAPM_ENUM("HPHR", hph_enum)
+};
+
+static const struct snd_kcontrol_new spkr_mux[] = {
+	SOC_DAPM_ENUM("SPK", hph_enum)
+};
+
+/* Line-out virtual path switch. */
+static const char * const lo_text[] = {
+	"ZERO", "Switch",
+};
+
+/*
+ * Fix: lo_enum was built from hph_text (copy-paste); use lo_text, which
+ * was otherwise unused. Entries are identical, so behavior is unchanged.
+ */
+static const struct soc_enum lo_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(lo_text), lo_text);
+
+static const struct snd_kcontrol_new lo_mux[] = {
+	SOC_DAPM_ENUM("LINE_OUT", lo_enum)
+};
+
+/*
+ * Reference-counted enable/disable of the shared ADC clock block.
+ * Each active ADC bumps adc_count; the clocks are only gated again when
+ * the last ADC goes away. Note the write order is mirrored: analog clock
+ * then digital on enable, digital then analog on disable.
+ */
+static void msm_anlg_cdc_codec_enable_adc_block(struct snd_soc_codec *codec,
+						int enable)
+{
+	struct msmfalcon_cdc_priv *wcd8x16 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, enable);
+
+	if (enable) {
+		wcd8x16->adc_count++;
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
+				    0x20, 0x20);
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				    0x10, 0x10);
+	} else {
+		wcd8x16->adc_count--;
+		/* Gate the clocks only when the last ADC user is gone */
+		if (!wcd8x16->adc_count) {
+			snd_soc_update_bits(codec,
+					    MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					    0x10, 0x00);
+			snd_soc_update_bits(codec,
+					    MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
+					    0x20, 0x0);
+		}
+	}
+}
+
+/*
+ * DAPM event handler for the analog TX (ADC) widgets. Sequences the
+ * shared ADC clock block, the TXFE init bit (bit 5 for TX1, bit 4 for
+ * TX2/TX3 in TX_1_2_TEST_CTL_2) and the digital connection registers,
+ * with settling delays between steps. Returns 0, or -EINVAL for an
+ * unknown widget register.
+ */
+static int msm_anlg_cdc_codec_enable_adc(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 adc_reg;
+	u8 init_bit_shift;
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	adc_reg = MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_2;
+
+	/* TX1 uses init bit 5; TX2/TX3 share init bit 4 */
+	if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
+		init_bit_shift = 5;
+	else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
+		 (w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
+		init_bit_shift = 4;
+	else {
+		dev_err(codec->dev, "%s: Error, invalid adc register\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		msm_anlg_cdc_codec_enable_adc_block(codec, 1);
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x02);
+		/*
+		 * Add delay of 10 ms to give sufficient time for the voltage
+		 * to shoot up and settle so that the txfe init does not
+		 * happen when the input voltage is changing too much.
+		 */
+		usleep_range(10000, 10010);
+		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
+				    1 << init_bit_shift);
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
+				0x03, 0x00);
+		else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
+			(w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
+				0x03, 0x00);
+		/* Wait for 1ms to allow txfe settling time */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/*
+		 * Add delay of 12 ms before deasserting the init
+		 * to reduce the tx pop
+		 */
+		usleep_range(12000, 12010);
+		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
+		/* Wait for 1ms to allow txfe settling time post powerup */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		msm_anlg_cdc_codec_enable_adc_block(codec, 0);
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x00);
+		/* Restore the connection registers to their idle value */
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
+				0x03, 0x02);
+		else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
+			(w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
+				0x03, 0x02);
+
+		break;
+	}
+	return 0;
+}
+
+/*
+ * DAPM event handler for the speaker PA. The power-up/down sequences
+ * depend on the boost_option policy (switchable boost, boost/bypass
+ * always, boost-on-forever) and on the codec version, and interleave
+ * register writes with mandatory 1 ms settling delays, so the statement
+ * order below is load-bearing. Always returns 0.
+ */
+static int msm_anlg_cdc_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Spkr clock + power stage on before DAC routing */
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x01);
+		switch (msmfalcon_cdc->boost_option) {
+		case BOOST_SWITCH:
+			/* No boost requested: route DAC through bypass */
+			if (!msmfalcon_cdc->spk_boost_set)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+					0x10, 0x10);
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			break;
+		case BYPASS_ALWAYS:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+				0x10, 0x10);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				msmfalcon_cdc->boost_option);
+			break;
+		}
+		/* Wait for 1ms after SPK_DAC CTL setting */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0xE0);
+		if (get_codec_version(msmfalcon_cdc) != TOMBAK_1_0)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x01);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Wait for 1ms after SPK_VBAT_LDO Enable */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		switch (msmfalcon_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (msmfalcon_cdc->spk_boost_set)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+					0xEF, 0xEF);
+			else
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+					0x10, 0x00);
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+				0xEF, 0xEF);
+			break;
+		case BYPASS_ALWAYS:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				msmfalcon_cdc->boost_option);
+			break;
+		}
+		/* Unmute RX3 and turn on the PA (bit 7 of the widget reg) */
+		msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX3_MUTE_OFF);
+		snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX3_MUTE_ON);
+		/*
+		 * Add 1 ms sleep for the mute to take effect
+		 */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x10);
+		/* Pre-CAJON_2_0 runs the boost teardown before PA off */
+		if (get_codec_version(msmfalcon_cdc) < CAJON_2_0)
+			msm_anlg_cdc_boost_mode_sequence(codec, SPK_PMD);
+		snd_soc_update_bits(codec, w->reg, 0x80, 0x00);
+		switch (msmfalcon_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (msmfalcon_cdc->spk_boost_set)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+					0xEF, 0x69);
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+				0xEF, 0x69);
+			break;
+		case BYPASS_ALWAYS:
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				msmfalcon_cdc->boost_option);
+			break;
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0x00);
+		/* Wait for 1ms to allow setting time for spkr path disable */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+		if (get_codec_version(msmfalcon_cdc) != TOMBAK_1_0)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
+		/* CAJON_2_0+ runs the boost teardown after full power-down */
+		if (get_codec_version(msmfalcon_cdc) >= CAJON_2_0)
+			msm_anlg_cdc_boost_mode_sequence(codec, SPK_PMD);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * DAPM event handler for the digital clock widget: brings up the clock
+ * block and the widget's own enable bit on PRE_PMU; on POST_PMD gates
+ * the 0x80 digital clock only once no RX bias users remain.
+ *
+ * Fixes: removed the 'pdata' local that was fetched but never used, and
+ * added the missing break at the end of the last case.
+ */
+static int msm_anlg_cdc_codec_enable_dig_clk(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s event %d w->name %s\n", __func__,
+		event, w->name);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		msm_anlg_cdc_codec_enable_clock_block(codec, 1);
+		snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Keep the clock on while RX bias users remain */
+		if (msmfalcon_cdc->rx_bias_count == 0)
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x80, 0x00);
+		break;
+	}
+	return 0;
+}
+
+
+
+/* True when this codec version (pre-CAJON) needs the micbias path. */
+static bool msm_anlg_cdc_use_mb(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	return get_codec_version(priv) < CAJON;
+}
+
+/*
+ * Enable/disable micbias auto-zeroing (MICB_2_EN bits 0x18), needed for
+ * special headset detection and buttons on codec versions before CONGA;
+ * CONGA and later do not require it, so the request is only logged.
+ */
+static void msm_anlg_cdc_set_auto_zeroing(struct snd_soc_codec *codec,
+					  bool enable)
+{
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(priv) >= CONGA) {
+		dev_dbg(codec->dev,
+			"%s: Auto Zeroing is not required from CONGA\n",
+			__func__);
+		return;
+	}
+
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_2_EN,
+			    0x18, enable ? 0x10 : 0x00);
+}
+
+/*
+ * Apply a fixed button-detect trim on TOMBAK 1.0 silicon; later versions
+ * are trimmed at ATE and need nothing. The SEC_ACCESS write appears to
+ * unlock the secured register page before TRIM_CTRL2 can be written —
+ * NOTE(review): order is assumed load-bearing, confirm against the PMIC
+ * register spec.
+ */
+static void msm_anlg_cdc_trim_btn_reg(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(msmfalcon_cdc) == TOMBAK_1_0) {
+		pr_debug("%s: This device needs to be trimmed\n", __func__);
+		/*
+		 * Calculate the trim value for each device used
+		 * till is comes in production by hardware team
+		 */
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+				    0xA5, 0xA5);
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_ANALOG_TRIM_CTRL2,
+				    0xFF, 0x30);
+	} else {
+		dev_dbg(codec->dev, "%s: This device is trimmed at ATE\n",
+			__func__);
+	}
+}
+
+/*
+ * MBHC callback: reference-counted force-enable/disable of the
+ * MICBIAS_REGULATOR DAPM pin. Only the 0->1 and 1->0 transitions touch
+ * DAPM; intermediate calls just adjust the counter.
+ * NOTE(review): 'count' is function-static, so it is shared across all
+ * codec instances and not locked — assumed single-instance, confirm.
+ * Returns the DAPM pin operation's result (0 on success).
+ */
+static int msm_anlg_cdc_enable_ext_mb_source(struct wcd_mbhc *wcd_mbhc,
+					     bool turn_on)
+{
+	int ret = 0;
+	static int count;
+	struct snd_soc_codec *codec = wcd_mbhc->codec;
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+	dev_dbg(codec->dev, "%s turn_on: %d count: %d\n", __func__, turn_on,
+		count);
+	if (turn_on) {
+		/* First user force-enables the pin */
+		if (!count) {
+			ret = snd_soc_dapm_force_enable_pin(dapm,
+							    "MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(dapm);
+		}
+		count++;
+	} else {
+		/* Guard against unbalanced disables */
+		if (count > 0)
+			count--;
+		if (!count) {
+			ret = snd_soc_dapm_disable_pin(dapm,
+						       "MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(dapm);
+		}
+	}
+
+	if (ret)
+		dev_err(codec->dev, "%s: Failed to %s external micbias source\n",
+			__func__, turn_on ? "enable" : "disabled");
+	else
+		dev_dbg(codec->dev, "%s: %s external micbias source\n",
+			__func__, turn_on ? "Enabled" : "Disabled");
+
+	return ret;
+}
+
+/*
+ * msm_anlg_cdc_codec_enable_micbias - DAPM event handler for the MIC BIAS
+ * widgets (Internal1/2/3, External, External2).
+ *
+ * The widget name selects which internal-bias path is programmed and the
+ * widget register (MICB_1_EN/MICB_2_EN) selects the bias block; both map
+ * onto MICB_1_INT_RBIAS for the internal-rbias bits.  MBHC is notified on
+ * micbias2 on/off transitions.  Returns 0, or -EINVAL for an unexpected
+ * widget register.
+ */
+static int msm_anlg_cdc_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+	u16 micb_int_reg;
+	char *internal1_text = "Internal1";
+	char *internal2_text = "Internal2";
+	char *internal3_text = "Internal3";
+	char *external2_text = "External2";
+	char *external_text = "External";
+	bool micbias2;
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+	switch (w->reg) {
+	case MSM89XX_PMIC_ANALOG_MICB_1_EN:
+	case MSM89XX_PMIC_ANALOG_MICB_2_EN:
+		micb_int_reg = MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS;
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: Error, invalid micbias register 0x%x\n",
+			__func__, w->reg);
+		return -EINVAL;
+	}
+
+	/* Bit 7 of MICB_2_EN reflects whether micbias2 is currently on */
+	micbias2 = (snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MICB_2_EN) & 0x80);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (strnstr(w->name, internal1_text, strlen(w->name))) {
+			if (get_codec_version(msmfalcon_cdc) >= CAJON)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
+					0x02, 0x02);
+			snd_soc_update_bits(codec, micb_int_reg, 0x80, 0x80);
+		} else if (strnstr(w->name, internal2_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x10, 0x10);
+			snd_soc_update_bits(codec, w->reg, 0x60, 0x00);
+		} else if (strnstr(w->name, internal3_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x2);
+		/*
+		 * update MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2
+		 * for external bias only, not for external2.
+		 */
+		} else if (!strnstr(w->name, external2_text, strlen(w->name)) &&
+					strnstr(w->name, external_text,
+						strlen(w->name))) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
+					0x02, 0x02);
+		}
+		if (!strnstr(w->name, external_text, strlen(w->name)))
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_EN, 0x05, 0x04);
+		if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
+			msm_anlg_cdc_configure_cap(codec, true, micbias2);
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if (get_codec_version(msmfalcon_cdc) <= TOMBAK_2_0)
+			/*
+			 * Wait for 20ms post micbias enable
+			 * for version < tombak 2.0.
+			 */
+			usleep_range(20000, 20100);
+		if (strnstr(w->name, internal1_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x40, 0x40);
+		} else if (strnstr(w->name, internal2_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x08, 0x08);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_ON);
+		/* use strlen(w->name) like the other matches (was 30) */
+		} else if (strnstr(w->name, internal3_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x01, 0x01);
+		} else if (strnstr(w->name, external2_text, strlen(w->name))) {
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_ON);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (strnstr(w->name, internal1_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0xC0, 0x40);
+		} else if (strnstr(w->name, internal2_text, strlen(w->name))) {
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_OFF);
+		/* use strlen(w->name) like the other matches (was 30) */
+		} else if (strnstr(w->name, internal3_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x0);
+		} else if (strnstr(w->name, external2_text, strlen(w->name))) {
+			/*
+			 * send micbias turn off event to mbhc driver and then
+			 * break, as no need to set MICB_1_EN register.
+			 */
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_OFF);
+			break;
+		}
+		if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
+			msm_anlg_cdc_configure_cap(codec, false, micbias2);
+		break;
+	}
+	return 0;
+}
+
+/* Program the TX front-end clock divider (whole register) with @val. */
+static void update_clkdiv(void *handle, int val)
+{
+	struct msmfalcon_cdc *msmfalcon_cdc = handle;
+
+	snd_soc_update_bits(msmfalcon_cdc->codec,
+			    MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV,
+			    0xFF, val);
+}
+
+/* Opaque-handle wrapper around get_codec_version() for the dig codec. */
+static int get_cdc_version(void *handle)
+{
+	struct msmfalcon_cdc *handle_cdc = handle;
+	struct msmfalcon_cdc_priv *priv =
+		snd_soc_codec_get_drvdata(handle_cdc->codec);
+
+	return get_codec_version(priv);
+}
+
+/*
+ * DAPM supply handler: switch the external speaker boost regulator on
+ * around SPK PA power-up and off after power-down.  No-op when the
+ * platform has not set ext_spk_boost_set.  Always returns 0; regulator
+ * failures are only logged.
+ */
+static int msmfalcon_wcd_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w,
+					       struct snd_kcontrol *kcontrol,
+					       int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+	int rc;
+
+	if (!priv->ext_spk_boost_set) {
+		dev_dbg(codec->dev, "%s: ext_boost not supported/disabled\n",
+			__func__);
+		return 0;
+	}
+	dev_dbg(codec->dev, "%s: %s %d\n", __func__, w->name, event);
+
+	if (event == SND_SOC_DAPM_PRE_PMU && priv->spkdrv_reg) {
+		rc = regulator_enable(priv->spkdrv_reg);
+		if (rc)
+			dev_err(codec->dev,
+				"%s Failed to enable spkdrv reg %s\n",
+				__func__, MSM89XX_VDD_SPKDRV_NAME);
+	} else if (event == SND_SOC_DAPM_POST_PMD && priv->spkdrv_reg) {
+		rc = regulator_disable(priv->spkdrv_reg);
+		if (rc)
+			dev_err(codec->dev,
+				"%s: Failed to disable spkdrv_reg %s\n",
+				__func__, MSM89XX_VDD_SPKDRV_NAME);
+	}
+	return 0;
+}
+
+
+/* The register address is the same as other codec so it can use resmgr */
+/*
+ * Reference-counted RX common bias/DAC supply: the first user powers the
+ * block up, the last user powers it down.  Always returns 0.
+ */
+static int msm_anlg_cdc_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	if (event == SND_SOC_DAPM_PRE_PMU) {
+		if (++priv->rx_bias_count == 1) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x80, 0x80);
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x01, 0x01);
+		}
+	} else if (event == SND_SOC_DAPM_POST_PMD) {
+		if (--priv->rx_bias_count == 0) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x01, 0x00);
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x80, 0x00);
+		}
+	}
+	dev_dbg(codec->dev, "%s rx_bias_count = %d\n",
+		__func__, priv->rx_bias_count);
+	return 0;
+}
+
+/*
+ * wcd_get_impedance_value - quantize a measured headphone impedance to an
+ * entry of the wcd_imped_val[] table.
+ * @imped: measured impedance (Ohms).
+ *
+ * Entry i is chosen when imped lies in [wcd_imped_val[i], wcd_imped_val[i+1]).
+ *
+ * NOTE(review): when imped is below wcd_imped_val[0] (or at/above the last
+ * entry) no bracket matches and the loop falls through with i at the LAST
+ * index, so sub-minimum impedances return the largest table value.  That
+ * makes the caller's "value < wcd_imped_val[0]" guard in wcd_imped_config()
+ * unreachable — confirm whether sub-4-Ohm loads should instead skip gain
+ * configuration before changing this.
+ */
+static uint32_t wcd_get_impedance_value(uint32_t imped)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(wcd_imped_val) - 1; i++) {
+		if (imped >= wcd_imped_val[i] &&
+			imped < wcd_imped_val[i + 1])
+			break;
+	}
+
+	pr_debug("%s: selected impedance value = %d\n",
+		 __func__, wcd_imped_val[i]);
+	return wcd_imped_val[i];
+}
+
+/*
+ * wcd_imped_config - program HPH gain/charge-pump settings based on the
+ * detected headphone impedance.
+ * @codec: codec instance.
+ * @imped: measured impedance (Ohms), quantized via wcd_get_impedance_value().
+ * @set_gain: true to apply impedance-dependent gain (power-up path),
+ *            false to restore the default settings (power-down path).
+ *
+ * The register recipe differs by silicon revision (Tombak/Conga vs
+ * Cajon/Diangu/Drax).  NOTE(review): the early return below can only
+ * trigger if wcd_get_impedance_value() returns something below
+ * wcd_imped_val[0], which the current table lookup never does — see the
+ * note on that function.
+ */
+static void wcd_imped_config(struct snd_soc_codec *codec,
+			     uint32_t imped, bool set_gain)
+{
+	uint32_t value;
+	int codec_version;
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	value = wcd_get_impedance_value(imped);
+
+	if (value < wcd_imped_val[0]) {
+		dev_dbg(codec->dev,
+			"%s, detected impedance is less than 4 Ohm\n",
+			__func__);
+		return;
+	}
+
+	codec_version = get_codec_version(msmfalcon_cdc);
+
+	if (set_gain) {
+		switch (codec_version) {
+		case TOMBAK_1_0:
+		case TOMBAK_2_0:
+		case CONGA:
+			/*
+			 * For 32Ohm load and higher loads, Set 0x19E
+			 * bit 5 to 1 (POS_0_DB_DI). For loads lower
+			 * than 32Ohm (such as 16Ohm load), Set 0x19E
+			 * bit 5 to 0 (POS_M4P5_DB_DI)
+			 */
+			if (value >= 32)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x20);
+			else
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x00);
+			break;
+		case CAJON:
+		case CAJON_2_0:
+		case DIANGU:
+		case DRAX_CDC:
+			/* Cajon+: also raise NCP voltage for >=13 Ohm loads */
+			if (value >= 13) {
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x20);
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_NCP_VCTRL,
+					0x07, 0x07);
+			} else {
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x00);
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_NCP_VCTRL,
+					0x07, 0x04);
+			}
+			break;
+		}
+	} else {
+		/* Power-down: restore default gain bit and NCP voltage */
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			0x20, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_NCP_VCTRL,
+			0x07, 0x04);
+	}
+
+	dev_dbg(codec->dev, "%s: Exit\n", __func__);
+}
+
+/*
+ * msm_anlg_cdc_hphl_dac_event - DAPM handler for the HPHL DAC: clock and
+ * DAC enable sequencing plus impedance-based gain configuration.
+ *
+ * Fix: impedl/impedr were uninitialized locals; on POST_PMD the code calls
+ * wcd_imped_config(codec, impedl, false) even when wcd_mbhc_get_impedance()
+ * failed, reading an indeterminate value (UB).  Zero-initialize both so
+ * the failure path is deterministic.
+ */
+static int msm_anlg_cdc_hphl_dac_event(struct snd_soc_dapm_widget *w,
+				       struct snd_kcontrol *kcontrol,
+				       int event)
+{
+	uint32_t impedl = 0, impedr = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+	ret = wcd_mbhc_get_impedance(&msmfalcon_cdc->mbhc,
+			&impedl, &impedr);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (get_codec_version(msmfalcon_cdc) > CAJON)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+				0x08, 0x08);
+		if (get_codec_version(msmfalcon_cdc) == CAJON ||
+			get_codec_version(msmfalcon_cdc) == CAJON_2_0) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST,
+				0x80, 0x80);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST,
+				0x80, 0x80);
+		}
+		if (get_codec_version(msmfalcon_cdc) > CAJON)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+				0x08, 0x00);
+		if (msmfalcon_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_PRE_RX1_INT_ON);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x01);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x02);
+		/* Apply impedance-based gain only if detection succeeded */
+		if (!ret)
+			wcd_imped_config(codec, impedl, true);
+		else
+			dev_dbg(codec->dev, "Failed to get mbhc impedance %d\n",
+				ret);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* impedl is 0 here when impedance detection failed */
+		wcd_imped_config(codec, impedl, false);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x00);
+		if (msmfalcon_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_POST_RX1_INT_OFF);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * msm_anlg_cdc_lo_dac_event - DAPM handler for the lineout DAC.
+ *
+ * The register writes are an ordered power-up/power-down recipe for the
+ * LO enable and DAC control registers; do not reorder them.  Always
+ * returns 0.
+ */
+static int msm_anlg_cdc_lo_dac_event(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable clock, then LO block, then DAC bias bits */
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x20);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x08);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x40);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x40);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Wait for 20ms before powerdown of lineout_dac */
+		usleep_range(20000, 20100);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * msm_anlg_cdc_hphr_dac_event - DAPM handler for the HPHR DAC: mirrors
+ * the HPHL sequence (clock + DAC enable, HD2 notifications) but without
+ * the impedance configuration.  Always returns 0.
+ */
+static int msm_anlg_cdc_hphr_dac_event(struct snd_soc_dapm_widget *w,
+				       struct snd_kcontrol *kcontrol,
+				       int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (msmfalcon_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_PRE_RX2_INT_ON);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x01);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x00);
+		if (msmfalcon_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_POST_RX2_INT_OFF);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * msm_anlg_cdc_hph_pa_event - DAPM handler shared by the HPHL and HPHR
+ * power amplifiers; w->shift distinguishes them (5 = HPHL, 4 = HPHR).
+ *
+ * Power-up: notify MBHC, enable the NCP feedback bit, then unmute after a
+ * 7ms settling delay.  Power-down: mute, wait for the digital mute to take
+ * effect, tear down the PA, notify MBHC, and acknowledge any pending
+ * MBHC PA-off requests.  Always returns 0.
+ */
+static int msm_anlg_cdc_hph_pa_event(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (w->shift == 5)
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHL_PA_ON);
+		else if (w->shift == 4)
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHR_PA_ON);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x20, 0x20);
+		break;
+
+	case SND_SOC_DAPM_POST_PMU:
+		/* Wait for 7ms to allow setting time for HPH_PA Enable */
+		usleep_range(7000, 7100);
+		if (w->shift == 5) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_RX1_MUTE_OFF);
+		} else if (w->shift == 4) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_RX2_MUTE_OFF);
+		}
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		if (w->shift == 5) {
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_RX1_MUTE_ON);
+			/* Wait for 20ms after HPHL RX digital mute */
+			msleep(20);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x00);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHL_PA_OFF);
+		} else if (w->shift == 4) {
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_RX2_MUTE_ON);
+			/* Wait for 20ms after HPHR RX digital mute */
+			msleep(20);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x00);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHR_PA_OFF);
+		}
+		if (get_codec_version(msmfalcon_cdc) >= CAJON) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
+				0xF0, 0x30);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (w->shift == 5) {
+			clear_bit(WCD_MBHC_HPHL_PA_OFF_ACK,
+				&msmfalcon_cdc->mbhc.hph_pa_dac_state);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_HPHL_PA_OFF);
+		} else if (w->shift == 4) {
+			clear_bit(WCD_MBHC_HPHR_PA_OFF_ACK,
+				&msmfalcon_cdc->mbhc.hph_pa_dac_state);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_HPHR_PA_OFF);
+		}
+		/* Wait for 15ms after HPH RX teardown */
+		usleep_range(15000, 15100);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * DAPM route table: {sink, control (or NULL), source}.  Describes how the
+ * analog playback paths (EAR, HPH, SPK, LINEOUT, WSA), the ADC capture
+ * paths, and the micbias supplies are wired together.
+ */
+static const struct snd_soc_dapm_route audio_map[] = {
+	/* RDAC Connections */
+	{"HPHR DAC", NULL, "RDAC2 MUX"},
+	{"RDAC2 MUX", "RX1", "PDM_IN_RX1"},
+	{"RDAC2 MUX", "RX2", "PDM_IN_RX2"},
+
+	/* WSA */
+	{"WSA_SPK OUT", NULL, "WSA Spk Switch"},
+	{"WSA Spk Switch", "WSA", "EAR PA"},
+
+	/* Earpiece (RX MIX1) */
+	{"EAR", NULL, "EAR_S"},
+	{"EAR_S", "Switch", "EAR PA"},
+	{"EAR PA", NULL, "RX_BIAS"},
+	{"EAR PA", NULL, "HPHL DAC"},
+	{"EAR PA", NULL, "HPHR DAC"},
+	{"EAR PA", NULL, "EAR CP"},
+
+	/* Headset (RX MIX1 and RX MIX2) */
+	{"HEADPHONE", NULL, "HPHL PA"},
+	{"HEADPHONE", NULL, "HPHR PA"},
+
+	/* External speaker amp driven from the headphone PAs */
+	{"Ext Spk", NULL, "Ext Spk Switch"},
+	{"Ext Spk Switch", "On", "HPHL PA"},
+	{"Ext Spk Switch", "On", "HPHR PA"},
+
+	{"HPHL PA", NULL, "HPHL"},
+	{"HPHR PA", NULL, "HPHR"},
+	{"HPHL", "Switch", "HPHL DAC"},
+	{"HPHR", "Switch", "HPHR DAC"},
+	{"HPHL PA", NULL, "CP"},
+	{"HPHL PA", NULL, "RX_BIAS"},
+	{"HPHR PA", NULL, "CP"},
+	{"HPHR PA", NULL, "RX_BIAS"},
+	{"HPHL DAC", NULL, "PDM_IN_RX1"},
+
+	/* Speaker */
+	{"SPK_OUT", NULL, "SPK PA"},
+	{"SPK PA", NULL, "SPK_RX_BIAS"},
+	{"SPK PA", NULL, "SPK"},
+	{"SPK", "Switch", "SPK DAC"},
+	{"SPK DAC", NULL, "PDM_IN_RX3"},
+	{"SPK DAC", NULL, "VDD_SPKDRV"},
+
+	/* lineout */
+	{"LINEOUT", NULL, "LINEOUT PA"},
+	{"LINEOUT PA", NULL, "SPK_RX_BIAS"},
+	{"LINEOUT PA", NULL, "LINE_OUT"},
+	{"LINE_OUT", "Switch", "LINEOUT DAC"},
+	{"LINEOUT DAC", NULL, "PDM_IN_RX3"},
+
+	/* lineout to WSA */
+	{"WSA_SPK OUT", NULL, "LINEOUT PA"},
+
+	/* Per-path RX clock supplies */
+	{"PDM_IN_RX1", NULL, "RX1 CLK"},
+	{"PDM_IN_RX2", NULL, "RX2 CLK"},
+	{"PDM_IN_RX3", NULL, "RX3 CLK"},
+
+	{"ADC1_OUT", NULL, "ADC1"},
+	{"ADC2_OUT", NULL, "ADC2"},
+	{"ADC3_OUT", NULL, "ADC3"},
+
+	/* ADC Connections */
+	{"ADC2", NULL, "ADC2 MUX"},
+	{"ADC3", NULL, "ADC2 MUX"},
+	{"ADC2 MUX", "INP2", "ADC2_INP2"},
+	{"ADC2 MUX", "INP3", "ADC2_INP3"},
+
+	{"ADC1", NULL, "AMIC1"},
+	{"ADC2_INP2", NULL, "AMIC2"},
+	{"ADC2_INP3", NULL, "AMIC3"},
+
+	/* Every micbias flavour needs the LDO and the regulator supply */
+	{"MIC BIAS Internal1", NULL, "INT_LDO_H"},
+	{"MIC BIAS Internal2", NULL, "INT_LDO_H"},
+	{"MIC BIAS External", NULL, "INT_LDO_H"},
+	{"MIC BIAS External2", NULL, "INT_LDO_H"},
+	{"MIC BIAS Internal1", NULL, "MICBIAS_REGULATOR"},
+	{"MIC BIAS Internal2", NULL, "MICBIAS_REGULATOR"},
+	{"MIC BIAS External", NULL, "MICBIAS_REGULATOR"},
+	{"MIC BIAS External2", NULL, "MICBIAS_REGULATOR"},
+};
+
+/*
+ * DAI startup: refuse to open a stream while the codec is still marked
+ * down after a subsystem restart (SSR).
+ */
+static int msm_anlg_cdc_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct msmfalcon_cdc_priv *priv =
+			snd_soc_codec_get_drvdata(dai->codec);
+
+	dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
+		__func__, substream->name, substream->stream);
+
+	/* BUS_DOWN set means SSR recovery has not completed yet */
+	if (test_bit(BUS_DOWN, &priv->status_mask)) {
+		dev_err(dai->codec->dev, "Error, Device is not up post SSR\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* DAI shutdown: nothing to tear down, trace only. */
+static void msm_anlg_cdc_shutdown(struct snd_pcm_substream *substream,
+				  struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
+		__func__, substream->name, substream->stream);
+}
+
+/*
+ * msm_anlg_cdc_mclk_enable - enable or disable the codec clock block.
+ * @codec: codec instance.
+ * @mclk_enable: non-zero to enable, zero to disable.
+ * @dapm: whether the caller runs in a DAPM context (logged only).
+ *
+ * Returns 0, or -EINVAL when asked to disable an MCLK that is already
+ * disabled.
+ */
+int msm_anlg_cdc_mclk_enable(struct snd_soc_codec *codec,
+			     int mclk_enable, bool dapm)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
+		__func__, mclk_enable, dapm);
+	if (mclk_enable) {
+		msmfalcon_cdc->int_mclk0_enabled = true;
+		msm_anlg_cdc_codec_enable_clock_block(codec, 1);
+	} else {
+		if (!msmfalcon_cdc->int_mclk0_enabled) {
+			/* fixed typo in log: "diabled" -> "disabled" */
+			dev_err(codec->dev, "Error, MCLK already disabled\n");
+			return -EINVAL;
+		}
+		msmfalcon_cdc->int_mclk0_enabled = false;
+		msm_anlg_cdc_codec_enable_clock_block(codec, 0);
+	}
+	return 0;
+}
+
+/* Sysclk is managed outside this codec; accept and trace only. */
+static int msm_anlg_cdc_set_dai_sysclk(struct snd_soc_dai *dai,
+				       int clk_id, unsigned int freq, int dir)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+/* DAI format is fixed by the platform; accept and trace only. */
+static int msm_anlg_cdc_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+/* Channel mapping is not configurable on this codec; trace only. */
+static int msm_anlg_cdc_set_channel_map(struct snd_soc_dai *dai,
+					unsigned int tx_num,
+					unsigned int *tx_slot,
+					unsigned int rx_num,
+					unsigned int *rx_slot)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+/* No channel map to report; output parameters are left untouched. */
+static int msm_anlg_cdc_get_channel_map(struct snd_soc_dai *dai,
+					unsigned int *tx_num,
+					unsigned int *tx_slot,
+					unsigned int *rx_num,
+					unsigned int *rx_slot)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+/* DAI ops shared by all DAIs of this codec; clock/format/map callbacks
+ * are accept-only stubs, startup gates on SSR state.
+ */
+static struct snd_soc_dai_ops msm_anlg_cdc_dai_ops = {
+	.startup = msm_anlg_cdc_startup,
+	.shutdown = msm_anlg_cdc_shutdown,
+	.set_sysclk = msm_anlg_cdc_set_dai_sysclk,
+	.set_fmt = msm_anlg_cdc_set_dai_fmt,
+	.set_channel_map = msm_anlg_cdc_set_channel_map,
+	.get_channel_map = msm_anlg_cdc_get_channel_map,
+};
+
+/*
+ * DAI definitions: one playback DAI (RX), a primary capture DAI (TX),
+ * a secondary capture DAI for sound-trigger (SVA), and a speaker-protect
+ * VI-feedback DAI fixed at 48kHz/2ch.
+ */
+static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[] = {
+	{
+		.name = "msm_anlg_cdc_i2s_rx1",
+		.id = AIF1_PB,
+		.playback = {
+			.stream_name = "Playback",
+			.rates = MSMFALCON_CDC_RATES,
+			.formats = MSMFALCON_CDC_FORMATS,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			/* NOTE(review): 3 playback channels — confirm;
+			 * typical stereo codecs use 2.
+			 */
+			.channels_max = 3,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+	{
+		.name = "msm_anlg_cdc_i2s_tx1",
+		.id = AIF1_CAP,
+		.capture = {
+			.stream_name = "Record",
+			.rates = MSMFALCON_CDC_RATES,
+			.formats = MSMFALCON_CDC_FORMATS,
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+	{
+		.name = "msm_anlg_cdc_i2s_tx2",
+		.id = AIF3_SVA,
+		.capture = {
+			.stream_name = "RecordSVA",
+			.rates = MSMFALCON_CDC_RATES,
+			.formats = MSMFALCON_CDC_FORMATS,
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+	{
+		.name = "msm_anlg_vifeedback",
+		.id = AIF2_VIFEED,
+		.capture = {
+			.stream_name = "VIfeed",
+			.rates = MSMFALCON_CDC_RATES,
+			.formats = MSMFALCON_CDC_FORMATS,
+			.rate_max = 48000,
+			.rate_min = 48000,
+			.channels_min = 2,
+			.channels_max = 2,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+};
+
+
+/*
+ * Lineout PA handler: unmute the RX3 digital path after power-up and
+ * mute it again after power-down.  Always returns 0.
+ */
+static int msm_anlg_cdc_codec_enable_lo_pa(struct snd_soc_dapm_widget *w,
+					   struct snd_kcontrol *kcontrol,
+					   int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s: %d %s\n", __func__, event, w->name);
+
+	if (event == SND_SOC_DAPM_POST_PMU)
+		msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX3_MUTE_OFF);
+	else if (event == SND_SOC_DAPM_POST_PMD)
+		msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX3_MUTE_ON);
+
+	return 0;
+}
+
+/*
+ * msm_anlg_cdc_codec_enable_spk_ext_pa - toggle the board's external
+ * speaker PA through the machine-driver callback, if one is registered.
+ *
+ * Fix: the PRE_PMD (disable) path logged "enable external speaker PA";
+ * corrected to say "disable".
+ */
+static int msm_anlg_cdc_codec_enable_spk_ext_pa(struct snd_soc_dapm_widget *w,
+						struct snd_kcontrol *kcontrol,
+						int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		dev_dbg(codec->dev,
+			"%s: enable external speaker PA\n", __func__);
+		if (msmfalcon_cdc->codec_spk_ext_pa_cb)
+			msmfalcon_cdc->codec_spk_ext_pa_cb(codec, 1);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		dev_dbg(codec->dev,
+			"%s: disable external speaker PA\n", __func__);
+		if (msmfalcon_cdc->codec_spk_ext_pa_cb)
+			msmfalcon_cdc->codec_spk_ext_pa_cb(codec, 0);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * msm_anlg_cdc_codec_enable_ear_pa - DAPM handler for the earpiece PA,
+ * with revision-dependent wave-generator timing (pre-Conga) and
+ * cross-connect handling (Diangu and later).  Always returns 0.
+ *
+ * NOTE(review): several dev_dbg messages below claim "Sleeping 20ms"
+ * while the code actually sleeps 7ms (or not at all in PRE_PMU) — the
+ * messages appear stale relative to the delays; confirm intended timing.
+ */
+static int msm_anlg_cdc_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		dev_dbg(codec->dev,
+			"%s: Sleeping 20ms after select EAR PA\n",
+			__func__);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			    0x80, 0x80);
+		if (get_codec_version(msmfalcon_cdc) < CONGA)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x2A);
+		if (get_codec_version(msmfalcon_cdc) >= DIANGU) {
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC, 0x08, 0x00);
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		dev_dbg(codec->dev,
+			"%s: Sleeping 20ms after enabling EAR PA\n",
+			__func__);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			    0x40, 0x40);
+		/* Wait for 7ms after EAR PA enable */
+		usleep_range(7000, 7100);
+		msm_anlg_cdc_dig_notifier_call(codec,
+				DIG_CDC_EVENT_RX1_MUTE_OFF);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		msm_anlg_cdc_dig_notifier_call(codec,
+				DIG_CDC_EVENT_RX1_MUTE_ON);
+		/* Wait for 20ms for RX digital mute to take effect */
+		msleep(20);
+		if (msmfalcon_cdc->boost_option == BOOST_ALWAYS) {
+			dev_dbg(codec->dev,
+				"%s: boost_option:%d, tear down ear\n",
+				__func__, msmfalcon_cdc->boost_option);
+			msm_anlg_cdc_boost_mode_sequence(codec, EAR_PMD);
+		}
+		if (get_codec_version(msmfalcon_cdc) >= DIANGU) {
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x0);
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x0);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		dev_dbg(codec->dev,
+			"%s: Sleeping 7ms after disabling EAR PA\n",
+			__func__);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			    0x40, 0x00);
+		/* Wait for 7ms after EAR PA teardown */
+		usleep_range(7000, 7100);
+		if (get_codec_version(msmfalcon_cdc) < CONGA)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x16);
+		if (get_codec_version(msmfalcon_cdc) >= DIANGU)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC, 0x08, 0x08);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * DAPM widget table for the analog codec: PAs, muxes, DACs, supplies,
+ * micbias widgets, ADCs, and the physical input/output pins referenced
+ * by audio_map[].
+ */
+static const struct snd_soc_dapm_widget msm_anlg_cdc_dapm_widgets[] = {
+	/* Power amplifiers */
+	SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
+			0, 0, NULL, 0, msm_anlg_cdc_codec_enable_ear_pa,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHL PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+			5, 0, NULL, 0,
+			msm_anlg_cdc_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+			SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHR PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+			4, 0, NULL, 0,
+			msm_anlg_cdc_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+			SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM,
+			0, 0, NULL, 0, msm_anlg_cdc_codec_enable_spk_pa,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT PA", MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL,
+			5, 0, NULL, 0, msm_anlg_cdc_codec_enable_lo_pa,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	/* Path selection muxes/switches */
+	SND_SOC_DAPM_MUX("EAR_S", SND_SOC_NOPM, 0, 0, ear_pa_mux),
+	SND_SOC_DAPM_MUX("SPK", SND_SOC_NOPM, 0, 0, spkr_mux),
+	SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, hphl_mux),
+	SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0, hphr_mux),
+	SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
+	SND_SOC_DAPM_MUX("WSA Spk Switch", SND_SOC_NOPM, 0, 0, wsa_spk_mux),
+	SND_SOC_DAPM_MUX("Ext Spk Switch", SND_SOC_NOPM, 0, 0, &ext_spk_mux),
+	SND_SOC_DAPM_MUX("LINE_OUT", SND_SOC_NOPM, 0, 0, lo_mux),
+	SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0, &tx_adc2_mux),
+
+	/* DACs */
+	SND_SOC_DAPM_MIXER_E("HPHL DAC",
+		MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
+		0, msm_anlg_cdc_hphl_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("HPHR DAC",
+		MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
+		0, msm_anlg_cdc_hphr_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_DAC("SPK DAC", NULL, MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+			 7, 0),
+	SND_SOC_DAPM_DAC_E("LINEOUT DAC", NULL,
+		SND_SOC_NOPM, 0, 0, msm_anlg_cdc_lo_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SPK("Ext Spk", msm_anlg_cdc_codec_enable_spk_ext_pa),
+
+	/* Clock and bias supplies */
+	SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX3 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    2, 0, msm_anlg_cdc_codec_enable_dig_clk,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("CP", MSM89XX_PMIC_ANALOG_NCP_EN, 0, 0,
+			    msm_anlg_cdc_codec_enable_charge_pump,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			    SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("EAR CP", MSM89XX_PMIC_ANALOG_NCP_EN, 4, 0,
+			    msm_anlg_cdc_codec_enable_charge_pump,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			    SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("RX_BIAS", 1, SND_SOC_NOPM,
+			      0, 0, msm_anlg_cdc_codec_enable_rx_bias,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("SPK_RX_BIAS", 1, SND_SOC_NOPM, 0, 0,
+			      msm_anlg_cdc_codec_enable_rx_bias,
+			      SND_SOC_DAPM_PRE_PMU |
+			      SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0,
+			    msmfalcon_wcd_codec_enable_vdd_spkr,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MICBIAS_REGULATOR", SND_SOC_NOPM,
+			    ON_DEMAND_MICBIAS, 0,
+			    msm_anlg_cdc_codec_enable_on_demand_supply,
+			    SND_SOC_DAPM_PRE_PMU |
+			    SND_SOC_DAPM_POST_PMD),
+
+	/* Micbias widgets; name suffix selects the path in the handler */
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal1",
+		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU |	SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal2",
+		MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU |	SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal3",
+		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU |	SND_SOC_DAPM_POST_PMD),
+
+	/* ADCs */
+	SND_SOC_DAPM_ADC_E("ADC1", NULL, MSM89XX_PMIC_ANALOG_TX_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP2",
+		NULL, MSM89XX_PMIC_ANALOG_TX_2_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP3",
+		NULL, MSM89XX_PMIC_ANALOG_TX_3_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS External",
+		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS External2",
+		MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	/* Physical pins */
+	SND_SOC_DAPM_INPUT("AMIC1"),
+	SND_SOC_DAPM_INPUT("AMIC2"),
+	SND_SOC_DAPM_INPUT("AMIC3"),
+	SND_SOC_DAPM_INPUT("PDM_IN_RX1"),
+	SND_SOC_DAPM_INPUT("PDM_IN_RX2"),
+	SND_SOC_DAPM_INPUT("PDM_IN_RX3"),
+
+	SND_SOC_DAPM_OUTPUT("EAR"),
+	SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
+	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
+	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT"),
+	SND_SOC_DAPM_OUTPUT("ADC1_OUT"),
+	SND_SOC_DAPM_OUTPUT("ADC2_OUT"),
+	SND_SOC_DAPM_OUTPUT("ADC3_OUT"),
+};
+
+/* Baseline speaker-path register overrides applied to all revisions. */
+static const struct msmfalcon_cdc_reg_mask_val msm_anlg_cdc_reg_defaults[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+};
+
+/*
+ * Register overrides for Tombak 2.0 silicon.  The 0xA5 writes to the
+ * SEC_ACCESS registers appear to unlock the subsequent secured
+ * PERPH_RESET_CTL3 writes (same pattern as the trim sequence earlier in
+ * this file) — TODO confirm against the PMIC register spec.
+ */
+static const struct msmfalcon_cdc_reg_mask_val
+	msm_anlg_cdc_reg_defaults_2_0[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BOOST_EN_CTL, 0x5F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO, 0x88),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+/* Register overrides for Conga silicon. */
+static const struct msmfalcon_cdc_reg_mask_val conga_wcd_reg_defaults[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE, 0x0A),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+static const struct msmfalcon_cdc_reg_mask_val cajon_wcd_reg_defaults[] = {
+ MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+static const struct msmfalcon_cdc_reg_mask_val cajon2p0_wcd_reg_defaults[] = {
+ MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0xA2),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_EAR_STATUS, 0x10),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BYPASS_MODE, 0x18),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
+ MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+/*
+ * msm_anlg_cdc_update_reg_defaults - program version-specific defaults
+ * @codec: codec instance whose drvdata carries the detected version
+ *
+ * Selects the override table matching the detected codec version and
+ * writes every entry via snd_soc_write().  Unknown versions are left
+ * untouched.
+ */
+static void msm_anlg_cdc_update_reg_defaults(struct snd_soc_codec *codec)
+{
+	u32 idx;
+	struct msmfalcon_cdc_priv *msmfalcon_cdc =
+		snd_soc_codec_get_drvdata(codec);
+
+	switch (get_codec_version(msmfalcon_cdc)) {
+	case TOMBAK_1_0:
+		for (idx = 0; idx < ARRAY_SIZE(msm_anlg_cdc_reg_defaults);
+		     idx++)
+			snd_soc_write(codec,
+				      msm_anlg_cdc_reg_defaults[idx].reg,
+				      msm_anlg_cdc_reg_defaults[idx].val);
+		break;
+	case TOMBAK_2_0:
+		for (idx = 0; idx < ARRAY_SIZE(msm_anlg_cdc_reg_defaults_2_0);
+		     idx++)
+			snd_soc_write(codec,
+				      msm_anlg_cdc_reg_defaults_2_0[idx].reg,
+				      msm_anlg_cdc_reg_defaults_2_0[idx].val);
+		break;
+	case CONGA:
+		for (idx = 0; idx < ARRAY_SIZE(conga_wcd_reg_defaults); idx++)
+			snd_soc_write(codec,
+				      conga_wcd_reg_defaults[idx].reg,
+				      conga_wcd_reg_defaults[idx].val);
+		break;
+	case CAJON:
+		for (idx = 0; idx < ARRAY_SIZE(cajon_wcd_reg_defaults); idx++)
+			snd_soc_write(codec,
+				      cajon_wcd_reg_defaults[idx].reg,
+				      cajon_wcd_reg_defaults[idx].val);
+		break;
+	case CAJON_2_0:
+	case DIANGU:
+	case DRAX_CDC:
+		for (idx = 0; idx < ARRAY_SIZE(cajon2p0_wcd_reg_defaults);
+		     idx++)
+			snd_soc_write(codec,
+				      cajon2p0_wcd_reg_defaults[idx].reg,
+				      cajon2p0_wcd_reg_defaults[idx].val);
+		break;
+	default:
+		break;
+	}
+}
+
+/* One-time register init values applied on probe and SSR-up. */
+static const struct msmfalcon_cdc_reg_mask_val
+	msm_anlg_cdc_codec_reg_init_val[] = {
+
+	/* Initialize current threshold to 350MA
+	 * number of wait and run cycles to 4096
+	 */
+	{MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0xFF, 0x12},
+	{MSM89XX_PMIC_ANALOG_RX_COM_OCP_COUNT, 0xFF, 0xFF},
+};
+
+/*
+ * Seed the regmap cache with the codec's power-on-reset values without
+ * touching the hardware (writes happen in cache-only mode).
+ */
+static void msm_anlg_cdc_codec_init_cache(struct snd_soc_codec *codec)
+{
+	u32 i;
+
+	regcache_cache_only(codec->component.regmap, true);
+	/* update cache with POR values */
+	for (i = 0; i < ARRAY_SIZE(msm89xx_pmic_cdc_defaults); i++)
+		snd_soc_write(codec, msm89xx_pmic_cdc_defaults[i].reg,
+			      msm89xx_pmic_cdc_defaults[i].def);
+	regcache_cache_only(codec->component.regmap, false);
+}
+
+/* Apply the masked one-time init values from the table above. */
+static void msm_anlg_cdc_codec_init_reg(struct snd_soc_codec *codec)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(msm_anlg_cdc_codec_reg_init_val); i++)
+		snd_soc_update_bits(codec,
+				    msm_anlg_cdc_codec_reg_init_val[i].reg,
+				    msm_anlg_cdc_codec_reg_init_val[i].mask,
+				    msm_anlg_cdc_codec_reg_init_val[i].val);
+}
+
+/*
+ * Pulse the peripheral reset lines of both the digital and analog blocks
+ * (assert via RESET_CTL4 = 0x01, then deassert with 0x00).  Each reset
+ * write is preceded by a SEC_ACCESS 0xA5 write -- presumably the unlock
+ * sequence for the secure register page; the write ORDER is significant.
+ * Always returns 0.
+ */
+static int msm_anlg_cdc_bringup(struct snd_soc_codec *codec)
+{
+	snd_soc_write(codec,
+		MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x01);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x01);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x00);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x00);
+	return 0;
+}
+
+/*
+ * Look up a regulator consumer handle by supply name in the codec's
+ * bulk-supply table.  Returns the consumer on a match, or NULL (after
+ * logging an error) when no supply with that name exists.
+ */
+static struct regulator *msm_anlg_cdc_find_regulator(
+				const struct msmfalcon_cdc *msmfalcon_cdc,
+				const char *name)
+{
+	int idx;
+
+	for (idx = 0; idx < msmfalcon_cdc->num_of_supplies; idx++) {
+		const char *supply = msmfalcon_cdc->supplies[idx].supply;
+
+		if (supply && strcmp(supply, name) == 0)
+			return msmfalcon_cdc->supplies[idx].consumer;
+	}
+
+	dev_err(msmfalcon_cdc->dev, "Error: regulator not found:%s\n"
+				, name);
+	return NULL;
+}
+
+/*
+ * Tear the codec down for a subsystem restart (SSR): mute/disable the TX
+ * and PA paths, drain the boost, reset the analog/digital blocks via
+ * msm_anlg_cdc_bringup(), and mark the sound card offline.  The register
+ * write sequence is order-sensitive; do not reorder.  Always returns 0.
+ */
+static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
+{
+	struct msm_asoc_mach_data *pdata = NULL;
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+	unsigned int tx_1_en;
+	unsigned int tx_2_en;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	dev_dbg(codec->dev, "%s: device down!\n", __func__);
+
+	/* Clear bit 7 of both TX enables (read-modify-write). */
+	tx_1_en = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_TX_1_EN);
+	tx_2_en = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_TX_2_EN);
+	tx_1_en = tx_1_en & 0x7f;
+	tx_2_en = tx_2_en & 0x7f;
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_TX_1_EN, tx_1_en);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_TX_2_EN, tx_2_en);
+	if (msmfalcon_cdc_priv->boost_option == BOOST_ON_FOREVER) {
+		/* Only run the forced-boost sequence if the speaker driver
+		 * control bit 7 is still clear.
+		 */
+		if ((snd_soc_read(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL)
+			& 0x80) == 0) {
+			msm_anlg_cdc_dig_notifier_call(codec,
+						       DIG_CDC_EVENT_CLK_ON);
+			snd_soc_write(codec,
+				MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL,
+				0x0C, 0x0C);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0x84, 0x84);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
+				0x10, 0x10);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
+				0x1F, 0x1F);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+				0x90, 0x90);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+				0xFF, 0xFF);
+			/* Wait for 20us for boost settings to take effect */
+			usleep_range(20, 21);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
+				0xFF, 0xFF);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+				0xE9, 0xE9);
+		}
+	}
+	msm_anlg_cdc_boost_off(codec);
+	msmfalcon_cdc_priv->hph_mode = NORMAL_MODE;
+
+	/* 40ms to allow boost to discharge */
+	msleep(40);
+	/* Disable PA to avoid pop during codec bring up */
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+			    0x30, 0x00);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+			    0x80, 0x00);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x20);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x20);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x12);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
+
+	msm_anlg_cdc_bringup(codec);
+	atomic_set(&pdata->int_mclk0_enabled, false);
+	msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_DOWN);
+	set_bit(BUS_DOWN, &msmfalcon_cdc_priv->status_mask);
+	snd_soc_card_change_online_state(codec->component.card, 0);
+	return 0;
+}
+
+/*
+ * Re-initialise the codec after the ADSP comes back from SSR: mark the
+ * card online, restore register defaults, resync the regmap cache,
+ * restore boost/micbias settings and restart MBHC headset detection.
+ * Always returns 0 (an MBHC init failure is only logged).
+ */
+static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s: device up!\n", __func__);
+
+	msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_UP);
+	clear_bit(BUS_DOWN, &msmfalcon_cdc_priv->status_mask);
+	snd_soc_card_change_online_state(codec->component.card, 1);
+	/* delay is required to make sure sound card state updated */
+	usleep_range(5000, 5100);
+
+	msm_anlg_cdc_codec_init_reg(codec);
+	msm_anlg_cdc_update_reg_defaults(codec);
+
+	/* Push the whole cached register range back to the hardware. */
+	regcache_mark_dirty(codec->component.regmap);
+	regcache_sync_region(codec->component.regmap,
+			     MSM89XX_PMIC_DIGITAL_REVISION1,
+			     MSM89XX_PMIC_CDC_MAX_REGISTER);
+
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
+		      MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
+		      MSM89XX_PMIC_DIGITAL_INT_EN_CLR__POR);
+
+	msm_anlg_cdc_set_boost_v(codec);
+	msm_anlg_cdc_set_micb_v(codec);
+	if (msmfalcon_cdc_priv->boost_option == BOOST_ON_FOREVER)
+		msm_anlg_cdc_boost_on(codec);
+	else if (msmfalcon_cdc_priv->boost_option == BYPASS_ALWAYS)
+		msm_anlg_cdc_bypass_on(codec);
+
+	msm_anlg_cdc_configure_cap(codec, false, false);
+	/* Fully re-create MBHC state rather than trusting pre-SSR state. */
+	wcd_mbhc_stop(&msmfalcon_cdc_priv->mbhc);
+	wcd_mbhc_deinit(&msmfalcon_cdc_priv->mbhc);
+	ret = wcd_mbhc_init(&msmfalcon_cdc_priv->mbhc, codec, &mbhc_cb,
+			    &intr_ids, wcd_mbhc_registers, true);
+	if (ret)
+		dev_err(codec->dev, "%s: mbhc initialization failed\n",
+			__func__);
+	else
+		wcd_mbhc_start(&msmfalcon_cdc_priv->mbhc,
+			       msmfalcon_cdc_priv->mbhc.mbhc_cfg);
+
+	return 0;
+}
+
+/*
+ * Audio-service SSR notifier: tears the codec down when the ADSP goes
+ * away and brings it back up once the ADSP reports ready again.
+ *
+ * Fix vs. previous revision: the ready-wait loop called
+ * q6core_is_adsp_ready() back-to-back with no delay, busy-spinning a CPU
+ * for up to ADSP_STATE_READY_TIMEOUT_MS.  Poll every 50 ms instead.
+ *
+ * Always returns NOTIFY_OK.
+ */
+static int msmfalcon_cdc_notifier_service_cb(struct notifier_block *nb,
+					     unsigned long opcode, void *ptr)
+{
+	struct snd_soc_codec *codec;
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv =
+				container_of(nb, struct msmfalcon_cdc_priv,
+					     audio_ssr_nb);
+	bool adsp_ready = false;
+	bool timedout;
+	unsigned long timeout;
+
+	codec = msmfalcon_cdc_priv->codec;
+	dev_dbg(codec->dev, "%s: Service opcode 0x%lx\n", __func__, opcode);
+
+	switch (opcode) {
+	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		dev_dbg(codec->dev,
+			"ADSP is about to power down. teardown/reset codec\n");
+		msm_anlg_cdc_device_down(codec);
+		break;
+	case AUDIO_NOTIFIER_SERVICE_UP:
+		/* First SERVICE_UP is the initial boot, not an SSR recovery. */
+		if (initial_boot) {
+			initial_boot = false;
+			break;
+		}
+		dev_dbg(codec->dev,
+			"ADSP is about to power up. bring up codec\n");
+
+		if (!q6core_is_adsp_ready()) {
+			dev_dbg(codec->dev,
+				"ADSP isn't ready\n");
+			timeout = jiffies +
+				  msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+			while (!(timedout = time_after(jiffies, timeout))) {
+				if (q6core_is_adsp_ready()) {
+					dev_dbg(codec->dev,
+						"ADSP is ready\n");
+					adsp_ready = true;
+					goto powerup;
+				}
+				dev_dbg(codec->dev,
+					"ADSP isn't ready\n");
+				/* Sleep between polls instead of busy-waiting */
+				msleep(50);
+			}
+		} else {
+			adsp_ready = true;
+			dev_dbg(codec->dev, "%s: DSP is ready\n", __func__);
+		}
+powerup:
+		if (adsp_ready)
+			msm_anlg_cdc_device_up(codec);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+/*
+ * Start MBHC headset detection with the given configuration.
+ * Returns the result of wcd_mbhc_start().
+ */
+int msm_anlg_cdc_hs_detect(struct snd_soc_codec *codec,
+			   struct wcd_mbhc_config *mbhc_cfg)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+
+	return wcd_mbhc_start(&msmfalcon_cdc_priv->mbhc, mbhc_cfg);
+}
+EXPORT_SYMBOL(msm_anlg_cdc_hs_detect);
+
+/* Stop MBHC headset detection (counterpart of msm_anlg_cdc_hs_detect). */
+void msm_anlg_cdc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+
+	wcd_mbhc_stop(&msmfalcon_cdc_priv->mbhc);
+}
+EXPORT_SYMBOL(msm_anlg_cdc_hs_detect_exit);
+
+/*
+ * Record the requested internal speaker boost state in the file-scope
+ * flag; soc_probe copies it into spk_boost_set.  No hardware access here.
+ */
+void msm_anlg_cdc_update_int_spk_boost(bool enable)
+{
+	pr_debug("%s: enable = %d\n", __func__, enable);
+	spkr_boost_en = enable;
+}
+EXPORT_SYMBOL(msm_anlg_cdc_update_int_spk_boost);
+
+/*
+ * Program MICB_1_VAL[7:3] from the platform's cfilt1 millivolt setting,
+ * converted to a register step via VOLTAGE_CONVERTER().
+ */
+static void msm_anlg_cdc_set_micb_v(struct snd_soc_codec *codec)
+{
+
+	struct msmfalcon_cdc *msmfalcon_cdc = codec->control_data;
+	struct msmfalcon_cdc_pdata *pdata = msmfalcon_cdc->dev->platform_data;
+	u8 reg_val;
+
+	reg_val = VOLTAGE_CONVERTER(pdata->micbias.cfilt1_mv, MICBIAS_MIN_VAL,
+				    MICBIAS_STEP_SIZE);
+	dev_dbg(codec->dev, "cfilt1_mv %d reg_val %x\n",
+		(u32)pdata->micbias.cfilt1_mv, reg_val);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_VAL,
+			    0xF8, (reg_val << 3));
+}
+
+/* Program the boost output voltage field from the driver's cached value. */
+static void msm_anlg_cdc_set_boost_v(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_OUTPUT_VOLTAGE,
+			    0x1F, msmfalcon_cdc_priv->boost_voltage);
+}
+
+/*
+ * Select the micbias external-bypass-cap mode (bit 6 of MICB_1_EN) based
+ * on which micbias users are active:
+ *  - both active: bypass cap only if either user requires it;
+ *  - one active: use that user's configured cap mode;
+ *  - none active: clear the bit.
+ *
+ * Fix vs. previous revision: the pr_debug printed the bool micbias1 with
+ * "%x" and had a stray leading "\n "; both operands now use "%d".
+ */
+static void msm_anlg_cdc_configure_cap(struct snd_soc_codec *codec,
+				       bool micbias1, bool micbias2)
+{
+
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+
+	pr_debug("%s: micbias1 = %d micbias2 = %d\n", __func__, micbias1,
+		 micbias2);
+	if (micbias1 && micbias2) {
+		if ((pdata->micbias1_cap_mode
+		     == MICBIAS_EXT_BYP_CAP) ||
+		    (pdata->micbias2_cap_mode
+		     == MICBIAS_EXT_BYP_CAP))
+			snd_soc_update_bits(codec,
+					    MSM89XX_PMIC_ANALOG_MICB_1_EN,
+					    0x40, (MICBIAS_EXT_BYP_CAP << 6));
+		else
+			snd_soc_update_bits(codec,
+					    MSM89XX_PMIC_ANALOG_MICB_1_EN,
+					    0x40, (MICBIAS_NO_EXT_BYP_CAP << 6));
+	} else if (micbias2) {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				    0x40, (pdata->micbias2_cap_mode << 6));
+	} else if (micbias1) {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				    0x40, (pdata->micbias1_cap_mode << 6));
+	} else {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				    0x40, 0x00);
+	}
+}
+
+/*
+ * /proc-style read handler for the "version" snd_info entry: renders the
+ * detected codec version as a short string and copies it to userspace.
+ * Returns bytes copied, or -EINVAL if the entry has no private data.
+ */
+static ssize_t msm_anlg_codec_version_read(struct snd_info_entry *entry,
+					   void *file_private_data,
+					   struct file *file,
+					   char __user *buf, size_t count,
+					   loff_t pos)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv;
+	char buffer[MSM_ANLG_CDC_VERSION_ENTRY_SIZE];
+	int len = 0;
+
+	msmfalcon_cdc_priv = (struct msmfalcon_cdc_priv *) entry->private_data;
+	if (!msmfalcon_cdc_priv) {
+		pr_err("%s: msmfalcon_cdc_priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (get_codec_version(msmfalcon_cdc_priv)) {
+	case DRAX_CDC:
+		len = snprintf(buffer, sizeof(buffer), "DRAX_CDC_1_0\n");
+		break;
+	default:
+		len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	}
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+/* Ops table binding the read handler to the snd_info "version" entry. */
+static struct snd_info_entry_ops msm_anlg_codec_info_ops = {
+	.read = msm_anlg_codec_version_read,
+};
+
+/*
+ * msm_anlg_codec_info_create_codec_entry - creates pmic_analog module
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates pmic_analog module and version entry under the given
+ * parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					   struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv;
+	struct snd_soc_card *card;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	msmfalcon_cdc_priv = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	msmfalcon_cdc_priv->entry = snd_register_module_info(codec_root->module,
+							     "pmic_analog",
+							     codec_root);
+	if (!msmfalcon_cdc_priv->entry) {
+		dev_dbg(codec->dev, "%s: failed to create pmic_analog entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   msmfalcon_cdc_priv->entry);
+	if (!version_entry) {
+		dev_dbg(codec->dev, "%s: failed to create pmic_analog version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Expose the version string through msm_anlg_codec_version_read(). */
+	version_entry->private_data = msmfalcon_cdc_priv;
+	version_entry->size = MSM_ANLG_CDC_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &msm_anlg_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	msmfalcon_cdc_priv->version_entry = version_entry;
+	return 0;
+}
+EXPORT_SYMBOL(msm_anlg_codec_info_create_codec_entry);
+
+/*
+ * ASoC codec probe: allocate driver state, detect the silicon version
+ * from the PMIC revision/subtype registers, bring the codec out of reset,
+ * seed the register cache/defaults, set up MBHC, hwdep calibration and
+ * the SSR notifier.  Returns 0 on success or a negative errno.
+ */
+static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv;
+	struct msmfalcon_cdc *handle_cdc;
+	int ret;
+
+	msmfalcon_cdc_priv = devm_kzalloc(codec->dev,
+					  sizeof(struct msmfalcon_cdc_priv),
+					  GFP_KERNEL);
+	if (!msmfalcon_cdc_priv)
+		return -ENOMEM;
+
+	codec->control_data = dev_get_drvdata(codec->dev);
+	snd_soc_codec_set_drvdata(codec, msmfalcon_cdc_priv);
+	msmfalcon_cdc_priv->codec = codec;
+	handle_cdc = codec->control_data;
+	handle_cdc->codec = codec;
+
+	/* codec resmgr module init */
+	msmfalcon_cdc_priv->spkdrv_reg =
+		msm_anlg_cdc_find_regulator(codec->control_data,
+					    MSM89XX_VDD_SPKDRV_NAME);
+	msmfalcon_cdc_priv->pmic_rev =
+				snd_soc_read(codec,
+					     MSM89XX_PMIC_DIGITAL_REVISION1);
+	msmfalcon_cdc_priv->codec_version =
+				snd_soc_read(codec,
+					MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE);
+	msmfalcon_cdc_priv->analog_major_rev =
+				snd_soc_read(codec,
+					     MSM89XX_PMIC_ANALOG_REVISION4);
+
+	/* Version detection: refine codec_version from pmic_rev, subtype,
+	 * analog major rev and the NCP_FBCTRL bit-7 fuse.
+	 */
+	if (msmfalcon_cdc_priv->codec_version == CONGA) {
+		dev_dbg(codec->dev, "%s :Conga REV: %d\n", __func__,
+			msmfalcon_cdc_priv->codec_version);
+		msmfalcon_cdc_priv->ext_spk_boost_set = true;
+	} else {
+		dev_dbg(codec->dev, "%s :PMIC REV: %d\n", __func__,
+			msmfalcon_cdc_priv->pmic_rev);
+		if (msmfalcon_cdc_priv->pmic_rev == TOMBAK_1_0 &&
+		    msmfalcon_cdc_priv->codec_version == CAJON_2_0) {
+			if (msmfalcon_cdc_priv->analog_major_rev == 0x02) {
+				msmfalcon_cdc_priv->codec_version = DRAX_CDC;
+				dev_dbg(codec->dev,
+					"%s : Drax codec detected\n", __func__);
+			} else {
+				msmfalcon_cdc_priv->codec_version = DIANGU;
+				dev_dbg(codec->dev, "%s : Diangu detected\n",
+					__func__);
+			}
+		} else if (msmfalcon_cdc_priv->pmic_rev == TOMBAK_1_0 &&
+			 (snd_soc_read(codec, MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
+			  & 0x80)) {
+			msmfalcon_cdc_priv->codec_version = CAJON;
+			dev_dbg(codec->dev, "%s : Cajon detected\n", __func__);
+		} else if (msmfalcon_cdc_priv->pmic_rev == TOMBAK_2_0 &&
+			 (snd_soc_read(codec, MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
+			  & 0x80)) {
+			msmfalcon_cdc_priv->codec_version = CAJON_2_0;
+			dev_dbg(codec->dev, "%s : Cajon 2.0 detected\n",
+				__func__);
+		}
+	}
+	/*
+	 * set to default boost option BOOST_SWITCH, user mixer path can change
+	 * it to BOOST_ALWAYS or BOOST_BYPASS based on solution chosen.
+	 */
+	msmfalcon_cdc_priv->boost_option = BOOST_SWITCH;
+	msmfalcon_cdc_priv->hph_mode = NORMAL_MODE;
+
+	msm_anlg_cdc_dt_parse_boost_info(codec);
+	msm_anlg_cdc_set_boost_v(codec);
+
+	snd_soc_add_codec_controls(codec, impedance_detect_controls,
+				   ARRAY_SIZE(impedance_detect_controls));
+	snd_soc_add_codec_controls(codec, hph_type_detect_controls,
+				   ARRAY_SIZE(hph_type_detect_controls));
+
+	msm_anlg_cdc_bringup(codec);
+	msm_anlg_cdc_codec_init_cache(codec);
+	msm_anlg_cdc_codec_init_reg(codec);
+	msm_anlg_cdc_update_reg_defaults(codec);
+
+	wcd9xxx_spmi_set_codec(codec);
+
+	msmfalcon_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].supply =
+				msm_anlg_cdc_find_regulator(
+				codec->control_data,
+				on_demand_supply_name[ON_DEMAND_MICBIAS]);
+	atomic_set(&msmfalcon_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].ref,
+		   0);
+
+	BLOCKING_INIT_NOTIFIER_HEAD(&msmfalcon_cdc_priv->notifier);
+
+	msmfalcon_cdc_priv->fw_data = devm_kzalloc(codec->dev,
+				sizeof(*(msmfalcon_cdc_priv->fw_data)),
+				GFP_KERNEL);
+	if (!msmfalcon_cdc_priv->fw_data)
+		return -ENOMEM;
+
+	set_bit(WCD9XXX_MBHC_CAL, msmfalcon_cdc_priv->fw_data->cal_bit);
+	ret = wcd_cal_create_hwdep(msmfalcon_cdc_priv->fw_data,
+				   WCD9XXX_CODEC_HWDEP_NODE, codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	wcd_mbhc_init(&msmfalcon_cdc_priv->mbhc, codec, &mbhc_cb, &intr_ids,
+		      wcd_mbhc_registers, true);
+
+	msmfalcon_cdc_priv->int_mclk0_enabled = false;
+	/*Update speaker boost configuration*/
+	msmfalcon_cdc_priv->spk_boost_set = spkr_boost_en;
+	pr_debug("%s: speaker boost configured = %d\n",
+		 __func__, msmfalcon_cdc_priv->spk_boost_set);
+
+	/* Set initial MICBIAS voltage level */
+	msm_anlg_cdc_set_micb_v(codec);
+
+	/* Set initial cap mode */
+	msm_anlg_cdc_configure_cap(codec, false, false);
+	msmfalcon_cdc_priv->audio_ssr_nb.notifier_call =
+				msmfalcon_cdc_notifier_service_cb;
+	ret = audio_notifier_register("pmic_analog_cdc",
+				      AUDIO_NOTIFIER_ADSP_DOMAIN,
+				      &msmfalcon_cdc_priv->audio_ssr_nb);
+	if (ret < 0) {
+		pr_err("%s: Audio notifier register failed ret = %d\n",
+		       __func__, ret);
+		wcd_mbhc_deinit(&msmfalcon_cdc_priv->mbhc);
+		return ret;
+	}
+	return 0;
+}
+
+/*
+ * ASoC codec remove: drop cached regulator references and tear down
+ * MBHC.  The regulators themselves are devm-managed and released by the
+ * platform driver.  Always returns 0.
+ */
+static int msm_anlg_cdc_soc_remove(struct snd_soc_codec *codec)
+{
+	struct msmfalcon_cdc_priv *msmfalcon_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+
+	msmfalcon_cdc_priv->spkdrv_reg = NULL;
+	msmfalcon_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].supply = NULL;
+	atomic_set(&msmfalcon_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].ref,
+		   0);
+	wcd_mbhc_deinit(&msmfalcon_cdc_priv->mbhc);
+
+	return 0;
+}
+
+/*
+ * Restore full voltage range and optimum load on every static (non
+ * on-demand) supply -- resume path counterpart of the _disable_ variant
+ * below.  Supplies with no voltage control are skipped.  Returns the
+ * last error from regulator_set_load() (voltage errors are only logged).
+ */
+static int msm_anlg_cdc_enable_static_supplies_to_optimum(
+				struct msmfalcon_cdc *msmfalcon_cdc,
+				struct msmfalcon_cdc_pdata *pdata)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < msmfalcon_cdc->num_of_supplies; i++) {
+		if (pdata->regulator[i].ondemand)
+			continue;
+		if (regulator_count_voltages(
+			msmfalcon_cdc->supplies[i].consumer) <= 0)
+			continue;
+
+		ret = regulator_set_voltage(
+				msmfalcon_cdc->supplies[i].consumer,
+				pdata->regulator[i].min_uv,
+				pdata->regulator[i].max_uv);
+		if (ret) {
+			dev_err(msmfalcon_cdc->dev,
+				"Setting volt failed for regulator %s err %d\n",
+				msmfalcon_cdc->supplies[i].supply, ret);
+		}
+
+		ret = regulator_set_load(msmfalcon_cdc->supplies[i].consumer,
+					 pdata->regulator[i].optimum_ua);
+		dev_dbg(msmfalcon_cdc->dev, "Regulator %s set optimum mode\n",
+			msmfalcon_cdc->supplies[i].supply);
+	}
+
+	return ret;
+}
+
+/*
+ * Drop static supplies to their low-power floor (min voltage 0, load 0)
+ * for suspend.  Errors from the regulator calls are ignored by design;
+ * always returns 0.
+ */
+static int msm_anlg_cdc_disable_static_supplies_to_optimum(
+			struct msmfalcon_cdc *msmfalcon_cdc,
+			struct msmfalcon_cdc_pdata *pdata)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < msmfalcon_cdc->num_of_supplies; i++) {
+		if (pdata->regulator[i].ondemand)
+			continue;
+		if (regulator_count_voltages(
+			msmfalcon_cdc->supplies[i].consumer) <= 0)
+			continue;
+		regulator_set_voltage(msmfalcon_cdc->supplies[i].consumer, 0,
+				      pdata->regulator[i].max_uv);
+		regulator_set_load(msmfalcon_cdc->supplies[i].consumer, 0);
+		dev_dbg(msmfalcon_cdc->dev, "Regulator %s set optimum mode\n",
+			msmfalcon_cdc->supplies[i].supply);
+	}
+
+	return ret;
+}
+
+/*
+ * Codec suspend: synchronously stop the deferred MCLK-disable work, turn
+ * the internal MCLK off if it is still on, then drop the static supplies
+ * to their low-power settings.  Always returns 0.
+ */
+static int msm_anlg_cdc_suspend(struct snd_soc_codec *codec)
+{
+	struct msm_asoc_mach_data *pdata = NULL;
+	struct msmfalcon_cdc *msmfalcon_cdc = codec->control_data;
+	struct msmfalcon_cdc_pdata *msmfalcon_cdc_pdata =
+					msmfalcon_cdc->dev->platform_data;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
+		 __func__, atomic_read(&pdata->int_mclk0_rsc_ref),
+		 atomic_read(&pdata->int_mclk0_enabled));
+	if (atomic_read(&pdata->int_mclk0_enabled) == true) {
+		cancel_delayed_work_sync(&pdata->disable_int_mclk0_work);
+		mutex_lock(&pdata->cdc_int_mclk0_mutex);
+		pdata->digital_cdc_core_clk.enable = 0;
+		afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
+				       &pdata->digital_cdc_core_clk);
+		atomic_set(&pdata->int_mclk0_enabled, false);
+		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+	}
+	msm_anlg_cdc_disable_static_supplies_to_optimum(msmfalcon_cdc,
+							msmfalcon_cdc_pdata);
+	return 0;
+}
+
+/*
+ * Codec resume: restore static supplies to their operating voltage and
+ * load (MCLK is re-enabled on demand elsewhere).  Always returns 0.
+ */
+static int msm_anlg_cdc_resume(struct snd_soc_codec *codec)
+{
+	struct msm_asoc_mach_data *pdata = NULL;
+	struct msmfalcon_cdc *msmfalcon_cdc = codec->control_data;
+	struct msmfalcon_cdc_pdata *msmfalcon_cdc_pdata =
+					msmfalcon_cdc->dev->platform_data;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	msm_anlg_cdc_enable_static_supplies_to_optimum(msmfalcon_cdc,
+						       msmfalcon_cdc_pdata);
+	return 0;
+}
+
+/* The regmap lives on the parent (SPMI/PMIC) device, not on the codec. */
+static struct regmap *msm_anlg_get_regmap(struct device *dev)
+{
+	return dev_get_regmap(dev->parent, NULL);
+}
+
+/* ASoC codec driver descriptor wiring probe/remove/PM and the static
+ * control/widget/route tables defined earlier in this file.
+ */
+static struct snd_soc_codec_driver soc_codec_dev_msmfalcon_cdc = {
+	.probe	= msm_anlg_cdc_soc_probe,
+	.remove	= msm_anlg_cdc_soc_remove,
+	.suspend = msm_anlg_cdc_suspend,
+	.resume = msm_anlg_cdc_resume,
+	.reg_word_size = 1,
+	.controls = msm_anlg_cdc_snd_controls,
+	.num_controls = ARRAY_SIZE(msm_anlg_cdc_snd_controls),
+	.dapm_widgets = msm_anlg_cdc_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(msm_anlg_cdc_dapm_widgets),
+	.dapm_routes = audio_map,
+	.num_dapm_routes = ARRAY_SIZE(audio_map),
+	.get_regmap = msm_anlg_get_regmap,
+};
+
+/*
+ * Allocate the bulk-supply table from platform data, get all regulators
+ * and program each controllable one to its operating voltage and load.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix vs. previous revision: the error path called kfree() on memory
+ * obtained with devm_kzalloc() and relied on regulators obtained with
+ * devm_regulator_bulk_get() -- both are device-managed, so the manual
+ * kfree() caused a double free when devres released the allocation at
+ * driver detach.  Error paths now simply return; devres cleans up.
+ */
+static int msm_anlg_cdc_init_supplies(struct msmfalcon_cdc *msmfalcon_cdc,
+				struct msmfalcon_cdc_pdata *pdata)
+{
+	int ret;
+	int i;
+
+	msmfalcon_cdc->supplies = devm_kzalloc(msmfalcon_cdc->dev,
+					   sizeof(struct regulator_bulk_data) *
+					   ARRAY_SIZE(pdata->regulator),
+					   GFP_KERNEL);
+	if (!msmfalcon_cdc->supplies) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	msmfalcon_cdc->num_of_supplies = 0;
+	if (ARRAY_SIZE(pdata->regulator) > MAX_REGULATOR) {
+		dev_err(msmfalcon_cdc->dev, "%s: Array Size out of bound\n",
+			__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
+		if (pdata->regulator[i].name) {
+			msmfalcon_cdc->supplies[i].supply =
+						pdata->regulator[i].name;
+			msmfalcon_cdc->num_of_supplies++;
+		}
+	}
+
+	ret = devm_regulator_bulk_get(msmfalcon_cdc->dev,
+				      msmfalcon_cdc->num_of_supplies,
+				      msmfalcon_cdc->supplies);
+	if (ret != 0) {
+		dev_err(msmfalcon_cdc->dev,
+			"Failed to get supplies: err = %d\n",
+			ret);
+		goto err;
+	}
+
+	for (i = 0; i < msmfalcon_cdc->num_of_supplies; i++) {
+		if (regulator_count_voltages(
+			msmfalcon_cdc->supplies[i].consumer) <= 0)
+			continue;
+		ret = regulator_set_voltage(msmfalcon_cdc->supplies[i].consumer,
+					    pdata->regulator[i].min_uv,
+					    pdata->regulator[i].max_uv);
+		if (ret) {
+			dev_err(msmfalcon_cdc->dev,
+				"Setting regulator voltage failed for regulator %s err = %d\n",
+				msmfalcon_cdc->supplies[i].supply, ret);
+			goto err;
+		}
+		ret = regulator_set_load(msmfalcon_cdc->supplies[i].consumer,
+					 pdata->regulator[i].optimum_ua);
+		if (ret < 0) {
+			dev_err(msmfalcon_cdc->dev,
+				"Setting regulator optimum mode failed for regulator %s err = %d\n",
+				msmfalcon_cdc->supplies[i].supply, ret);
+			goto err;
+		} else {
+			ret = 0;
+		}
+	}
+
+	return ret;
+
+err:
+	/* supplies and regulators are devm-managed; nothing to free here */
+	return ret;
+}
+
+/*
+ * Enable every static (non on-demand) supply; on failure, disable the
+ * supplies already enabled and return the error.
+ *
+ * Fix vs. previous revision: the unwind loop used `while (ret && --i)`,
+ * which (a) never disabled supplies[0] after a later failure, leaking an
+ * enable count, and (b) when supplies[0] itself failed, decremented i to
+ * -1 (truthy) and accessed supplies[-1] out of bounds.
+ */
+static int msm_anlg_cdc_enable_static_supplies(
+					struct msmfalcon_cdc *msmfalcon_cdc,
+					struct msmfalcon_cdc_pdata *pdata)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < msmfalcon_cdc->num_of_supplies; i++) {
+		if (pdata->regulator[i].ondemand)
+			continue;
+		ret = regulator_enable(msmfalcon_cdc->supplies[i].consumer);
+		if (ret) {
+			dev_err(msmfalcon_cdc->dev, "Failed to enable %s\n",
+				msmfalcon_cdc->supplies[i].supply);
+			break;
+		}
+		dev_dbg(msmfalcon_cdc->dev, "Enabled regulator %s\n",
+			msmfalcon_cdc->supplies[i].supply);
+	}
+
+	/* On error, i is the index that failed; unwind indices i-1 .. 0. */
+	while (ret && --i >= 0)
+		if (!pdata->regulator[i].ondemand)
+			regulator_disable(msmfalcon_cdc->supplies[i].consumer);
+	return ret;
+}
+
+/*
+ * Disable all supplies and drop controllable ones to their low-power
+ * settings (voltage floor 0, load 0).
+ *
+ * Fix vs. previous revision: this also called regulator_bulk_free() and
+ * kfree() on the supplies table.  Both the regulators (from
+ * devm_regulator_bulk_get()) and the table (from devm_kzalloc()) are
+ * device-managed, so freeing them here caused a double put/free when
+ * devres ran at driver detach.
+ */
+static void msm_anlg_cdc_disable_supplies(struct msmfalcon_cdc *msmfalcon_cdc,
+				     struct msmfalcon_cdc_pdata *pdata)
+{
+	int i;
+
+	regulator_bulk_disable(msmfalcon_cdc->num_of_supplies,
+			       msmfalcon_cdc->supplies);
+	for (i = 0; i < msmfalcon_cdc->num_of_supplies; i++) {
+		if (regulator_count_voltages(
+			msmfalcon_cdc->supplies[i].consumer) <= 0)
+			continue;
+		regulator_set_voltage(msmfalcon_cdc->supplies[i].consumer, 0,
+				      pdata->regulator[i].max_uv);
+		regulator_set_load(msmfalcon_cdc->supplies[i].consumer, 0);
+	}
+	/* regulators and the supplies table are devm-managed; do not free */
+}
+
+/* Device-tree match table for the platform driver below. */
+static const struct of_device_id msmfalcon_codec_of_match[] = {
+	{ .compatible = "qcom,pmic-analog-codec", },
+	{},
+};
+
+/*
+ * Deferred-work handler: walk the DT children of the analog codec node
+ * and register a platform device (with dig_plat_data attached) for each
+ * "msm-dig-codec" child, recording the created devices in
+ * pdata->dig_ctrl_data.
+ *
+ * Fix vs. previous revision: strlcpy() was called with
+ * MSM_DIG_CDC_STRING_LEN - 1; strlcpy takes the FULL destination buffer
+ * size and reserves the terminator itself, so pass sizeof(plat_dev_name).
+ */
+static void msm_anlg_add_child_devices(struct work_struct *work)
+{
+	struct msmfalcon_cdc *pdata;
+	struct platform_device *pdev;
+	struct device_node *node;
+	struct msm_dig_ctrl_data *dig_ctrl_data = NULL, *temp;
+	int ret, ctrl_num = 0;
+	struct msm_dig_ctrl_platform_data *platdata;
+	char plat_dev_name[MSM_DIG_CDC_STRING_LEN];
+
+	pdata = container_of(work, struct msmfalcon_cdc,
+			     msm_anlg_add_child_devices_work);
+	if (!pdata) {
+		pr_err("%s: Memory for pdata does not exist\n",
+			__func__);
+		return;
+	}
+	if (!pdata->dev->of_node) {
+		dev_err(pdata->dev,
+			"%s: DT node for pdata does not exist\n", __func__);
+		return;
+	}
+
+	platdata = &pdata->dig_plat_data;
+
+	for_each_child_of_node(pdata->dev->of_node, node) {
+		if (!strcmp(node->name, "msm-dig-codec"))
+			strlcpy(plat_dev_name, "msm_digital_codec",
+				sizeof(plat_dev_name));
+		else
+			continue;
+
+		pdev = platform_device_alloc(plat_dev_name, -1);
+		if (!pdev) {
+			dev_err(pdata->dev, "%s: pdev memory alloc failed\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		pdev->dev.parent = pdata->dev;
+		pdev->dev.of_node = node;
+
+		if (!strcmp(node->name, "msm-dig-codec")) {
+			ret = platform_device_add_data(pdev, platdata,
+						       sizeof(*platdata));
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: cannot add plat data ctrl:%d\n",
+					__func__, ctrl_num);
+				goto fail_pdev_add;
+			}
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: Cannot add platform device\n",
+				__func__);
+			goto fail_pdev_add;
+		}
+
+		if (!strcmp(node->name, "msm-dig-codec")) {
+			temp = krealloc(dig_ctrl_data,
+					(ctrl_num + 1) * sizeof(
+					struct msm_dig_ctrl_data),
+					GFP_KERNEL);
+			if (!temp) {
+				dev_err(&pdev->dev, "out of memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+			dig_ctrl_data = temp;
+			dig_ctrl_data[ctrl_num].dig_pdev = pdev;
+			ctrl_num++;
+			dev_dbg(&pdev->dev,
+				"%s: Added digital codec device(s)\n",
+				__func__);
+			pdata->dig_ctrl_data = dig_ctrl_data;
+		}
+	}
+
+	return;
+fail_pdev_add:
+	platform_device_put(pdev);
+err:
+	return;
+}
+
+/*
+ * Platform probe: defer until the ADSP is loaded, populate platform data
+ * (from DT or board file), set up supplies and SPMI IRQs, register the
+ * ASoC codec, and schedule creation of the digital-codec child devices.
+ * Returns 0 on success, -EPROBE_DEFER while the ADSP is loading, or a
+ * negative errno.
+ *
+ * Fix vs. previous revision: when platform data failed to populate, the
+ * function jumped to `rtn` with ret still 0, so probe reported SUCCESS
+ * with no platform data and later code would dereference a NULL pdata.
+ * It now returns -EINVAL.
+ */
+static int msm_anlg_cdc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msmfalcon_cdc *msmfalcon_cdc = NULL;
+	struct msmfalcon_cdc_pdata *pdata;
+	int adsp_state;
+
+	adsp_state = apr_get_subsys_state();
+	if (adsp_state != APR_SUBSYS_LOADED) {
+		dev_err(&pdev->dev, "Adsp is not loaded yet %d\n",
+			adsp_state);
+		return -EPROBE_DEFER;
+	}
+	device_init_wakeup(&pdev->dev, true);
+
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "%s:Platform data from device tree\n",
+			__func__);
+		pdata = msm_anlg_cdc_populate_dt_pdata(&pdev->dev);
+		pdev->dev.platform_data = pdata;
+	} else {
+		dev_dbg(&pdev->dev, "%s:Platform data from board file\n",
+			__func__);
+		pdata = pdev->dev.platform_data;
+	}
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "%s:Platform data failed to populate\n",
+			__func__);
+		ret = -EINVAL;
+		goto rtn;
+	}
+	msmfalcon_cdc = devm_kzalloc(&pdev->dev, sizeof(struct msmfalcon_cdc),
+				     GFP_KERNEL);
+	if (msmfalcon_cdc == NULL) {
+		ret = -ENOMEM;
+		goto rtn;
+	}
+
+	msmfalcon_cdc->dev = &pdev->dev;
+	ret = msm_anlg_cdc_init_supplies(msmfalcon_cdc, pdata);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Fail to enable Codec supplies\n",
+			__func__);
+		goto rtn;
+	}
+	ret = msm_anlg_cdc_enable_static_supplies(msmfalcon_cdc, pdata);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s: Fail to enable Codec pre-reset supplies\n",
+			__func__);
+		goto rtn;
+	}
+	/* Allow supplies to be ready */
+	usleep_range(5, 6);
+
+	dev_set_drvdata(&pdev->dev, msmfalcon_cdc);
+	if (wcd9xxx_spmi_irq_init()) {
+		dev_err(&pdev->dev,
+			"%s: irq initialization failed\n", __func__);
+	} else {
+		dev_dbg(&pdev->dev,
+			"%s: irq initialization passed\n", __func__);
+	}
+
+	ret = snd_soc_register_codec(&pdev->dev,
+				     &soc_codec_dev_msmfalcon_cdc,
+				     msm_anlg_cdc_i2s_dai,
+				     ARRAY_SIZE(msm_anlg_cdc_i2s_dai));
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s:snd_soc_register_codec failed with error %d\n",
+			__func__, ret);
+		goto err_supplies;
+	}
+	msmfalcon_cdc->dig_plat_data.handle = (void *) msmfalcon_cdc;
+	msmfalcon_cdc->dig_plat_data.update_clkdiv = update_clkdiv;
+	msmfalcon_cdc->dig_plat_data.get_cdc_version = get_cdc_version;
+	msmfalcon_cdc->dig_plat_data.register_notifier =
+					msm_anlg_cdc_dig_register_notifier;
+	INIT_WORK(&msmfalcon_cdc->msm_anlg_add_child_devices_work,
+		  msm_anlg_add_child_devices);
+	schedule_work(&msmfalcon_cdc->msm_anlg_add_child_devices_work);
+
+	return ret;
+err_supplies:
+	msm_anlg_cdc_disable_supplies(msmfalcon_cdc, pdata);
+rtn:
+	return ret;
+}
+
+/*
+ * Platform remove: unregister the ASoC codec and power down the static
+ * supplies.  Always returns 0.
+ */
+static int msm_anlg_cdc_remove(struct platform_device *pdev)
+{
+	struct msmfalcon_cdc *msmfalcon_cdc = dev_get_drvdata(&pdev->dev);
+	struct msmfalcon_cdc_pdata *pdata = msmfalcon_cdc->dev->platform_data;
+
+	snd_soc_unregister_codec(&pdev->dev);
+	msm_anlg_cdc_disable_supplies(msmfalcon_cdc, pdata);
+	return 0;
+}
+
+/* Platform driver glue for the PMIC analog codec. */
+static struct platform_driver msm_anlg_codec_driver = {
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= DRV_NAME,
+		.of_match_table = of_match_ptr(msmfalcon_codec_of_match)
+	},
+	.probe	= msm_anlg_cdc_probe,
+	.remove	= msm_anlg_cdc_remove,
+};
+module_platform_driver(msm_anlg_codec_driver);
+
+MODULE_DESCRIPTION("MSM Audio Analog codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm8x16/msm8x16-wcd.h b/sound/soc/codecs/msmfalcon_cdc/msm-analog-cdc.h
index 776090258119..112b544b7de8 100644
--- a/sound/soc/codecs/msm8x16/msm8x16-wcd.h
+++ b/sound/soc/codecs/msmfalcon_cdc/msm-analog-cdc.h
@@ -9,45 +9,23 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#ifndef MSM8X16_WCD_H
-#define MSM8X16_WCD_H
+#ifndef MSM_ANALOG_CDC_H
+#define MSM_ANALOG_CDC_H
#include <sound/soc.h>
#include <sound/jack.h>
#include <sound/q6afe-v2.h>
#include "../wcd-mbhc-v2.h"
#include "../wcdcal-hwdep.h"
-#include "msm8x16_wcd_registers.h"
+#include "msmfalcon-cdc-registers.h"
#define MICBIAS_EXT_BYP_CAP 0x00
#define MICBIAS_NO_EXT_BYP_CAP 0x01
#define MSM89XX_NUM_IRQ_REGS 2
-#define MAX_REGULATOR 7
-#define MSM89XX_REG_VAL(reg, val) {reg, 0, val}
-#define MSM8X16_TOMBAK_LPASS_AUDIO_CORE_DIG_CODEC_CLK_SEL 0xFE03B004
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CMD_RCGR 0x0181C09C
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CFG_RCGR 0x0181C0A0
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_M 0x0181C0A4
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_N 0x0181C0A8
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_D 0x0181C0AC
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CBCR 0x0181C0B0
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_AHB_CBCR 0x0181C0B4
+#define MAX_REGULATOR 7
+#define MSM89XX_REG_VAL(reg, val) {reg, 0, val}
-#define MSM8X16_CODEC_NAME "msm8x16_wcd_codec"
-
-#define MSM89XX_IS_CDC_CORE_REG(reg) \
- (((reg >= 0x00) && (reg <= 0x3FF)) ? 1 : 0)
-#define MSM89XX_IS_PMIC_CDC_REG(reg) \
- (((reg >= 0xF000) && (reg <= 0xF1FF)) ? 1 : 0)
-/*
- * MCLK activity indicators during suspend and resume call
- */
-#define MCLK_SUS_DIS 1
-#define MCLK_SUS_RSC 2
-#define MCLK_SUS_NO_ACT 3
-
-#define NUM_DECIMATORS 4
#define MSM89XX_VDD_SPKDRV_NAME "cdc-vdd-spkdrv"
#define DEFAULT_MULTIPLIER 800
@@ -59,29 +37,6 @@ extern const u8 msm89xx_cdc_core_reg_readable[MSM89XX_CDC_CORE_CACHE_SIZE];
extern struct regmap_config msm89xx_cdc_core_regmap_config;
extern struct regmap_config msm89xx_pmic_cdc_regmap_config;
-enum codec_versions {
- TOMBAK_1_0,
- TOMBAK_2_0,
- CONGA,
- CAJON,
- CAJON_2_0,
- DIANGU,
- UNSUPPORTED,
-};
-
-/* Support different hph modes */
-enum {
- NORMAL_MODE = 0,
- HD2_MODE,
-};
-
-/* Codec supports 1 compander */
-enum {
- COMPANDER_NONE = 0,
- COMPANDER_1, /* HPHL/R */
- COMPANDER_MAX,
-};
-
enum wcd_curr_ref {
I_h4_UA = 0,
I_pt5_UA,
@@ -107,7 +62,7 @@ enum wcd_mbhc_imp_det_pin {
* If ldoh_v = 2.85 250 mv < cfiltx_mv < 2700 mv
*/
-struct wcd9xxx_micbias_setting {
+struct wcd_micbias_setting {
u8 ldoh_v;
u32 cfilt1_mv; /* in mv */
u32 cfilt2_mv; /* in mv */
@@ -128,41 +83,19 @@ struct wcd9xxx_micbias_setting {
bool bias2_is_headset_only;
};
-enum msm8x16_wcd_pid_current {
+enum msmfalcon_cdc_pid_current {
MSM89XX_PID_MIC_2P5_UA,
MSM89XX_PID_MIC_5_UA,
MSM89XX_PID_MIC_10_UA,
MSM89XX_PID_MIC_20_UA,
};
-struct msm8x16_wcd_reg_mask_val {
+struct msmfalcon_cdc_reg_mask_val {
u16 reg;
u8 mask;
u8 val;
};
-enum msm8x16_wcd_mbhc_analog_pwr_cfg {
- MSM89XX_ANALOG_PWR_COLLAPSED = 0,
- MSM89XX_ANALOG_PWR_ON,
- MSM89XX_NUM_ANALOG_PWR_CONFIGS,
-};
-
-/* Number of input and output I2S port */
-enum {
- MSM89XX_RX1 = 0,
- MSM89XX_RX2,
- MSM89XX_RX3,
- MSM89XX_RX_MAX,
-};
-
-enum {
- MSM89XX_TX1 = 0,
- MSM89XX_TX2,
- MSM89XX_TX3,
- MSM89XX_TX4,
- MSM89XX_TX_MAX,
-};
-
enum {
/* INTR_REG 0 - Digital Periph */
MSM89XX_IRQ_SPKR_CNP = 0,
@@ -199,7 +132,7 @@ enum {
CODEC_DELAY_1_1_MS = 1100,
};
-struct msm8x16_wcd_regulator {
+struct msmfalcon_cdc_regulator {
const char *name;
int min_uv;
int max_uv;
@@ -221,96 +154,87 @@ struct wcd_imped_i_ref {
int offset;
};
-struct msm8x16_wcd_pdata {
- int irq;
- int irq_base;
- int num_irqs;
- int reset_gpio;
- void *msm8x16_wcd_ahb_base_vaddr;
- struct wcd9xxx_micbias_setting micbias;
- struct msm8x16_wcd_regulator regulator[MAX_REGULATOR];
- u32 mclk_rate;
- u32 is_lpass;
-};
-
-enum msm8x16_wcd_micbias_num {
+enum msmfalcon_cdc_micbias_num {
MSM89XX_MICBIAS1 = 0,
};
-struct msm8x16_wcd {
- struct device *dev;
- struct mutex io_lock;
- u8 version;
+/* Hold instance to digital codec platform device */
+struct msm_dig_ctrl_data {
+ struct platform_device *dig_pdev;
+};
- int reset_gpio;
- int (*read_dev)(struct snd_soc_codec *codec,
- unsigned short reg);
- int (*write_dev)(struct snd_soc_codec *codec,
- unsigned short reg, u8 val);
+struct msm_dig_ctrl_platform_data {
+ void *handle;
+ void (*update_clkdiv)(void *handle, int val);
+ int (*get_cdc_version)(void *handle);
+ int (*register_notifier)(void *handle,
+ struct notifier_block *nblock,
+ bool enable);
+};
+struct msmfalcon_cdc {
+ struct device *dev;
u32 num_of_supplies;
struct regulator_bulk_data *supplies;
+ struct snd_soc_codec *codec;
+ struct work_struct msm_anlg_add_child_devices_work;
+ struct msm_dig_ctrl_platform_data dig_plat_data;
+ /* digital codec data structure */
+ struct msm_dig_ctrl_data *dig_ctrl_data;
+ struct blocking_notifier_head notifier;
+};
- u8 idbyte[4];
-
- int num_irqs;
- u32 mclk_rate;
+struct msmfalcon_cdc_pdata {
+ struct wcd_micbias_setting micbias;
+ struct msmfalcon_cdc_regulator regulator[MAX_REGULATOR];
};
-struct msm8x16_wcd_priv {
+struct msmfalcon_cdc_priv {
struct snd_soc_codec *codec;
u16 pmic_rev;
u16 codec_version;
+ u16 analog_major_rev;
u32 boost_voltage;
u32 adc_count;
u32 rx_bias_count;
- s32 dmic_1_2_clk_cnt;
- u32 mute_mask;
bool int_mclk0_enabled;
- bool clock_active;
- bool config_mode_active;
u16 boost_option;
/* mode to select hd2 */
u32 hph_mode;
/* compander used for each rx chain */
- u32 comp_enabled[MSM89XX_RX_MAX];
bool spk_boost_set;
bool ear_pa_boost_set;
bool ext_spk_boost_set;
- bool dec_active[NUM_DECIMATORS];
struct on_demand_supply on_demand_list[ON_DEMAND_SUPPLIES_MAX];
struct regulator *spkdrv_reg;
+ struct blocking_notifier_head notifier;
/* mbhc module */
struct wcd_mbhc mbhc;
/* cal info for codec */
struct fw_info *fw_data;
- struct blocking_notifier_head notifier;
+ struct notifier_block audio_ssr_nb;
int (*codec_spk_ext_pa_cb)(struct snd_soc_codec *codec, int enable);
- int (*codec_hph_comp_gpio)(bool enable);
unsigned long status_mask;
struct wcd_imped_i_ref imped_i_ref;
enum wcd_mbhc_imp_det_pin imped_det_pin;
+ /* Entry for version info */
+ struct snd_info_entry *entry;
+ struct snd_info_entry *version_entry;
};
-extern int msm8x16_wcd_mclk_enable(struct snd_soc_codec *codec, int mclk_enable,
- bool dapm);
+extern int msm_anlg_cdc_mclk_enable(struct snd_soc_codec *codec,
+ int mclk_enable, bool dapm);
-extern int msm8x16_wcd_hs_detect(struct snd_soc_codec *codec,
+extern int msm_anlg_cdc_hs_detect(struct snd_soc_codec *codec,
struct wcd_mbhc_config *mbhc_cfg);
-extern void msm8x16_wcd_hs_detect_exit(struct snd_soc_codec *codec);
+extern void msm_anlg_cdc_hs_detect_exit(struct snd_soc_codec *codec);
-extern void msm8x16_update_int_spk_boost(bool enable);
+extern void msmfalcon_cdc_update_int_spk_boost(bool enable);
-extern void msm8x16_wcd_spk_ext_pa_cb(
+extern void msm_anlg_cdc_spk_ext_pa_cb(
int (*codec_spk_ext_pa)(struct snd_soc_codec *codec,
int enable), struct snd_soc_codec *codec);
-
-extern void msm8x16_wcd_hph_comp_cb(
- int (*codec_hph_comp_gpio)(bool enable),
- struct snd_soc_codec *codec);
-void enable_digital_callback(void *flag);
-void disable_digital_callback(void *flag);
-
+int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+ struct snd_soc_codec *codec);
#endif
-
diff --git a/sound/soc/codecs/msmfalcon_cdc/msm-cdc-common.h b/sound/soc/codecs/msmfalcon_cdc/msm-cdc-common.h
new file mode 100644
index 000000000000..9f2e9355197b
--- /dev/null
+++ b/sound/soc/codecs/msmfalcon_cdc/msm-cdc-common.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/regmap.h>
+#include "msmfalcon-cdc-registers.h"
+
+extern struct reg_default
+ msm89xx_cdc_core_defaults[MSM89XX_CDC_CORE_CACHE_SIZE];
+extern struct reg_default
+ msm89xx_pmic_cdc_defaults[MSM89XX_PMIC_CDC_CACHE_SIZE];
+
+bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg);
+bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg);
+
+enum {
+ AIF1_PB = 0,
+ AIF1_CAP,
+ AIF2_VIFEED,
+ AIF3_SVA,
+ NUM_CODEC_DAIS,
+};
+
+enum codec_versions {
+ TOMBAK_1_0,
+ TOMBAK_2_0,
+ CONGA,
+ CAJON,
+ CAJON_2_0,
+ DIANGU,
+ DRAX_CDC,
+ UNSUPPORTED,
+};
+
+/* Support different hph modes */
+enum {
+ NORMAL_MODE = 0,
+ HD2_MODE,
+};
+
+enum dig_cdc_notify_event {
+ DIG_CDC_EVENT_INVALID,
+ DIG_CDC_EVENT_CLK_ON,
+ DIG_CDC_EVENT_CLK_OFF,
+ DIG_CDC_EVENT_RX1_MUTE_ON,
+ DIG_CDC_EVENT_RX1_MUTE_OFF,
+ DIG_CDC_EVENT_RX2_MUTE_ON,
+ DIG_CDC_EVENT_RX2_MUTE_OFF,
+ DIG_CDC_EVENT_RX3_MUTE_ON,
+ DIG_CDC_EVENT_RX3_MUTE_OFF,
+ DIG_CDC_EVENT_PRE_RX1_INT_ON,
+ DIG_CDC_EVENT_PRE_RX2_INT_ON,
+ DIG_CDC_EVENT_POST_RX1_INT_OFF,
+ DIG_CDC_EVENT_POST_RX2_INT_OFF,
+ DIG_CDC_EVENT_SSR_DOWN,
+ DIG_CDC_EVENT_SSR_UP,
+ DIG_CDC_EVENT_LAST,
+};
diff --git a/sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.c b/sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.c
new file mode 100644
index 000000000000..d036b82654f0
--- /dev/null
+++ b/sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.c
@@ -0,0 +1,2039 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/workqueue.h>
+#include <linux/regmap.h>
+#include <sound/q6afe-v2.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include "msmfalcon-cdc-registers.h"
+#include "msm-digital-cdc.h"
+#include "msm-cdc-common.h"
+#include "../../msm/msmfalcon-common.h"
+
+#define DRV_NAME "msm_digital_codec"
+#define MCLK_RATE_9P6MHZ 9600000
+#define MCLK_RATE_12P288MHZ 12288000
+#define TX_MUX_CTL_CUT_OFF_FREQ_MASK 0x30
+#define CF_MIN_3DB_4HZ 0x0
+#define CF_MIN_3DB_75HZ 0x1
+#define CF_MIN_3DB_150HZ 0x2
+
+#define MSM_DIG_CDC_VERSION_ENTRY_SIZE 32
+
+static unsigned long rx_digital_gain_reg[] = {
+ MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
+ MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
+ MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
+};
+
+static unsigned long tx_digital_gain_reg[] = {
+ MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
+ MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+
+struct snd_soc_codec *registered_digcodec;
+struct hpf_work tx_hpf_work[NUM_DECIMATORS];
+
+/* Codec supports 2 IIR filters */
+enum {
+ IIR1 = 0,
+ IIR2,
+ IIR_MAX,
+};
+
+static int msm_digcdc_clock_control(bool flag)
+{
+ int ret = -EINVAL;
+ struct msm_asoc_mach_data *pdata = NULL;
+
+ pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+
+ if (flag) {
+ if (atomic_read(&pdata->int_mclk0_enabled) == false) {
+ pdata->digital_cdc_core_clk.enable = 1;
+ ret = afe_set_lpass_clock_v2(
+ AFE_PORT_ID_PRIMARY_MI2S_RX,
+ &pdata->digital_cdc_core_clk);
+ if (ret < 0) {
+ pr_err("%s:failed to enable the MCLK\n",
+ __func__);
+ return ret;
+ }
+ pr_debug("enabled digital codec core clk\n");
+ atomic_set(&pdata->int_mclk0_enabled, true);
+ schedule_delayed_work(&pdata->disable_int_mclk0_work,
+ 50);
+ }
+ } else {
+ dev_dbg(registered_digcodec->dev,
+ "disable MCLK, workq to disable set already\n");
+ }
+ return 0;
+}
+
+static void enable_digital_callback(void *flag)
+{
+ msm_digcdc_clock_control(true);
+}
+
+static void disable_digital_callback(void *flag)
+{
+ pr_debug("disable mclk happens in workq\n");
+}
+
+static int msm_dig_cdc_put_dec_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist =
+ dapm_kcontrol_get_wlist(kcontrol);
+ struct snd_soc_dapm_widget *w = wlist->widgets[0];
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int dec_mux, decimator;
+ char *dec_name = NULL;
+ char *widget_name = NULL;
+ char *temp;
+ u16 tx_mux_ctl_reg;
+ u8 adc_dmic_sel = 0x0;
+ int ret = 0;
+ char *dec_num;
+
+ if (ucontrol->value.enumerated.item[0] > e->items) {
+ dev_err(codec->dev, "%s: Invalid enum value: %d\n",
+ __func__, ucontrol->value.enumerated.item[0]);
+ return -EINVAL;
+ }
+ dec_mux = ucontrol->value.enumerated.item[0];
+
+ widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+ if (!widget_name) {
+ dev_err(codec->dev, "%s: failed to copy string\n",
+ __func__);
+ return -ENOMEM;
+ }
+ temp = widget_name;
+
+ dec_name = strsep(&widget_name, " ");
+ widget_name = temp;
+ if (!dec_name) {
+ dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+ __func__, w->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dec_num = strpbrk(dec_name, "12345");
+ if (dec_num == NULL) {
+ dev_err(codec->dev, "%s: Invalid DEC selected\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kstrtouint(dec_num, 10, &decimator);
+ if (ret < 0) {
+ dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+ __func__, dec_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_dbg(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n"
+ , __func__, w->name, decimator, dec_mux);
+
+ switch (decimator) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ if ((dec_mux == 4) || (dec_mux == 5) ||
+ (dec_mux == 6) || (dec_mux == 7))
+ adc_dmic_sel = 0x1;
+ else
+ adc_dmic_sel = 0x0;
+ break;
+ default:
+ dev_err(codec->dev, "%s: Invalid Decimator = %u\n",
+ __func__, decimator);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tx_mux_ctl_reg =
+ MSM89XX_CDC_CORE_TX1_MUX_CTL + 32 * (decimator - 1);
+
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel);
+
+ ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
+
+out:
+ kfree(widget_name);
+ return ret;
+}
+
+
+static int msm_dig_cdc_codec_config_compander(struct snd_soc_codec *codec,
+ int interp_n, int event)
+{
+ struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s: event %d shift %d, enabled %d\n",
+ __func__, event, interp_n,
+ dig_cdc->comp_enabled[interp_n]);
+
+ /* compander is not enabled */
+ if (!dig_cdc->comp_enabled[interp_n])
+ return 0;
+
+ switch (dig_cdc->comp_enabled[interp_n]) {
+ case COMPANDER_1:
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ /* Enable Compander Clock */
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL,
+ 1 << interp_n, 1 << interp_n);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0x50);
+ /* add sleep for compander to settle */
+ usleep_range(1000, 1100);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x28);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0xB0);
+
+ /* Enable Compander GPIO */
+ if (dig_cdc->codec_hph_comp_gpio)
+ dig_cdc->codec_hph_comp_gpio(1, codec);
+ } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ /* Disable Compander GPIO */
+ if (dig_cdc->codec_hph_comp_gpio)
+ dig_cdc->codec_hph_comp_gpio(0, codec);
+
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x05);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL,
+ 1 << interp_n, 0);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x00);
+ }
+ break;
+ default:
+ dev_dbg(codec->dev, "%s: Invalid compander %d\n", __func__,
+ dig_cdc->comp_enabled[interp_n]);
+ break;
+ };
+
+ return 0;
+}
+
+/**
+ * msm_dig_cdc_hph_comp_cb - registers callback to codec by machine driver.
+ *
+ * @codec_hph_comp_gpio: function pointer to set comp gpio at machine driver
+ * @codec: codec pointer
+ *
+ */
+void msm_dig_cdc_hph_comp_cb(
+ int (*codec_hph_comp_gpio)(bool enable, struct snd_soc_codec *codec),
+ struct snd_soc_codec *codec)
+{
+ struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+
+ pr_debug("%s: Enter\n", __func__);
+ dig_cdc->codec_hph_comp_gpio = codec_hph_comp_gpio;
+}
+EXPORT_SYMBOL(msm_dig_cdc_hph_comp_cb);
+
+static int msm_dig_cdc_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct msm_dig *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+ dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+
+ if (w->shift >= MSM89XX_RX_MAX || w->shift < 0) {
+ dev_err(codec->dev, "%s: wrong RX index: %d\n",
+ __func__, w->shift);
+ return -EINVAL;
+ }
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ msm_dig_cdc_codec_config_compander(codec, w->shift, event);
+ /* apply the digital gain after the interpolator is enabled*/
+ if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
+ snd_soc_write(codec,
+ rx_digital_gain_reg[w->shift],
+ snd_soc_read(codec,
+ rx_digital_gain_reg[w->shift])
+ );
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ msm_dig_cdc_codec_config_compander(codec, w->shift, event);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
+ 1 << w->shift, 1 << w->shift);
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
+ 1 << w->shift, 0x0);
+ /*
+ * disable the mute enabled during the PMD of this device
+ */
+ if ((w->shift == 0) &&
+ (msm_dig_cdc->mute_mask & HPHL_PA_DISABLE)) {
+ pr_debug("disabling HPHL mute\n");
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
+ msm_dig_cdc->mute_mask &= ~(HPHL_PA_DISABLE);
+ } else if ((w->shift == 1) &&
+ (msm_dig_cdc->mute_mask & HPHR_PA_DISABLE)) {
+ pr_debug("disabling HPHR mute\n");
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
+ msm_dig_cdc->mute_mask &= ~(HPHR_PA_DISABLE);
+ } else if ((w->shift == 2) &&
+ (msm_dig_cdc->mute_mask & SPKR_PA_DISABLE)) {
+ pr_debug("disabling SPKR mute\n");
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
+ msm_dig_cdc->mute_mask &= ~(SPKR_PA_DISABLE);
+ }
+ }
+ return 0;
+}
+
+static int msm_dig_cdc_codec_enable_rx_chain(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, w->reg,
+ 1 << w->shift, 0x00);
+ break;
+ }
+ return 0;
+}
+
+static int msm_dig_cdc_get_iir_enable_audio_mixer(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ int iir_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->reg;
+ int band_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+
+ ucontrol->value.integer.value[0] =
+ (snd_soc_read(codec,
+ (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
+ (1 << band_idx)) != 0;
+
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
+ iir_idx, band_idx,
+ (uint32_t)ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_dig_cdc_put_iir_enable_audio_mixer(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ int iir_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->reg;
+ int band_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+ int value = ucontrol->value.integer.value[0];
+
+ /* Mask first 5 bits, 6-8 are reserved */
+ snd_soc_update_bits(codec,
+ (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx),
+ (1 << band_idx), (value << band_idx));
+
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
+ iir_idx, band_idx,
+ ((snd_soc_read(codec,
+ (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
+ (1 << band_idx)) != 0));
+
+ return 0;
+}
+
+static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
+ int iir_idx, int band_idx,
+ int coeff_idx)
+{
+ uint32_t value = 0;
+
+ /* Address does not automatically update if reading */
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+ ((band_idx * BAND_MAX + coeff_idx)
+ * sizeof(uint32_t)) & 0x7F);
+
+ value |= snd_soc_read(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx));
+
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+ ((band_idx * BAND_MAX + coeff_idx)
+ * sizeof(uint32_t) + 1) & 0x7F);
+
+ value |= (snd_soc_read(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 8);
+
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+ ((band_idx * BAND_MAX + coeff_idx)
+ * sizeof(uint32_t) + 2) & 0x7F);
+
+ value |= (snd_soc_read(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 16);
+
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+ ((band_idx * BAND_MAX + coeff_idx)
+ * sizeof(uint32_t) + 3) & 0x7F);
+
+ /* Mask bits top 2 bits since they are reserved */
+ value |= ((snd_soc_read(codec, (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL
+ + 64 * iir_idx)) & 0x3f) << 24);
+
+ return value;
+
+}
+
+static void set_iir_band_coeff(struct snd_soc_codec *codec,
+ int iir_idx, int band_idx,
+ uint32_t value)
+{
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+ (value & 0xFF));
+
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+ (value >> 8) & 0xFF);
+
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+ (value >> 16) & 0xFF);
+
+ /* Mask top 2 bits, 7-8 are reserved */
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+ (value >> 24) & 0x3F);
+
+}
+
+static int msm_dig_cdc_get_iir_band_audio_mixer(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ int iir_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->reg;
+ int band_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+
+ ucontrol->value.integer.value[0] =
+ get_iir_band_coeff(codec, iir_idx, band_idx, 0);
+ ucontrol->value.integer.value[1] =
+ get_iir_band_coeff(codec, iir_idx, band_idx, 1);
+ ucontrol->value.integer.value[2] =
+ get_iir_band_coeff(codec, iir_idx, band_idx, 2);
+ ucontrol->value.integer.value[3] =
+ get_iir_band_coeff(codec, iir_idx, band_idx, 3);
+ ucontrol->value.integer.value[4] =
+ get_iir_band_coeff(codec, iir_idx, band_idx, 4);
+
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
+ "%s: IIR #%d band #%d b1 = 0x%x\n"
+ "%s: IIR #%d band #%d b2 = 0x%x\n"
+ "%s: IIR #%d band #%d a1 = 0x%x\n"
+ "%s: IIR #%d band #%d a2 = 0x%x\n",
+ __func__, iir_idx, band_idx,
+ (uint32_t)ucontrol->value.integer.value[0],
+ __func__, iir_idx, band_idx,
+ (uint32_t)ucontrol->value.integer.value[1],
+ __func__, iir_idx, band_idx,
+ (uint32_t)ucontrol->value.integer.value[2],
+ __func__, iir_idx, band_idx,
+ (uint32_t)ucontrol->value.integer.value[3],
+ __func__, iir_idx, band_idx,
+ (uint32_t)ucontrol->value.integer.value[4]);
+ return 0;
+}
+
+static int msm_dig_cdc_put_iir_band_audio_mixer(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ int iir_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->reg;
+ int band_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+
+ /* Mask top bit it is reserved */
+ /* Updates addr automatically for each B2 write */
+ snd_soc_write(codec,
+ (MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+ (band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
+
+
+ set_iir_band_coeff(codec, iir_idx, band_idx,
+ ucontrol->value.integer.value[0]);
+ set_iir_band_coeff(codec, iir_idx, band_idx,
+ ucontrol->value.integer.value[1]);
+ set_iir_band_coeff(codec, iir_idx, band_idx,
+ ucontrol->value.integer.value[2]);
+ set_iir_band_coeff(codec, iir_idx, band_idx,
+ ucontrol->value.integer.value[3]);
+ set_iir_band_coeff(codec, iir_idx, band_idx,
+ ucontrol->value.integer.value[4]);
+
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
+ "%s: IIR #%d band #%d b1 = 0x%x\n"
+ "%s: IIR #%d band #%d b2 = 0x%x\n"
+ "%s: IIR #%d band #%d a1 = 0x%x\n"
+ "%s: IIR #%d band #%d a2 = 0x%x\n",
+ __func__, iir_idx, band_idx,
+ get_iir_band_coeff(codec, iir_idx, band_idx, 0),
+ __func__, iir_idx, band_idx,
+ get_iir_band_coeff(codec, iir_idx, band_idx, 1),
+ __func__, iir_idx, band_idx,
+ get_iir_band_coeff(codec, iir_idx, band_idx, 2),
+ __func__, iir_idx, band_idx,
+ get_iir_band_coeff(codec, iir_idx, band_idx, 3),
+ __func__, iir_idx, band_idx,
+ get_iir_band_coeff(codec, iir_idx, band_idx, 4));
+ return 0;
+}
+
+static void tx_hpf_corner_freq_callback(struct work_struct *work)
+{
+ struct delayed_work *hpf_delayed_work;
+ struct hpf_work *hpf_work;
+ struct snd_soc_codec *codec;
+ struct msm_dig *msm_dig_cdc;
+ u16 tx_mux_ctl_reg;
+ u8 hpf_cut_of_freq;
+
+ hpf_delayed_work = to_delayed_work(work);
+ hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
+ codec = hpf_work->dig_cdc->codec;
+ msm_dig_cdc = codec->control_data;
+ hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
+
+ tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
+ (hpf_work->decimator - 1) * 32;
+
+ dev_dbg(codec->dev, "%s(): decimator %u hpf_cut_of_freq 0x%x\n",
+ __func__, hpf_work->decimator, (unsigned int)hpf_cut_of_freq);
+ msm_dig_cdc->update_clkdiv(msm_dig_cdc->handle, 0x51);
+
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30, hpf_cut_of_freq << 4);
+}
+
+static int msm_dig_cdc_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ int value = 0, reg;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (w->shift == 0)
+ reg = MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL;
+ else if (w->shift == 1)
+ reg = MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL;
+ else
+ goto ret;
+ value = snd_soc_read(codec, reg);
+ snd_soc_write(codec, reg, value);
+ break;
+ default:
+ pr_err("%s: event = %d not expected\n", __func__, event);
+ }
+ret:
+ return 0;
+}
+
+static int msm_dig_cdc_compander_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+ int comp_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->reg;
+ int rx_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+
+ dev_dbg(codec->dev, "%s: msm_dig_cdc->comp[%d]_enabled[%d] = %d\n",
+ __func__, comp_idx, rx_idx,
+ dig_cdc->comp_enabled[rx_idx]);
+
+ ucontrol->value.integer.value[0] = dig_cdc->comp_enabled[rx_idx];
+
+ dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int msm_dig_cdc_compander_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+ int comp_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->reg;
+ int rx_idx = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+ int value = ucontrol->value.integer.value[0];
+
+ dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+
+ if (dig_cdc->version >= DIANGU) {
+ if (!value)
+ dig_cdc->comp_enabled[rx_idx] = 0;
+ else
+ dig_cdc->comp_enabled[rx_idx] = comp_idx;
+ }
+
+ dev_dbg(codec->dev, "%s: msm_dig_cdc->comp[%d]_enabled[%d] = %d\n",
+ __func__, comp_idx, rx_idx,
+ dig_cdc->comp_enabled[rx_idx]);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new compander_kcontrols[] = {
+ SOC_SINGLE_EXT("COMP0 RX1", COMPANDER_1, MSM89XX_RX1, 1, 0,
+ msm_dig_cdc_compander_get, msm_dig_cdc_compander_set),
+
+ SOC_SINGLE_EXT("COMP0 RX2", COMPANDER_1, MSM89XX_RX2, 1, 0,
+ msm_dig_cdc_compander_get, msm_dig_cdc_compander_set),
+
+};
+
+static int msm_dig_cdc_set_interpolator_rate(struct snd_soc_dai *dai,
+ u8 rx_fs_rate_reg_val,
+ u32 sample_rate)
+{
+ snd_soc_update_bits(dai->codec,
+ MSM89XX_CDC_CORE_RX1_B5_CTL, 0xF0, rx_fs_rate_reg_val);
+ snd_soc_update_bits(dai->codec,
+ MSM89XX_CDC_CORE_RX2_B5_CTL, 0xF0, rx_fs_rate_reg_val);
+ return 0;
+}
+
+static int msm_dig_cdc_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u8 tx_fs_rate, rx_fs_rate, rx_clk_fs_rate;
+ int ret;
+
+ dev_dbg(dai->codec->dev,
+ "%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
+ __func__, dai->name, dai->id, params_rate(params),
+ params_channels(params), params_format(params));
+
+ switch (params_rate(params)) {
+ case 8000:
+ tx_fs_rate = 0x00;
+ rx_fs_rate = 0x00;
+ rx_clk_fs_rate = 0x00;
+ break;
+ case 16000:
+ tx_fs_rate = 0x20;
+ rx_fs_rate = 0x20;
+ rx_clk_fs_rate = 0x01;
+ break;
+ case 32000:
+ tx_fs_rate = 0x40;
+ rx_fs_rate = 0x40;
+ rx_clk_fs_rate = 0x02;
+ break;
+ case 44100:
+ case 48000:
+ tx_fs_rate = 0x60;
+ rx_fs_rate = 0x60;
+ rx_clk_fs_rate = 0x03;
+ break;
+ case 96000:
+ tx_fs_rate = 0x80;
+ rx_fs_rate = 0x80;
+ rx_clk_fs_rate = 0x04;
+ break;
+ case 192000:
+ tx_fs_rate = 0xA0;
+ rx_fs_rate = 0xA0;
+ rx_clk_fs_rate = 0x05;
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Invalid sampling rate %d\n", __func__,
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(dai->codec,
+ MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x0F, rx_clk_fs_rate);
+
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_CAPTURE:
+ break;
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ ret = msm_dig_cdc_set_interpolator_rate(dai, rx_fs_rate,
+ params_rate(params));
+ if (ret < 0) {
+ dev_err(dai->codec->dev,
+ "%s: set decimator rate failed %d\n", __func__,
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Invalid stream type %d\n", __func__,
+ substream->stream);
+ return -EINVAL;
+ }
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ snd_soc_update_bits(dai->codec,
+ MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x20);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ snd_soc_update_bits(dai->codec,
+ MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x00);
+ break;
+ default:
+ dev_err(dai->codec->dev, "%s: wrong format selected\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the DMIC widgets.
+ *
+ * Derives the DMIC index from the widget name, refcounts the clock
+ * enable that each DMIC pair (1/2 and 3/4) shares, and programs the
+ * per-mic TXn_DMIC_CTL register on power-up.
+ */
+static int msm_dig_cdc_codec_enable_dmic(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+	char *digit = strpbrk(w->name, "1234");
+	unsigned int dmic_id;
+	s32 *clk_use_cnt;
+	u16 clk_ctl_reg;
+	u8 clk_en_mask;
+	int rc;
+
+	if (digit == NULL) {
+		dev_err(codec->dev, "%s: Invalid DMIC\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = kstrtouint(digit, 10, &dmic_id);
+	if (rc < 0) {
+		dev_err(codec->dev,
+			"%s: Invalid DMIC line on the codec\n", __func__);
+		return -EINVAL;
+	}
+
+	/* DMIC1/2 and DMIC3/4 each share one clock-enable register */
+	if (dmic_id == 1 || dmic_id == 2) {
+		clk_en_mask = 0x01;
+		clk_use_cnt = &dig_cdc->dmic_1_2_clk_cnt;
+		clk_ctl_reg = MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL;
+		dev_dbg(codec->dev,
+			"%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
+			__func__, event, dmic_id, *clk_use_cnt);
+	} else if (dmic_id == 3 || dmic_id == 4) {
+		clk_en_mask = 0x01;
+		clk_use_cnt = &dig_cdc->dmic_3_4_clk_cnt;
+		clk_ctl_reg = MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL;
+		dev_dbg(codec->dev,
+			"%s() event %d DMIC%d dmic_3_4_clk_cnt %d\n",
+			__func__, event, dmic_id, *clk_use_cnt);
+	} else {
+		dev_err(codec->dev, "%s: Invalid DMIC Selection\n", __func__);
+		return -EINVAL;
+	}
+
+	if (event == SND_SOC_DAPM_PRE_PMU) {
+		/* First user of the pair switches the shared clock on */
+		if (++(*clk_use_cnt) == 1) {
+			snd_soc_update_bits(codec, clk_ctl_reg,
+					    0x0E, 0x02);
+			snd_soc_update_bits(codec, clk_ctl_reg,
+					    clk_en_mask, clk_en_mask);
+		}
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_TX1_DMIC_CTL + (dmic_id - 1) * 0x20,
+			0x07, 0x01);
+	} else if (event == SND_SOC_DAPM_POST_PMD) {
+		/* Last user of the pair gates the shared clock */
+		if (--(*clk_use_cnt) == 0)
+			snd_soc_update_bits(codec, clk_ctl_reg,
+					    clk_en_mask, 0);
+	}
+	return 0;
+}
+
+/*
+ * DAPM event handler for the DEC (TX decimator) mux widgets.
+ *
+ * Parses the decimator index from the widget name and sequences the
+ * TX path's mute, HPF cut-off and reset bits around power transitions:
+ * mute before power-up, deferred HPF narrowing after power-up, mute +
+ * HPF bypass before power-down, and a reset pulse after power-down.
+ */
+static int msm_dig_cdc_codec_enable_dec(struct snd_soc_dapm_widget *w,
+					struct snd_kcontrol *kcontrol,
+					int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_asoc_mach_data *pdata = NULL;
+	unsigned int decimator;
+	struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+	struct msm_dig *msm_dig_cdc = codec->control_data;
+	char *dec_name = NULL;
+	char *widget_name = NULL;
+	char *temp;
+	int ret = 0, i;
+	u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
+	u8 dec_hpf_cut_of_freq;
+	int offset;
+	char *dec_num;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	/* Copy the widget name: strsep() mutates its argument, and the
+	 * duplicate must survive for kfree() at 'out'.
+	 */
+	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+	if (!widget_name)
+		return -ENOMEM;
+	temp = widget_name;
+
+	dec_name = strsep(&widget_name, " ");
+	widget_name = temp;
+	if (!dec_name) {
+		dev_err(codec->dev,
+			"%s: Invalid decimator = %s\n", __func__, w->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Extract the numeric suffix, e.g. "DEC3" -> 3 */
+	dec_num = strpbrk(dec_name, "12345");
+	if (dec_num == NULL) {
+		dev_err(codec->dev, "%s: Invalid Decimator\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = kstrtouint(dec_num, 10, &decimator);
+	if (ret < 0) {
+		dev_err(codec->dev,
+			"%s: Invalid decimator = %s\n", __func__, dec_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_dbg(codec->dev,
+		"%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
+		w->name, dec_name, decimator);
+
+	if (w->reg == MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL) {
+		dec_reset_reg = MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL;
+		offset = 0;
+	} else {
+		dev_err(codec->dev, "%s: Error, incorrect dec\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Per-decimator registers are laid out at a 32-byte stride */
+	tx_vol_ctl_reg = MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
+			 32 * (decimator - 1);
+	tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
+			  32 * (decimator - 1);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable TX digital mute */
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+		for (i = 0; i < NUM_DECIMATORS; i++) {
+			if (decimator == i + 1)
+				dig_cdc->dec_active[i] = true;
+		}
+
+		/* Save the current HPF cut-off (bits [5:4]) so it can be
+		 * restored on power-down.
+		 */
+		dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg);
+
+		dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
+
+		tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
+			dec_hpf_cut_of_freq;
+
+		if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) {
+
+			/* set cut-off freq to CF_MIN_3DB_150HZ (0x1) */
+			snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
+					    CF_MIN_3DB_150HZ << 4);
+		}
+		msm_dig_cdc->update_clkdiv(msm_dig_cdc->handle, 0x42);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* enable HPF */
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00);
+
+		/* Restore the caller-chosen cut-off after a settling delay */
+		if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
+				CF_MIN_3DB_150HZ) {
+
+			schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
+					msecs_to_jiffies(300));
+		}
+		/* apply the digital gain after the decimator is enabled*/
+		if ((w->shift) < ARRAY_SIZE(tx_digital_gain_reg))
+			snd_soc_write(codec,
+				  tx_digital_gain_reg[w->shift + offset],
+				  snd_soc_read(codec,
+				  tx_digital_gain_reg[w->shift + offset])
+				  );
+		if (pdata->lb_mode) {
+			pr_debug("%s: loopback mode unmute the DEC\n",
+				 __func__);
+			snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+		}
+		/* NOTE(review): this unconditional unmute makes the lb_mode
+		 * write above redundant (same register, same bits).
+		 */
+		snd_soc_update_bits(codec, tx_vol_ctl_reg,
+				    0x01, 0x00);
+
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* Mute, wait for the path to drain, then bypass the HPF */
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+		msleep(20);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
+		cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Pulse the decimator reset bit, then restore HPF state */
+		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
+				    1 << w->shift);
+		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
+			(tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+		for (i = 0; i < NUM_DECIMATORS; i++) {
+			if (decimator == i + 1)
+				dig_cdc->dec_active[i] = false;
+		}
+		break;
+	}
+out:
+	kfree(widget_name);
+	return ret;
+}
+
+/*
+ * Notifier callback invoked by the analog/clock side of the codec.
+ *
+ * Translates clock, mute, interpolator and SSR events into digital-core
+ * register writes. Mute events also track which PA paths are muted in
+ * msm_dig_cdc->mute_mask.
+ */
+static int msm_dig_cdc_event_notify(struct notifier_block *block,
+				    unsigned long val,
+				    void *data)
+{
+	enum dig_cdc_notify_event event = (enum dig_cdc_notify_event)val;
+	struct snd_soc_codec *codec = registered_digcodec;
+	struct msm_dig *msm_dig_cdc = codec->control_data;
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+
+	switch (event) {
+	case DIG_CDC_EVENT_CLK_ON:
+		/* Enable PDM clocks; TOP_CTL bit0 is cleared for 12.288 MHz
+		 * (or native clock) and set for 9.6 MHz MCLK.
+		 */
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x03);
+		if (pdata->mclk_freq == MCLK_RATE_12P288MHZ ||
+		    pdata->native_clk_set)
+			snd_soc_update_bits(codec,
+					MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x00);
+		else if (pdata->mclk_freq == MCLK_RATE_9P6MHZ)
+			snd_soc_update_bits(codec,
+					MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x01);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x01, 0x01);
+		break;
+	case DIG_CDC_EVENT_CLK_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x00);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x01, 0x00);
+		break;
+	/* RX mute events: bit0 of RXn_B6_CTL is the digital mute; the
+	 * mute_mask records which PA the mute belongs to.
+	 */
+	case DIG_CDC_EVENT_RX1_MUTE_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x01);
+		msm_dig_cdc->mute_mask |= HPHL_PA_DISABLE;
+		break;
+	case DIG_CDC_EVENT_RX1_MUTE_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
+		msm_dig_cdc->mute_mask &= (~HPHL_PA_DISABLE);
+		break;
+	case DIG_CDC_EVENT_RX2_MUTE_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x01);
+		msm_dig_cdc->mute_mask |= HPHR_PA_DISABLE;
+		break;
+	case DIG_CDC_EVENT_RX2_MUTE_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
+		msm_dig_cdc->mute_mask &= (~HPHR_PA_DISABLE);
+		break;
+	case DIG_CDC_EVENT_RX3_MUTE_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x01);
+		msm_dig_cdc->mute_mask |= SPKR_PA_DISABLE;
+		break;
+	case DIG_CDC_EVENT_RX3_MUTE_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
+		msm_dig_cdc->mute_mask &= (~SPKR_PA_DISABLE);
+		break;
+	case DIG_CDC_EVENT_PRE_RX1_INT_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x14);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0x10);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x80);
+		break;
+	case DIG_CDC_EVENT_PRE_RX2_INT_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x14);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0x10);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x80);
+		break;
+	case DIG_CDC_EVENT_POST_RX1_INT_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x00);
+		/* NOTE(review): value 0xFF exceeds mask 0x18; only bits 4:3
+		 * take effect (equivalent to writing 0x18) — confirm intent.
+		 */
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0xFF);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x00);
+		break;
+	case DIG_CDC_EVENT_POST_RX2_INT_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x00);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0xFF);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x00);
+		break;
+	case DIG_CDC_EVENT_SSR_DOWN:
+		/* Subsystem restart: hardware is gone, serve from cache */
+		regcache_cache_only(msm_dig_cdc->regmap, true);
+		break;
+	case DIG_CDC_EVENT_SSR_UP:
+		/* Hardware is back: replay the full cached register state */
+		regcache_cache_only(msm_dig_cdc->regmap, false);
+		regcache_mark_dirty(msm_dig_cdc->regmap);
+		regcache_sync(msm_dig_cdc->regmap);
+		break;
+	case DIG_CDC_EVENT_INVALID:
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * procfs read handler for the codec "version" info entry: formats the
+ * detected codec version as a one-line string and copies it to userspace.
+ */
+static ssize_t msm_dig_codec_version_read(struct snd_info_entry *entry,
+					  void *file_private_data,
+					  struct file *file,
+					  char __user *buf, size_t count,
+					  loff_t pos)
+{
+	char ver_str[MSM_DIG_CDC_VERSION_ENTRY_SIZE];
+	struct msm_dig_priv *priv = entry->private_data;
+	int written;
+
+	if (!priv) {
+		pr_err("%s: msm_dig priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	if (priv->version == DRAX_CDC)
+		written = snprintf(ver_str, sizeof(ver_str),
+				   "DRAX_CDC_1_0\n");
+	else
+		written = snprintf(ver_str, sizeof(ver_str),
+				   "VER_UNDEFINED\n");
+
+	return simple_read_from_buffer(buf, count, &pos, ver_str, written);
+}
+
+/* Ops for the procfs "version" entry; read-only */
+static struct snd_info_entry_ops msm_dig_codec_info_ops = {
+	.read = msm_dig_codec_version_read,
+};
+
+/*
+ * msm_dig_codec_info_create_codec_entry - creates msm_dig module
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates the "msm_digital" proc module directory and a "version"
+ * entry (served by msm_dig_codec_version_read) under the given
+ * parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int msm_dig_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					  struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct msm_dig_priv *msm_dig;
+	struct snd_soc_card *card;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	msm_dig = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	msm_dig->entry = snd_register_module_info(codec_root->module,
+						  "msm_digital",
+						  codec_root);
+	if (!msm_dig->entry) {
+		dev_dbg(codec->dev, "%s: failed to create msm_digital entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   msm_dig->entry);
+	/* NOTE(review): on the failure paths below, msm_dig->entry is left
+	 * registered and is not freed here — presumably released with the
+	 * card/parent entry; confirm no leak on repeated probe.
+	 */
+	if (!version_entry) {
+		dev_dbg(codec->dev, "%s: failed to create msm_digital version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = msm_dig;
+	version_entry->size = MSM_DIG_CDC_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &msm_dig_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	msm_dig->version_entry = version_entry;
+	return 0;
+}
+EXPORT_SYMBOL(msm_dig_codec_info_create_codec_entry);
+
+/*
+ * ASoC codec probe: allocates the per-codec private data, registers the
+ * compander controls, initializes the per-decimator HPF work items, and
+ * hooks this codec into the shared event-notifier chain.
+ */
+static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
+{
+	struct msm_dig *core = dev_get_drvdata(codec->dev);
+	struct msm_dig_priv *priv;
+	int idx, rc;
+
+	priv = devm_kzalloc(codec->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	snd_soc_codec_set_drvdata(codec, priv);
+	priv->codec = codec;
+	codec->control_data = core;
+
+	snd_soc_add_codec_controls(codec, compander_kcontrols,
+				   ARRAY_SIZE(compander_kcontrols));
+
+	/* One deferred HPF cut-off restore work item per decimator */
+	for (idx = 0; idx < NUM_DECIMATORS; idx++) {
+		tx_hpf_work[idx].dig_cdc = priv;
+		tx_hpf_work[idx].decimator = idx + 1;
+		INIT_DELAYED_WORK(&tx_hpf_work[idx].dwork,
+				  tx_hpf_corner_freq_callback);
+	}
+
+	for (idx = 0; idx < MSM89XX_RX_MAX; idx++)
+		priv->comp_enabled[idx] = COMPANDER_NONE;
+
+	/* Register event notifier */
+	core->nblock.notifier_call = msm_dig_cdc_event_notify;
+	if (core->register_notifier) {
+		rc = core->register_notifier(core->handle,
+					     &core->nblock,
+					     true);
+		if (rc) {
+			pr_err("%s: Failed to register notifier %d\n",
+			       __func__, rc);
+			return rc;
+		}
+	}
+	if (core->get_cdc_version)
+		priv->version = core->get_cdc_version(core->handle);
+	registered_digcodec = codec;
+	return 0;
+}
+
+/*
+ * ASoC codec remove: detach from the event-notifier chain and tear down
+ * the digital-core register mapping.
+ */
+static int msm_dig_cdc_soc_remove(struct snd_soc_codec *codec)
+{
+	struct msm_dig *core = dev_get_drvdata(codec->dev);
+
+	if (core->register_notifier)
+		core->register_notifier(core->handle,
+					&core->nblock,
+					false);
+	iounmap(core->dig_base);
+	return 0;
+}
+
+/* DAPM routing table for the digital codec core */
+static const struct snd_soc_dapm_route audio_dig_map[] = {
+	{"RX_I2S_CLK", NULL, "CDC_CONN"},
+	{"I2S RX1", NULL, "RX_I2S_CLK"},
+	{"I2S RX2", NULL, "RX_I2S_CLK"},
+	{"I2S RX3", NULL, "RX_I2S_CLK"},
+
+	{"I2S TX1", NULL, "TX_I2S_CLK"},
+	{"I2S TX2", NULL, "TX_I2S_CLK"},
+	{"I2S TX3", NULL, "TX_I2S_CLK"},
+	{"I2S TX4", NULL, "TX_I2S_CLK"},
+	{"I2S TX5", NULL, "TX_I2S_CLK"},
+	{"I2S TX6", NULL, "TX_I2S_CLK"},
+
+	{"I2S TX1", NULL, "DEC1 MUX"},
+	{"I2S TX2", NULL, "DEC2 MUX"},
+	{"I2S TX3", NULL, "DEC3 MUX"},
+	{"I2S TX4", NULL, "DEC4 MUX"},
+	{"I2S TX5", NULL, "DEC3 MUX"},
+	{"I2S TX6", NULL, "DEC4 MUX"},
+	{"I2S TX6", NULL, "DEC5 MUX"},
+
+	{"PDM_OUT_RX1", NULL, "RX1 CHAIN"},
+	{"PDM_OUT_RX2", NULL, "RX2 CHAIN"},
+	{"PDM_OUT_RX3", NULL, "RX3 CHAIN"},
+
+	{"RX1 CHAIN", NULL, "RX1 MIX2"},
+	{"RX2 CHAIN", NULL, "RX2 MIX2"},
+	{"RX3 CHAIN", NULL, "RX3 MIX1"},
+
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
+	{"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
+	{"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
+	{"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
+	{"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
+	{"RX1 MIX2", NULL, "RX1 MIX1"},
+	{"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
+	{"RX2 MIX2", NULL, "RX2 MIX1"},
+	{"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
+
+	{"RX1 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP1", "RX3", "I2S RX3"},
+	{"RX1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX1 MIX1 INP1", "IIR2", "IIR2"},
+	{"RX1 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP2", "RX3", "I2S RX3"},
+	{"RX1 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX1 MIX1 INP2", "IIR2", "IIR2"},
+	{"RX1 MIX1 INP3", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP3", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP3", "RX3", "I2S RX3"},
+
+	{"RX2 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX2 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX2 MIX1 INP1", "RX3", "I2S RX3"},
+	{"RX2 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX2 MIX1 INP1", "IIR2", "IIR2"},
+	{"RX2 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX2 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX2 MIX1 INP2", "RX3", "I2S RX3"},
+	{"RX2 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX2 MIX1 INP2", "IIR2", "IIR2"},
+
+	{"RX3 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX3 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX3 MIX1 INP1", "RX3", "I2S RX3"},
+	{"RX3 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX3 MIX1 INP1", "IIR2", "IIR2"},
+	{"RX3 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX3 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX3 MIX1 INP2", "RX3", "I2S RX3"},
+	{"RX3 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX3 MIX1 INP2", "IIR2", "IIR2"},
+
+	{"RX1 MIX2 INP1", "IIR1", "IIR1"},
+	{"RX2 MIX2 INP1", "IIR1", "IIR1"},
+	{"RX1 MIX2 INP1", "IIR2", "IIR2"},
+	{"RX2 MIX2 INP1", "IIR2", "IIR2"},
+
+	/* Decimator Inputs */
+	{"DEC1 MUX", "DMIC1", "DMIC1"},
+	{"DEC1 MUX", "DMIC2", "DMIC2"},
+	{"DEC1 MUX", "DMIC3", "DMIC3"},
+	{"DEC1 MUX", "DMIC4", "DMIC4"},
+	{"DEC1 MUX", "ADC1", "ADC1_IN"},
+	{"DEC1 MUX", "ADC2", "ADC2_IN"},
+	{"DEC1 MUX", "ADC3", "ADC3_IN"},
+	{"DEC1 MUX", NULL, "CDC_CONN"},
+
+	{"DEC2 MUX", "DMIC1", "DMIC1"},
+	{"DEC2 MUX", "DMIC2", "DMIC2"},
+	{"DEC2 MUX", "DMIC3", "DMIC3"},
+	{"DEC2 MUX", "DMIC4", "DMIC4"},
+	{"DEC2 MUX", "ADC1", "ADC1_IN"},
+	{"DEC2 MUX", "ADC2", "ADC2_IN"},
+	{"DEC2 MUX", "ADC3", "ADC3_IN"},
+	{"DEC2 MUX", NULL, "CDC_CONN"},
+
+	{"DEC3 MUX", "DMIC1", "DMIC1"},
+	{"DEC3 MUX", "DMIC2", "DMIC2"},
+	{"DEC3 MUX", "DMIC3", "DMIC3"},
+	{"DEC3 MUX", "DMIC4", "DMIC4"},
+	{"DEC3 MUX", "ADC1", "ADC1_IN"},
+	{"DEC3 MUX", "ADC2", "ADC2_IN"},
+	{"DEC3 MUX", "ADC3", "ADC3_IN"},
+	{"DEC3 MUX", NULL, "CDC_CONN"},
+
+	{"DEC4 MUX", "DMIC1", "DMIC1"},
+	{"DEC4 MUX", "DMIC2", "DMIC2"},
+	{"DEC4 MUX", "DMIC3", "DMIC3"},
+	{"DEC4 MUX", "DMIC4", "DMIC4"},
+	{"DEC4 MUX", "ADC1", "ADC1_IN"},
+	{"DEC4 MUX", "ADC2", "ADC2_IN"},
+	{"DEC4 MUX", "ADC3", "ADC3_IN"},
+	{"DEC4 MUX", NULL, "CDC_CONN"},
+
+	{"DEC5 MUX", "DMIC1", "DMIC1"},
+	{"DEC5 MUX", "DMIC2", "DMIC2"},
+	{"DEC5 MUX", "DMIC3", "DMIC3"},
+	{"DEC5 MUX", "DMIC4", "DMIC4"},
+	{"DEC5 MUX", "ADC1", "ADC1_IN"},
+	{"DEC5 MUX", "ADC2", "ADC2_IN"},
+	{"DEC5 MUX", "ADC3", "ADC3_IN"},
+	{"DEC5 MUX", NULL, "CDC_CONN"},
+
+	{"IIR1", NULL, "IIR1 INP1 MUX"},
+	{"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2", NULL, "IIR2 INP1 MUX"},
+	{"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
+	/* Fix copy-paste: these two routes duplicated the IIR1 DEC3/DEC4
+	 * entries above and left the IIR2 mux (whose enum text exposes
+	 * DEC3/DEC4) with no DEC3/DEC4 routes, so DAPM could not power
+	 * the IIR2 path for those selections.
+	 */
+	{"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
+};
+
+/* Mux option strings: order must match the hardware field encoding */
+static const char * const rx_mix1_text[] = {
+	"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
+};
+
+static const char * const rx_mix2_text[] = {
+	"ZERO", "IIR1", "IIR2"
+};
+
+static const char * const dec_mux_text[] = {
+	"ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2", "DMIC3", "DMIC4"
+};
+
+static const char * const iir_inp1_text[] = {
+	"ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3", "DEC3", "DEC4"
+};
+/* RX1 MIX1 */
+static const struct soc_enum rx_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
+		3, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B2_CTL,
+		0, 6, rx_mix1_text);
+
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B3_CTL,
+		0, 3, rx_mix2_text);
+
+/* RX2 MIX1 */
+static const struct soc_enum rx2_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx2_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
+		3, 6, rx_mix1_text);
+
+/* NOTE(review): INP3 uses the same register/shift as INP1 (RX2_B1_CTL
+ * bit 0), unlike RX1 where INP3 lives in B2_CTL — the two controls
+ * alias each other; confirm against the register map.
+ */
+static const struct soc_enum rx2_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
+		0, 6, rx_mix1_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B3_CTL,
+		0, 3, rx_mix2_text);
+
+/* RX3 MIX1 */
+static const struct soc_enum rx3_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx3_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
+		3, 6, rx_mix1_text);
+
+/* NOTE(review): same INP1/INP3 aliasing as RX2 above — confirm */
+static const struct soc_enum rx3_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
+		0, 6, rx_mix1_text);
+
+/* DEC */
+static const struct soc_enum dec1_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
+		0, 8, dec_mux_text);
+
+static const struct soc_enum dec2_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
+		3, 8, dec_mux_text);
+
+static const struct soc_enum dec3_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B2_CTL,
+		0, 8, dec_mux_text);
+
+static const struct soc_enum dec4_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B2_CTL,
+		3, 8, dec_mux_text);
+
+static const struct soc_enum decsva_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B3_CTL,
+		0, 8, dec_mux_text);
+
+static const struct soc_enum iir1_inp1_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL,
+		0, 8, iir_inp1_text);
+
+static const struct soc_enum iir2_inp1_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL,
+		0, 8, iir_inp1_text);
+
+/*cut-off frequency options for the high pass filters*/
+static const char * const cf_text[] = {
+	"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
+};
+
+static const struct soc_enum cf_rxmix1_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX1_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix2_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX2_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix3_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX3_B4_CTL, 0, 3, cf_text);
+
+static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
+
+/* Like SOC_DAPM_ENUM, but routes .put through msm_dig_cdc_put_dec_enum
+ * so the driver can intercept decimator mux changes.
+ */
+#define MSM89XX_DEC_ENUM(xname, xenum) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_enum_double, \
+	.get = snd_soc_dapm_get_enum_double, \
+	.put = msm_dig_cdc_put_dec_enum, \
+	.private_value = (unsigned long)&xenum }
+
+static const struct snd_kcontrol_new dec1_mux =
+	MSM89XX_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum);
+
+static const struct snd_kcontrol_new dec2_mux =
+	MSM89XX_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum);
+
+static const struct snd_kcontrol_new dec3_mux =
+	MSM89XX_DEC_ENUM("DEC3 MUX Mux", dec3_mux_enum);
+
+static const struct snd_kcontrol_new dec4_mux =
+	MSM89XX_DEC_ENUM("DEC4 MUX Mux", dec4_mux_enum);
+
+static const struct snd_kcontrol_new decsva_mux =
+	MSM89XX_DEC_ENUM("DEC5 MUX Mux", decsva_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp1_mux =
+	SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp1_mux =
+	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP3 Mux", rx2_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP3 Mux", rx3_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
+	SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
+	SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
+
+/* DAPM widget table: AIF streams, RX mixers/chains, decimator muxes
+ * (with event callbacks), sidetone IIRs, clock supplies and mic inputs.
+ */
+static const struct snd_soc_dapm_widget msm_dig_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN("I2S RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("I2S RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("I2S RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_OUT("I2S TX1", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX2", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX3", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX4", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX5", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	/* TX6 is the only output on AIF2 — presumably the SVA/listen
+	 * capture path; confirm against the DAI definitions.
+	 */
+	SND_SOC_DAPM_AIF_OUT("I2S TX6", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_MIXER_E("RX1 MIX2", MSM89XX_CDC_CORE_CLK_RX_B1_CTL,
+			     MSM89XX_RX1, 0, NULL, 0,
+			     msm_dig_cdc_codec_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX2 MIX2", MSM89XX_CDC_CORE_CLK_RX_B1_CTL,
+			     MSM89XX_RX2, 0, NULL, 0,
+			     msm_dig_cdc_codec_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX3 MIX1", MSM89XX_CDC_CORE_CLK_RX_B1_CTL,
+			     MSM89XX_RX3, 0, NULL, 0,
+			     msm_dig_cdc_codec_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER_E("RX1 CHAIN", MSM89XX_CDC_CORE_RX1_B6_CTL,
+			     MSM89XX_RX1, 0, NULL, 0,
+			     msm_dig_cdc_codec_enable_rx_chain,
+			     SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX2 CHAIN", MSM89XX_CDC_CORE_RX2_B6_CTL,
+			     MSM89XX_RX2, 0, NULL, 0,
+		msm_dig_cdc_codec_enable_rx_chain, SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX3 CHAIN", MSM89XX_CDC_CORE_RX3_B6_CTL,
+			     MSM89XX_RX3, 0, NULL, 0,
+		msm_dig_cdc_codec_enable_rx_chain, SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+			 &rx_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+			 &rx_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx2_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+			 &rx2_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+			 &rx2_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx3_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+			 &rx3_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+			 &rx3_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx1_mix2_inp1_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx2_mix2_inp1_mux),
+
+	/* Ordered supply (-2) so CDC_CONN powers before its consumers */
+	SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, MSM89XX_CDC_CORE_CLK_OTHR_CTL,
+			      2, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX_E("DEC1 MUX",
+			   MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 0, 0,
+			   &dec1_mux, msm_dig_cdc_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC2 MUX",
+			   MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 1, 0,
+			   &dec2_mux, msm_dig_cdc_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC3 MUX",
+			   MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 2, 0,
+			   &dec3_mux, msm_dig_cdc_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC4 MUX",
+			   MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 3, 0,
+			   &dec4_mux, msm_dig_cdc_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC5 MUX",
+			   MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 4, 0,
+			   &decsva_mux, msm_dig_cdc_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	/* Sidetone */
+	SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
+	SND_SOC_DAPM_PGA_E("IIR1", MSM89XX_CDC_CORE_CLK_SD_CTL, 0, 0, NULL, 0,
+		msm_dig_cdc_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
+	SND_SOC_DAPM_PGA_E("IIR2", MSM89XX_CDC_CORE_CLK_SD_CTL, 1, 0, NULL, 0,
+		msm_dig_cdc_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_SUPPLY("RX_I2S_CLK",
+		MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 4, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("TX_I2S_CLK",
+		MSM89XX_CDC_CORE_CLK_TX_I2S_CTL, 4, 0, NULL, 0),
+
+	/* Digital Mic Inputs */
+	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_INPUT("ADC1_IN"),
+	SND_SOC_DAPM_INPUT("ADC2_IN"),
+	SND_SOC_DAPM_INPUT("ADC3_IN"),
+	SND_SOC_DAPM_OUTPUT("PDM_OUT_RX1"),
+	SND_SOC_DAPM_OUTPUT("PDM_OUT_RX2"),
+	SND_SOC_DAPM_OUTPUT("PDM_OUT_RX3"),
+};
+
+/* TX decimator HPF cut-off selectors (bits [5:4] of TXn_MUX_CTL) */
+static const struct soc_enum cf_dec1_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX1_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec2_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX2_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec3_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX3_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec4_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX4_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_decsva_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX5_MUX_CTL, 4, 3, cf_text);
+
+/* Mixer controls: TX/RX digital gains, IIR band enables/coefficients
+ * and HPF switches/cut-offs.
+ */
+static const struct snd_kcontrol_new msm_dig_snd_controls[] = {
+	SOC_SINGLE_SX_TLV("DEC1 Volume",
+		MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC2 Volume",
+		MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC3 Volume",
+		MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC4 Volume",
+		MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC5 Volume",
+		MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("IIR1 INP1 Volume",
+		MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP2 Volume",
+		MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP3 Volume",
+		MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP4 Volume",
+		MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR2 INP1 Volume",
+		MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL,
+		0, -84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("RX1 Digital Volume",
+		MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX2 Digital Volume",
+		MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX3 Digital Volume",
+		MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
+		0, -84, 40, digital_gain),
+
+	SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+
+	SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+
+	/* 5 coefficient values per band */
+	SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+
+	SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+
+	SOC_SINGLE("RX1 HPF Switch",
+		MSM89XX_CDC_CORE_RX1_B5_CTL, 2, 1, 0),
+	SOC_SINGLE("RX2 HPF Switch",
+		MSM89XX_CDC_CORE_RX2_B5_CTL, 2, 1, 0),
+	SOC_SINGLE("RX3 HPF Switch",
+		MSM89XX_CDC_CORE_RX3_B5_CTL, 2, 1, 0),
+
+	SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
+	SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
+	SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
+
+	SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
+	SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
+	SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
+	SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
+	SOC_ENUM("TX5 HPF cut off", cf_decsva_enum),
+	SOC_SINGLE("TX1 HPF Switch",
+		MSM89XX_CDC_CORE_TX1_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX2 HPF Switch",
+		MSM89XX_CDC_CORE_TX2_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX3 HPF Switch",
+		MSM89XX_CDC_CORE_TX3_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX4 HPF Switch",
+		MSM89XX_CDC_CORE_TX4_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX5 HPF Switch",
+		MSM89XX_CDC_CORE_TX5_MUX_CTL, 3, 1, 0),
+};
+
+static int msm_dig_cdc_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = NULL;
+ u16 tx_vol_ctl_reg = 0;
+ u8 decimator = 0, i;
+ struct msm_dig_priv *dig_cdc;
+
+ pr_debug("%s: Digital Mute val = %d\n", __func__, mute);
+
+ if (!dai || !dai->codec) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+ codec = dai->codec;
+ dig_cdc = snd_soc_codec_get_drvdata(codec);
+
+ if (dai->id == AIF1_PB) {
+ dev_dbg(codec->dev, "%s: Not capture use case skip\n",
+ __func__);
+ return 0;
+ }
+
+ mute = (mute) ? 1 : 0;
+ if (!mute) {
+ /*
+ * 15 ms is an emperical value for the mute time
+ * that was arrived by checking the pop level
+ * to be inaudible
+ */
+ usleep_range(15000, 15010);
+ }
+
+ if (dai->id == AIF3_SVA) {
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG, 0x01, mute);
+ goto ret;
+ }
+ for (i = 0; i < (NUM_DECIMATORS - 1); i++) {
+ if (dig_cdc->dec_active[i])
+ decimator = i + 1;
+ if (decimator && decimator < NUM_DECIMATORS) {
+ /* mute/unmute decimators corresponding to Tx DAI's */
+ tx_vol_ctl_reg =
+ MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
+ 32 * (decimator - 1);
+ snd_soc_update_bits(codec, tx_vol_ctl_reg,
+ 0x01, mute);
+ }
+ decimator = 0;
+ }
+ret:
+ return 0;
+}
+
/* DAI callbacks shared by every DAI of this digital codec. */
static struct snd_soc_dai_ops msm_dig_dai_ops = {
	.hw_params = msm_dig_cdc_hw_params,
	.digital_mute = msm_dig_cdc_digital_mute,
};
+
+
/*
 * DAIs exposed by the digital codec: one playback DAI (AIF1_PB), two
 * capture DAIs (AIF1_CAP, AIF3_SVA) and a VI-feedback capture DAI
 * (AIF2_VIFEED).  All are limited to 8-48 kHz, 16-bit samples.
 */
static struct snd_soc_dai_driver msm_codec_dais[] = {
	{
		.name = "msm_dig_cdc_dai_rx1",
		.id = AIF1_PB,
		.playback = { /* Support maximum range */
			.stream_name = "AIF1 Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &msm_dig_dai_ops,
	},
	{
		.name = "msm_dig_cdc_dai_tx1",
		.id = AIF1_CAP,
		.capture = { /* Support maximum range */
			.stream_name = "AIF1 Capture",
			.channels_min = 1,
			.channels_max = 4,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &msm_dig_dai_ops,
	},
	{
		.name = "msm_dig_cdc_dai_tx2",
		.id = AIF3_SVA,
		/*
		 * NOTE(review): stream_name "AIF2 Capture" is identical to
		 * the vifeed DAI's stream below; DAPM stream names are
		 * normally unique per codec.  Confirm whether this was
		 * intended to be "AIF3 Capture" and check audio_dig_map
		 * routes before renaming.
		 */
		.capture = { /* Support maximum range */
			.stream_name = "AIF2 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &msm_dig_dai_ops,
	},
	{
		.name = "msm_dig_cdc_dai_vifeed",
		.id = AIF2_VIFEED,
		.capture = { /* Support maximum range */
			.stream_name = "AIF2 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &msm_dig_dai_ops,
	},
};
+
+static struct regmap *msm_digital_get_regmap(struct device *dev)
+{
+ struct msm_dig *msm_dig_cdc = dev_get_drvdata(dev);
+
+ return msm_dig_cdc->regmap;
+}
+
/*
 * ASoC codec driver descriptor: binds the soc probe/remove hooks, the
 * mixer controls, DAPM widgets/routes and the MMIO regmap defined above.
 */
static struct snd_soc_codec_driver soc_msm_dig_codec = {
	.probe  = msm_dig_cdc_soc_probe,
	.remove = msm_dig_cdc_soc_remove,
	.controls = msm_dig_snd_controls,
	.num_controls = ARRAY_SIZE(msm_dig_snd_controls),
	.dapm_widgets = msm_dig_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(msm_dig_dapm_widgets),
	.dapm_routes = audio_dig_map,
	.num_dapm_routes = ARRAY_SIZE(audio_dig_map),
	.get_regmap = msm_digital_get_regmap,
};
+
/*
 * MMIO regmap description for the digital codec core: 32-bit registers at
 * a 4-byte stride, flat register cache.  Locking is delegated to the
 * enable/disable digital callbacks, presumably so the codec clocks are up
 * around each register access — confirm against their implementation.
 *
 * NOTE(review): num_reg_defaults is set to MSM89XX_CDC_CORE_MAX_REGISTER,
 * but regmap expects the number of entries in @reg_defaults here
 * (i.e. ARRAY_SIZE(msm89xx_cdc_core_defaults)).  Confirm the two values
 * agree, otherwise the cache may read past the defaults table.
 */
const struct regmap_config msm_digital_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.lock = enable_digital_callback,
	.unlock = disable_digital_callback,
	.cache_type = REGCACHE_FLAT,
	.reg_defaults = msm89xx_cdc_core_defaults,
	.num_reg_defaults = MSM89XX_CDC_CORE_MAX_REGISTER,
	.readable_reg = msm89xx_cdc_core_readable_reg,
	.volatile_reg = msm89xx_cdc_core_volatile_reg,
	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
	.val_format_endian = REGMAP_ENDIAN_NATIVE,
	.max_register = MSM89XX_CDC_CORE_MAX_REGISTER,
};
+
+static int msm_dig_cdc_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 dig_cdc_addr;
+ struct msm_dig *msm_dig_cdc;
+ struct dig_ctrl_platform_data *pdata;
+
+ msm_dig_cdc = devm_kzalloc(&pdev->dev, sizeof(struct msm_dig),
+ GFP_KERNEL);
+ if (!msm_dig_cdc)
+ return -ENOMEM;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: pdata from parent is NULL\n",
+ __func__);
+ ret = -EINVAL;
+ goto rtn;
+ }
+ dev_set_drvdata(&pdev->dev, msm_dig_cdc);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "reg",
+ &dig_cdc_addr);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: could not find %s entry in dt\n",
+ __func__, "reg");
+ return ret;
+ }
+
+ msm_dig_cdc->dig_base = ioremap(dig_cdc_addr,
+ MSM89XX_CDC_CORE_MAX_REGISTER);
+ if (msm_dig_cdc->dig_base == NULL) {
+ dev_err(&pdev->dev, "%s ioremap failed\n", __func__);
+ return -ENOMEM;
+ }
+ msm_dig_cdc->regmap =
+ devm_regmap_init_mmio_clk(&pdev->dev, NULL,
+ msm_dig_cdc->dig_base, &msm_digital_regmap_config);
+
+ msm_dig_cdc->update_clkdiv = pdata->update_clkdiv;
+ msm_dig_cdc->get_cdc_version = pdata->get_cdc_version;
+ msm_dig_cdc->handle = pdata->handle;
+ msm_dig_cdc->register_notifier = pdata->register_notifier;
+
+ snd_soc_register_codec(&pdev->dev, &soc_msm_dig_codec,
+ msm_codec_dais, ARRAY_SIZE(msm_codec_dais));
+ dev_dbg(&pdev->dev, "%s: registered DIG CODEC 0x%x\n",
+ __func__, dig_cdc_addr);
+rtn:
+ return ret;
+}
+
+static int msm_dig_cdc_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id msm_dig_cdc_of_match[] = {
+ {.compatible = "qcom,msm-digital-codec"},
+ {},
+};
+
/* Platform driver glue: binds probe/remove to the DT match table above. */
static struct platform_driver msm_digcodec_driver = {
	.driver                 = {
		.owner          = THIS_MODULE,
		.name           = DRV_NAME,
		.of_match_table = msm_dig_cdc_of_match,
	},
	.probe                  = msm_dig_cdc_probe,
	.remove                 = msm_dig_cdc_remove,
};
module_platform_driver(msm_digcodec_driver);

MODULE_DESCRIPTION("MSM Audio Digital codec driver");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.h b/sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.h
new file mode 100644
index 000000000000..4cb82cd421b0
--- /dev/null
+++ b/sound/soc/codecs/msmfalcon_cdc/msm-digital-cdc.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef MSM_DIGITAL_CDC_H
#define MSM_DIGITAL_CDC_H

/* Bits of msm_dig.mute_mask identifying which PA paths are disabled. */
#define HPHL_PA_DISABLE (0x01 << 1)
#define HPHR_PA_DISABLE (0x01 << 2)
#define SPKR_PA_DISABLE (0x01 << 3)

/* TX decimators 1..4 plus the dedicated SVA decimator (TX5). */
#define NUM_DECIMATORS	5
/* Codec supports 1 compander */
enum {
	COMPANDER_NONE = 0,
	COMPANDER_1, /* HPHL/R */
	COMPANDER_MAX,
};

/* Number of output I2S port */
enum {
	MSM89XX_RX1 = 0,
	MSM89XX_RX2,
	MSM89XX_RX3,
	MSM89XX_RX_MAX,
};

/* Per-codec-instance runtime state kept as snd_soc_codec drvdata. */
struct msm_dig_priv {
	struct snd_soc_codec *codec;
	/* per-RX-path compander enable state */
	u32 comp_enabled[MSM89XX_RX_MAX];
	int (*codec_hph_comp_gpio)(bool enable, struct snd_soc_codec *codec);
	/* reference counts for the two DMIC clock pairs */
	s32 dmic_1_2_clk_cnt;
	s32 dmic_3_4_clk_cnt;
	/* which decimators are currently in use (indexed 0..NUM_DECIMATORS-1) */
	bool dec_active[NUM_DECIMATORS];
	int version;
	/* Entry for version info */
	struct snd_info_entry *entry;
	struct snd_info_entry *version_entry;
};

/* Platform-device state: MMIO mapping plus callbacks from the parent. */
struct msm_dig {
	char __iomem *dig_base;
	struct regmap *regmap;
	struct notifier_block nblock;
	u32 mute_mask;
	/* opaque handle passed back into the parent-supplied callbacks */
	void *handle;
	void (*update_clkdiv)(void *handle, int val);
	int (*get_cdc_version)(void *handle);
	int (*register_notifier)(void *handle,
				 struct notifier_block *nblock,
				 bool enable);
};

/* Callbacks handed down by the parent device via platform data;
 * mirrors the callback fields of struct msm_dig above.
 */
struct dig_ctrl_platform_data {
	void *handle;
	void (*update_clkdiv)(void *handle, int val);
	int (*get_cdc_version)(void *handle);
	int (*register_notifier)(void *handle,
				 struct notifier_block *nblock,
				 bool enable);
};

/* Deferred work item for re-enabling a TX high-pass filter cutoff. */
struct hpf_work {
	struct msm_dig_priv *dig_cdc;
	u32 decimator;
	u8 tx_hpf_cut_of_freq;
	struct delayed_work dwork;
};

/* Codec supports 5 bands */
enum {
	BAND1 = 0,
	BAND2,
	BAND3,
	BAND4,
	BAND5,
	BAND_MAX,
};

extern void msm_dig_cdc_hph_comp_cb(
		int (*codec_hph_comp_gpio)(
			bool enable, struct snd_soc_codec *codec),
		struct snd_soc_codec *codec);
int msm_dig_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
					  struct snd_soc_codec *codec);
#endif
diff --git a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.c b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-irq.c
index a722842b106b..5ba2dac1ec20 100644
--- a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.c
+++ b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-irq.c
@@ -25,9 +25,9 @@
#include <linux/pm_qos.h>
#include <soc/qcom/pm.h>
#include <sound/soc.h>
-#include "msm8x16-wcd.h"
-#include "msm8916-wcd-irq.h"
-#include "msm8x16_wcd_registers.h"
+#include "msm-analog-cdc.h"
+#include "msmfalcon-cdc-irq.h"
+#include "msmfalcon-cdc-registers.h"
#define MAX_NUM_IRQS 14
#define NUM_IRQ_REGS 2
diff --git a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.h b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-irq.h
index 659e52cc2a5e..659e52cc2a5e 100644
--- a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.h
+++ b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-irq.h
diff --git a/sound/soc/codecs/msm8x16/msm8x16_wcd_registers.h b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-registers.h
index 280e6f28682a..34c3d3333d6e 100644
--- a/sound/soc/codecs/msm8x16/msm8x16_wcd_registers.h
+++ b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-cdc-registers.h
@@ -9,8 +9,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#ifndef MSM8X16_WCD_REGISTERS_H
-#define MSM8X16_WCD_REGISTERS_H
+#ifndef MSMFALCON_WCD_REGISTERS_H
+#define MSMFALCON_WCD_REGISTERS_H
#define CDC_DIG_BASE 0xF000
#define CDC_ANA_BASE 0xF100
@@ -335,20 +335,20 @@
MSM89XX_PMIC_CDC_NUM_REGISTERS
-#define MSM89XX_CDC_CORE_CLK_RX_RESET_CTL (0x00)
+#define MSM89XX_CDC_CORE_CLK_RX_RESET_CTL (0x00)
#define MSM89XX_CDC_CORE_CLK_RX_RESET_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL (0x04)
-#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL (0x08)
+#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL (0x04)
+#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL (0x08)
#define MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_CLK_RX_I2S_CTL (0x0C)
#define MSM89XX_CDC_CORE_CLK_RX_I2S_CTL__POR (0x13)
#define MSM89XX_CDC_CORE_CLK_TX_I2S_CTL (0x10)
#define MSM89XX_CDC_CORE_CLK_TX_I2S_CTL__POR (0x13)
-#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL (0x14)
-#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL (0x18)
-#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL (0x14)
+#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL (0x18)
+#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_CLK_OTHR_CTL (0x1C)
#define MSM89XX_CDC_CORE_CLK_OTHR_CTL__POR (0x04)
#define MSM89XX_CDC_CORE_CLK_RX_B1_CTL (0x20)
@@ -359,10 +359,12 @@
#define MSM89XX_CDC_CORE_CLK_PDM_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_CLK_SD_CTL (0x2C)
#define MSM89XX_CDC_CORE_CLK_SD_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL (0x30)
-#define MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL (0x30)
+#define MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_CLK_RX_B2_CTL (0x34)
#define MSM89XX_CDC_CORE_CLK_RX_B2_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL (0x38)
+#define MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL__POR (0x13)
#define MSM89XX_CDC_CORE_RX1_B1_CTL (0x40)
#define MSM89XX_CDC_CORE_RX1_B1_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_RX2_B1_CTL (0x60)
@@ -399,19 +401,19 @@
#define MSM89XX_CDC_CORE_RX2_B6_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_RX3_B6_CTL (0x94)
#define MSM89XX_CDC_CORE_RX3_B6_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL (0x58)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL (0x78)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL (0x98)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL (0x5C)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL (0x7C)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL (0x9C)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_TOP_GAIN_UPDATE (0xA0)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL (0x58)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL (0x78)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL (0x98)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL (0x5C)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL (0x7C)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL (0x9C)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_TOP_GAIN_UPDATE (0xA0)
#define MSM89XX_CDC_CORE_TOP_GAIN_UPDATE__POR (0x00)
#define MSM89XX_CDC_CORE_TOP_CTL (0xA4)
#define MSM89XX_CDC_CORE_TOP_CTL__POR (0x01)
@@ -427,129 +429,145 @@
#define MSM89XX_CDC_CORE_COMP0_B5_CTL__POR (0x7F)
#define MSM89XX_CDC_CORE_COMP0_B6_CTL (0xC4)
#define MSM89XX_CDC_CORE_COMP0_B6_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS (0xC8)
-#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS__POR (0x03)
+#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS (0xC8)
+#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS__POR (0x03)
#define MSM89XX_CDC_CORE_COMP0_FS_CFG (0xCC)
#define MSM89XX_CDC_CORE_COMP0_FS_CFG__POR (0x03)
-#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL (0xD0)
-#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL__POR (0x02)
-#define MSM89XX_CDC_CORE_DEBUG_DESER1_CTL (0xE0)
+#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL (0xD0)
+#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL__POR (0x02)
+#define MSM89XX_CDC_CORE_DEBUG_DESER1_CTL (0xE0)
#define MSM89XX_CDC_CORE_DEBUG_DESER1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_DEBUG_DESER2_CTL (0xE4)
+#define MSM89XX_CDC_CORE_DEBUG_DESER2_CTL (0xE4)
#define MSM89XX_CDC_CORE_DEBUG_DESER2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG (0xE8)
+#define MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG (0xE8)
#define MSM89XX_CDC_CORE_DEBUG_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG (0xEC)
+#define MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG (0xEC)
#define MSM89XX_CDC_CORE_DEBUG_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG (0xF0)
+#define MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG (0xF0)
#define MSM89XX_CDC_CORE_DEBUG_B3_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL (0x100)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL (0x100)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL (0x140)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL (0x140)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL (0x104)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL (0x104)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL (0x144)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL (0x144)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL (0x108)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL (0x108)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL (0x148)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL (0x148)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL (0x10C)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL (0x10C)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL (0x14C)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL (0x14C)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL (0x110)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL (0x110)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL (0x150)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL (0x150)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL (0x114)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL (0x114)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL (0x154)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL (0x154)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL (0x118)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL (0x118)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL (0x158)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL (0x158)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL (0x11C)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL (0x11C)
#define MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL (0x15C)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL (0x15C)
#define MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_IIR1_CTL (0x120)
-#define MSM89XX_CDC_CORE_IIR1_CTL__POR (0x40)
+#define MSM89XX_CDC_CORE_IIR1_CTL__POR (0x40)
#define MSM89XX_CDC_CORE_IIR2_CTL (0x160)
-#define MSM89XX_CDC_CORE_IIR2_CTL__POR (0x40)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL (0x124)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL (0x164)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL (0x128)
+#define MSM89XX_CDC_CORE_IIR2_CTL__POR (0x40)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL (0x124)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL (0x164)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL (0x128)
#define MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL (0x168)
+#define MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL (0x168)
#define MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL (0x12C)
+#define MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL (0x12C)
#define MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL (0x16C)
+#define MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL (0x16C)
#define MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX1_B1_CTL (0x180)
+#define MSM89XX_CDC_CORE_CONN_RX1_B1_CTL (0x180)
#define MSM89XX_CDC_CORE_CONN_RX1_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX1_B2_CTL (0x184)
+#define MSM89XX_CDC_CORE_CONN_RX1_B2_CTL (0x184)
#define MSM89XX_CDC_CORE_CONN_RX1_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX1_B3_CTL (0x188)
+#define MSM89XX_CDC_CORE_CONN_RX1_B3_CTL (0x188)
#define MSM89XX_CDC_CORE_CONN_RX1_B3_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX2_B1_CTL (0x18C)
+#define MSM89XX_CDC_CORE_CONN_RX2_B1_CTL (0x18C)
#define MSM89XX_CDC_CORE_CONN_RX2_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX2_B2_CTL (0x190)
+#define MSM89XX_CDC_CORE_CONN_RX2_B2_CTL (0x190)
#define MSM89XX_CDC_CORE_CONN_RX2_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX2_B3_CTL (0x194)
+#define MSM89XX_CDC_CORE_CONN_RX2_B3_CTL (0x194)
#define MSM89XX_CDC_CORE_CONN_RX2_B3_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX3_B1_CTL (0x198)
+#define MSM89XX_CDC_CORE_CONN_RX3_B1_CTL (0x198)
#define MSM89XX_CDC_CORE_CONN_RX3_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_RX3_B2_CTL (0x19C)
+#define MSM89XX_CDC_CORE_CONN_RX3_B2_CTL (0x19C)
#define MSM89XX_CDC_CORE_CONN_RX3_B2_CTL__POR (0x00)
#define MSM89XX_CDC_CORE_CONN_TX_B1_CTL (0x1A0)
#define MSM89XX_CDC_CORE_CONN_TX_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL (0x1A8)
+#define MSM89XX_CDC_CORE_CONN_TX_B2_CTL (0x1A4)
+#define MSM89XX_CDC_CORE_CONN_TX_B2_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL (0x1A8)
#define MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL (0x1AC)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL (0x1AC)
#define MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL (0x1B0)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL (0x1B0)
#define MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL (0x1B4)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL (0x1B4)
#define MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL (0x1B8)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL (0x1B8)
#define MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL (0x1BC)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL (0x1BC)
#define MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL (0x1C0)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL (0x1C0)
#define MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL (0x1C4)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL (0x1C4)
#define MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL (0x1C8)
-#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL__POR (0x00)
-#define MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER (0x280)
+#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL (0x1C8)
+#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_CONN_TX_B3_CTL (0x1CC)
+#define MSM89XX_CDC_CORE_CONN_TX_B3_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER (0x1E0)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER__POR (0x00)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN (0x1E4)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN__POR (0x00)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG (0x1E8)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG__POR (0x00)
+#define MSM89XX_CDC_CORE_TX5_MUX_CTL (0x1EC)
+#define MSM89XX_CDC_CORE_TX5_MUX_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_TX5_CLK_FS_CTL (0x1F0)
+#define MSM89XX_CDC_CORE_TX5_CLK_FS_CTL__POR (0x03)
+#define MSM89XX_CDC_CORE_TX5_DMIC_CTL (0x1F4)
+#define MSM89XX_CDC_CORE_TX5_DMIC_CTL__POR (0x00)
+#define MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER (0x280)
#define MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER__POR (0x00)
-#define MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER (0x2A0)
+#define MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER (0x2A0)
#define MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER__POR (0x00)
-#define MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER (0x2C0)
+#define MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER (0x2C0)
#define MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER__POR (0x00)
-#define MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER (0x2E0)
+#define MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER (0x2E0)
#define MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER__POR (0x00)
-#define MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN (0x284)
+#define MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN (0x284)
#define MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN__POR (0x00)
-#define MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN (0x2A4)
+#define MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN (0x2A4)
#define MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN__POR (0x00)
-#define MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN (0x2C4)
+#define MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN (0x2C4)
#define MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN__POR (0x00)
-#define MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN (0x2E4)
+#define MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN (0x2E4)
#define MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN__POR (0x00)
-#define MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG (0x288)
+#define MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG (0x288)
#define MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG__POR (0x00)
-#define MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG (0x2A8)
+#define MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG (0x2A8)
#define MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG__POR (0x00)
-#define MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG (0x2C8)
+#define MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG (0x2C8)
#define MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG__POR (0x00)
-#define MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG (0x2E8)
+#define MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG (0x2E8)
#define MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG__POR (0x00)
#define MSM89XX_CDC_CORE_TX1_MUX_CTL (0x28C)
#define MSM89XX_CDC_CORE_TX1_MUX_CTL__POR (0x00)
diff --git a/sound/soc/codecs/msm8x16/msm89xx-regmap.c b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-regmap.c
index 007b74c8b867..5001c8da9877 100644
--- a/sound/soc/codecs/msm8x16/msm89xx-regmap.c
+++ b/sound/soc/codecs/msmfalcon_cdc/msmfalcon-regmap.c
@@ -12,8 +12,7 @@
*/
#include <linux/regmap.h>
-#include <linux/device.h>
-#include "msm8x16-wcd.h"
+#include "msmfalcon-cdc-registers.h"
/*
* Default register reset values that are common across different versions
@@ -21,7 +20,7 @@
* then remove it from this structure and add it in version specific
* structures.
*/
-static struct reg_default
+struct reg_default
msm89xx_cdc_core_defaults[MSM89XX_CDC_CORE_CACHE_SIZE] = {
{MSM89XX_CDC_CORE_CLK_RX_RESET_CTL, 0x00},
{MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL, 0x00},
@@ -35,8 +34,9 @@ static struct reg_default
{MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x00},
{MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x00},
{MSM89XX_CDC_CORE_CLK_SD_CTL, 0x00},
- {MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL, 0x00},
+ {MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL, 0x00},
{MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x00},
+ {MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL, 0x13},
{MSM89XX_CDC_CORE_RX1_B1_CTL, 0x00},
{MSM89XX_CDC_CORE_RX2_B1_CTL, 0x00},
{MSM89XX_CDC_CORE_RX3_B1_CTL, 0x00},
@@ -78,8 +78,9 @@ static struct reg_default
{MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG, 0x00},
{MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG, 0x00},
{MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL, 0x00},
- {MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL, 0x00},
+ {MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL, 0x00},
{MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL, 0x00},
+ {MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL, 0x00},
{MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL, 0x00},
{MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL, 0x00},
{MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL, 0x00},
@@ -92,7 +93,6 @@ static struct reg_default
{MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL, 0x00},
{MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL, 0x00},
{MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL, 0x00},
- {MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL, 0x00},
{MSM89XX_CDC_CORE_IIR1_CTL, 0x40},
{MSM89XX_CDC_CORE_IIR2_CTL, 0x40},
{MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL, 0x00},
@@ -110,6 +110,7 @@ static struct reg_default
{MSM89XX_CDC_CORE_CONN_RX3_B1_CTL, 0x00},
{MSM89XX_CDC_CORE_CONN_RX3_B2_CTL, 0x00},
{MSM89XX_CDC_CORE_CONN_TX_B1_CTL, 0x00},
+ {MSM89XX_CDC_CORE_CONN_TX_B2_CTL, 0x00},
{MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL, 0x00},
{MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL, 0x00},
{MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL, 0x00},
@@ -119,6 +120,13 @@ static struct reg_default
{MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL, 0x00},
{MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL, 0x00},
{MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL, 0x00},
+ {MSM89XX_CDC_CORE_CONN_TX_B3_CTL, 0x00},
+ {MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER, 0x00},
+ {MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN, 0x00},
+ {MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG, 0x00},
+ {MSM89XX_CDC_CORE_TX5_MUX_CTL, 0x00},
+ {MSM89XX_CDC_CORE_TX5_CLK_FS_CTL, 0x03},
+ {MSM89XX_CDC_CORE_TX5_DMIC_CTL, 0x00},
{MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER, 0x00},
{MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER, 0x00},
{MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER, 0x00},
@@ -145,7 +153,7 @@ static struct reg_default
{MSM89XX_CDC_CORE_TX4_DMIC_CTL, 0x00},
};
-static struct reg_default
+struct reg_default
msm89xx_pmic_cdc_defaults[MSM89XX_PMIC_CDC_CACHE_SIZE] = {
{MSM89XX_PMIC_DIGITAL_REVISION1, 0x00},
{MSM89XX_PMIC_DIGITAL_REVISION2, 0x00},
@@ -304,17 +312,144 @@ static struct reg_default
{MSM89XX_PMIC_ANALOG_TRIM_CTRL4, 0x00},
};
-static bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg)
-{
- return msm89xx_cdc_core_reg_readable[reg];
-}
+static const u8 msm89xx_cdc_core_reg_readable[MSM89XX_CDC_CORE_CACHE_SIZE] = {
+ [MSM89XX_CDC_CORE_CLK_RX_RESET_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_RX_I2S_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_TX_I2S_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_OTHR_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_RX_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_MCLK_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_PDM_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_SD_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_RX_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_B5_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_B5_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_B5_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_B6_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_B6_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_B6_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_TOP_GAIN_UPDATE] = 1,
+ [MSM89XX_CDC_CORE_TOP_CTL] = 1,
+ [MSM89XX_CDC_CORE_COMP0_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_COMP0_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_COMP0_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_COMP0_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_COMP0_B5_CTL] = 1,
+ [MSM89XX_CDC_CORE_COMP0_B6_CTL] = 1,
+ [MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS] = 1,
+ [MSM89XX_CDC_CORE_COMP0_FS_CFG] = 1,
+ [MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL] = 1,
+ [MSM89XX_CDC_CORE_DEBUG_DESER1_CTL] = 1,
+ [MSM89XX_CDC_CORE_DEBUG_DESER2_CTL] = 1,
+ [MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX1_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX1_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX1_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX2_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX2_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX2_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX3_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_RX3_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_TX_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_TX_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL] = 1,
+ [MSM89XX_CDC_CORE_CONN_TX_B3_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER] = 1,
+ [MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER] = 1,
+ [MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER] = 1,
+ [MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER] = 1,
+ [MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN] = 1,
+ [MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN] = 1,
+ [MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN] = 1,
+ [MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN] = 1,
+ [MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_TX1_MUX_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX2_MUX_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX3_MUX_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX4_MUX_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX1_CLK_FS_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX2_CLK_FS_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX3_CLK_FS_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX4_CLK_FS_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER] = 1,
+ [MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN] = 1,
+ [MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG] = 1,
+ [MSM89XX_CDC_CORE_TX5_MUX_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX5_CLK_FS_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX5_DMIC_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX1_DMIC_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX2_DMIC_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX3_DMIC_CTL] = 1,
+ [MSM89XX_CDC_CORE_TX4_DMIC_CTL] = 1,
+};
-static bool msm89xx_pmic_cdc_readable_reg(struct device *dev, unsigned int reg)
+bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg)
{
- return msm89xx_pmic_cdc_reg_readable[reg];
+ return msm89xx_cdc_core_reg_readable[reg];
}
-static bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg)
+bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MSM89XX_CDC_CORE_RX1_B1_CTL:
@@ -329,89 +464,8 @@ static bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg)
case MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL:
case MSM89XX_CDC_CORE_CLK_MCLK_CTL:
case MSM89XX_CDC_CORE_CLK_PDM_CTL:
- case MSM89XX_PMIC_ANALOG_BYPASS_MODE:
- case MSM89XX_PMIC_ANALOG_BOOST_EN_CTL:
- case MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL:
- case MSM89XX_PMIC_ANALOG_CURRENT_LIMIT:
- case MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL:
- case MSM89XX_PMIC_ANALOG_NCP_FBCTRL:
- case MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1:
return true;
default:
return false;
}
}
-
-static bool msm89xx_pmic_cdc_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case MSM89XX_PMIC_DIGITAL_REVISION1:
- case MSM89XX_PMIC_DIGITAL_REVISION2:
- case MSM89XX_PMIC_DIGITAL_PERPH_TYPE:
- case MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE:
- case MSM89XX_PMIC_DIGITAL_INT_RT_STS:
- case MSM89XX_PMIC_DIGITAL_INT_SET_TYPE:
- case MSM89XX_PMIC_DIGITAL_INT_POLARITY_HIGH:
- case MSM89XX_PMIC_DIGITAL_INT_POLARITY_LOW:
- case MSM89XX_PMIC_DIGITAL_INT_LATCHED_STS:
- case MSM89XX_PMIC_DIGITAL_INT_PENDING_STS:
- case MSM89XX_PMIC_DIGITAL_PIN_STATUS:
- case MSM89XX_PMIC_DIGITAL_SEC_ACCESS:
- case MSM89XX_PMIC_ANALOG_SEC_ACCESS:
- case MSM89XX_PMIC_ANALOG_REVISION1:
- case MSM89XX_PMIC_ANALOG_REVISION2:
- case MSM89XX_PMIC_ANALOG_REVISION3:
- case MSM89XX_PMIC_ANALOG_REVISION4:
- case MSM89XX_PMIC_ANALOG_PERPH_TYPE:
- case MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE:
- case MSM89XX_PMIC_ANALOG_INT_RT_STS:
- case MSM89XX_PMIC_ANALOG_INT_SET_TYPE:
- case MSM89XX_PMIC_ANALOG_INT_POLARITY_HIGH:
- case MSM89XX_PMIC_ANALOG_INT_POLARITY_LOW:
- case MSM89XX_PMIC_ANALOG_INT_LATCHED_STS:
- case MSM89XX_PMIC_ANALOG_INT_PENDING_STS:
- case MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT:
- case MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT:
- case MSM89XX_PMIC_ANALOG_RX_HPH_STATUS:
- case MSM89XX_PMIC_ANALOG_RX_EAR_STATUS:
- case MSM89XX_PMIC_ANALOG_SPKR_SAR_STATUS:
- case MSM89XX_PMIC_ANALOG_SPKR_DRV_STATUS:
- return true;
- default:
- return false;
- }
-}
-
-struct regmap_config msm89xx_pmic_cdc_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
- .max_register = MSM89XX_PMIC_CDC_CACHE_SIZE,
- .fast_io = true,
- .reg_defaults = msm89xx_pmic_cdc_defaults,
- .num_reg_defaults = ARRAY_SIZE(msm89xx_pmic_cdc_defaults),
- .readable_reg = msm89xx_pmic_cdc_readable_reg,
- .volatile_reg = msm89xx_pmic_cdc_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
- .reg_format_endian = REGMAP_ENDIAN_NATIVE,
- .val_format_endian = REGMAP_ENDIAN_NATIVE,
- .can_multi_write = true,
- .lock = enable_digital_callback,
- .unlock = disable_digital_callback,
-
-};
-
-struct regmap_config msm89xx_cdc_core_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
-
- .max_register = MSM89XX_CDC_CORE_CACHE_SIZE,
- .reg_defaults = msm89xx_cdc_core_defaults,
- .num_reg_defaults = ARRAY_SIZE(msm89xx_cdc_core_defaults),
- .readable_reg = msm89xx_cdc_core_readable_reg,
- .volatile_reg = msm89xx_cdc_core_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
- .reg_format_endian = REGMAP_ENDIAN_NATIVE,
- .val_format_endian = REGMAP_ENDIAN_NATIVE,
- .can_multi_write = true,
-};
diff --git a/sound/soc/codecs/wcd934x/wcd934x-routing.h b/sound/soc/codecs/wcd934x/wcd934x-routing.h
index 940fdf89d361..8ca4c07a3327 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-routing.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-routing.h
@@ -761,7 +761,8 @@ const struct snd_soc_dapm_route tavil_audio_map[] = {
{"RX INT0_2 MUX", "RX5", "CDC_IF RX5 MUX"},
{"RX INT0_2 MUX", "RX6", "CDC_IF RX6 MUX"},
{"RX INT0_2 MUX", "RX7", "CDC_IF RX7 MUX"},
- {"RX INT0 SEC MIX", NULL, "RX INT0_2 MUX"},
+ {"RX INT0_2 INTERP", NULL, "RX INT0_2 MUX"},
+ {"RX INT0 SEC MIX", NULL, "RX INT0_2 INTERP"},
/* Mixing path INT1 */
{"RX INT1_2 MUX", "RX0", "CDC_IF RX0 MUX"},
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 1fdf81a3a45f..8be608c8511c 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -8589,7 +8589,7 @@ static int tavil_handle_pdata(struct tavil_priv *tavil,
if (pdata->dmic_clk_drv ==
WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED) {
pdata->dmic_clk_drv = WCD934X_DMIC_CLK_DRIVE_DEFAULT;
- dev_info(codec->dev,
+ dev_dbg(codec->dev,
"%s: dmic_clk_strength invalid, default = %d\n",
__func__, pdata->dmic_clk_drv);
}
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index a5a81df49d69..69d74a8703b6 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -119,8 +119,9 @@ config SND_SOC_INT_CODEC
select MSM_QDSP6_PDR
select MSM_QDSP6_NOTIFIER
select MSM_QDSP6V2_CODECS
- select SND_SOC_MSM_SWR
- select SND_SOC_MSM8X16_WCD
+ select MSM_CDC_PINCTRL
+ select SND_SOC_MSM_SDW
+ select SND_SOC_MSMFALCON_CDC
select QTI_PP
select DTS_SRS_TM
select DOLBY_DAP
@@ -130,6 +131,7 @@ config SND_SOC_INT_CODEC
select DTS_EAGLE
select SND_SOC_MSMFALCON_COMMON
select SND_SOC_COMPRESS
+ select PINCTRL_LPI
help
To add support for SoC audio on MSM_INT.
This will enable sound soc drivers which
@@ -164,6 +166,7 @@ config SND_SOC_EXT_CODEC
select DTS_EAGLE
select SND_SOC_MSMFALCON_COMMON
select SND_SOC_COMPRESS
+ select PINCTRL_LPI
help
To add support for SoC audio on MSM_EXT.
This will enable sound soc drivers which
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index 8df7fad3893d..d66f9eb79cea 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -21,7 +21,7 @@ snd-soc-msm8998-objs := msm8998.o
obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o
# for MSMFALCON sound card driver
-snd-soc-msmfalcon-common-objs := msm-audio-pinctrl.o msmfalcon-common.o
+snd-soc-msmfalcon-common-objs := msmfalcon-common.o
obj-$(CONFIG_SND_SOC_MSMFALCON_COMMON) += snd-soc-msmfalcon-common.o
# for MSMFALCON sound card driver
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 7f43ef401a4f..6396cd6aaf39 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -4947,6 +4947,26 @@ static struct snd_soc_dai_link msm_tavil_fe_dai_links[] = {
},
};
+static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
+ {
+ .name = MSM_DAILINK_NAME(ASM Loopback),
+ .stream_name = "MultiMedia6",
+ .cpu_dai_name = "MultiMedia6",
+ .platform_name = "msm-pcm-loopback",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ },
+};
+
static struct snd_soc_dai_link msm_common_be_dai_links[] = {
/* Backend AFE DAI Links */
{
@@ -5870,6 +5890,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = {
static struct snd_soc_dai_link msm_tasha_dai_links[
ARRAY_SIZE(msm_common_dai_links) +
ARRAY_SIZE(msm_tasha_fe_dai_links) +
+ ARRAY_SIZE(msm_common_misc_fe_dai_links) +
ARRAY_SIZE(msm_common_be_dai_links) +
ARRAY_SIZE(msm_tasha_be_dai_links) +
ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -5880,6 +5901,7 @@ static struct snd_soc_dai_link msm_tasha_dai_links[
static struct snd_soc_dai_link msm_tavil_dai_links[
ARRAY_SIZE(msm_common_dai_links) +
ARRAY_SIZE(msm_tavil_fe_dai_links) +
+ ARRAY_SIZE(msm_common_misc_fe_dai_links) +
ARRAY_SIZE(msm_common_be_dai_links) +
ARRAY_SIZE(msm_tavil_be_dai_links) +
ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -6213,7 +6235,7 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
{
struct snd_soc_card *card = NULL;
struct snd_soc_dai_link *dailink;
- int len_1, len_2, len_3;
+ int len_1, len_2, len_3, len_4;
int total_links;
const struct of_device_id *match;
@@ -6228,8 +6250,9 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
card = &snd_soc_card_tasha_msm;
len_1 = ARRAY_SIZE(msm_common_dai_links);
len_2 = len_1 + ARRAY_SIZE(msm_tasha_fe_dai_links);
- len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
- total_links = len_3 + ARRAY_SIZE(msm_tasha_be_dai_links);
+ len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+ len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+ total_links = len_4 + ARRAY_SIZE(msm_tasha_be_dai_links);
memcpy(msm_tasha_dai_links,
msm_common_dai_links,
sizeof(msm_common_dai_links));
@@ -6237,9 +6260,12 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
msm_tasha_fe_dai_links,
sizeof(msm_tasha_fe_dai_links));
memcpy(msm_tasha_dai_links + len_2,
+ msm_common_misc_fe_dai_links,
+ sizeof(msm_common_misc_fe_dai_links));
+ memcpy(msm_tasha_dai_links + len_3,
msm_common_be_dai_links,
sizeof(msm_common_be_dai_links));
- memcpy(msm_tasha_dai_links + len_3,
+ memcpy(msm_tasha_dai_links + len_4,
msm_tasha_be_dai_links,
sizeof(msm_tasha_be_dai_links));
@@ -6280,8 +6306,9 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
card = &snd_soc_card_tavil_msm;
len_1 = ARRAY_SIZE(msm_common_dai_links);
len_2 = len_1 + ARRAY_SIZE(msm_tavil_fe_dai_links);
- len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
- total_links = len_3 + ARRAY_SIZE(msm_tavil_be_dai_links);
+ len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+ len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+ total_links = len_4 + ARRAY_SIZE(msm_tavil_be_dai_links);
memcpy(msm_tavil_dai_links,
msm_common_dai_links,
sizeof(msm_common_dai_links));
@@ -6289,9 +6316,12 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
msm_tavil_fe_dai_links,
sizeof(msm_tavil_fe_dai_links));
memcpy(msm_tavil_dai_links + len_2,
+ msm_common_misc_fe_dai_links,
+ sizeof(msm_common_misc_fe_dai_links));
+ memcpy(msm_tavil_dai_links + len_3,
msm_common_be_dai_links,
sizeof(msm_common_be_dai_links));
- memcpy(msm_tavil_dai_links + len_3,
+ memcpy(msm_tavil_dai_links + len_4,
msm_tavil_be_dai_links,
sizeof(msm_tavil_be_dai_links));
diff --git a/sound/soc/msm/msmfalcon-common.c b/sound/soc/msm/msmfalcon-common.c
index b4b35ee144ff..e705fa8faf73 100644
--- a/sound/soc/msm/msmfalcon-common.c
+++ b/sound/soc/msm/msmfalcon-common.c
@@ -16,15 +16,17 @@
#include <sound/pcm_params.h>
#include <sound/q6afe-v2.h>
#include "qdsp6v2/msm-pcm-routing-v2.h"
-#include "msm-audio-pinctrl.h"
#include "msmfalcon-common.h"
#include "msmfalcon-internal.h"
#include "msmfalcon-external.h"
-#include "../codecs/msm8x16/msm8x16-wcd.h"
+#include "../codecs/msmfalcon_cdc/msm-analog-cdc.h"
#include "../codecs/wsa881x.h"
#define DRV_NAME "msmfalcon-asoc-snd"
+#define MSM_INT_DIGITAL_CODEC "msm-dig-codec"
+#define PMIC_INT_ANALOG_CODEC "analog-codec"
+
#define DEV_NAME_STR_LEN 32
#define DEFAULT_MCLK_RATE 9600000
@@ -189,7 +191,7 @@ static struct wcd_mbhc_config mbhc_cfg = {
.detect_extn_cable = true,
.mono_stero_detection = false,
.swap_gnd_mic = NULL,
- .hs_ext_micbias = false,
+ .hs_ext_micbias = true,
.key_code[0] = KEY_MEDIA,
.key_code[1] = KEY_VOICECOMMAND,
.key_code[2] = KEY_VOLUMEUP,
@@ -2233,6 +2235,7 @@ static bool msm_swap_gnd_mic(struct snd_soc_codec *codec)
}
static int msm_populate_dai_link_component_of_node(
+ struct msm_asoc_mach_data *pdata,
struct snd_soc_card *card)
{
int i, index, ret = 0;
@@ -2312,6 +2315,31 @@ codec_dai:
dai_link[i].codec_of_node = phandle;
dai_link[i].codec_name = NULL;
}
+ if (pdata->snd_card_val == INT_SND_CARD) {
+ if ((dai_link[i].be_id ==
+ MSM_BACKEND_DAI_INT0_MI2S_RX) ||
+ (dai_link[i].be_id ==
+ MSM_BACKEND_DAI_INT1_MI2S_RX) ||
+ (dai_link[i].be_id ==
+ MSM_BACKEND_DAI_INT2_MI2S_TX) ||
+ (dai_link[i].be_id ==
+ MSM_BACKEND_DAI_INT3_MI2S_TX)) {
+ index = of_property_match_string(cdev->of_node,
+ "asoc-codec-names",
+ MSM_INT_DIGITAL_CODEC);
+ phandle = of_parse_phandle(cdev->of_node,
+ "asoc-codec",
+ index);
+ dai_link[i].codecs[DIG_CDC].of_node = phandle;
+ index = of_property_match_string(cdev->of_node,
+ "asoc-codec-names",
+ PMIC_INT_ANALOG_CODEC);
+ phandle = of_parse_phandle(cdev->of_node,
+ "asoc-codec",
+ index);
+ dai_link[i].codecs[ANA_CDC].of_node = phandle;
+ }
+ }
}
err:
return ret;
@@ -2691,13 +2719,16 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
if (pdata->snd_card_val == INT_SND_CARD) {
/*reading the gpio configurations from dtsi file*/
- ret = msm_gpioset_initialize(CLIENT_WCD, &pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "%s: error reading dtsi files%d\n",
- __func__, ret);
- goto err;
- }
+ pdata->pdm_gpio_p = of_parse_phandle(pdev->dev.of_node,
+ "qcom,cdc-pdm-gpios", 0);
+ pdata->comp_gpio_p = of_parse_phandle(pdev->dev.of_node,
+ "qcom,cdc-comp-gpios", 0);
+ pdata->sdw_gpio_p = of_parse_phandle(pdev->dev.of_node,
+ "qcom,cdc-sdw-gpios", 0);
+ pdata->dmic_gpio_p = of_parse_phandle(pdev->dev.of_node,
+ "qcom,cdc-dmic-gpios", 0);
+ pdata->ext_spk_gpio_p = of_parse_phandle(pdev->dev.of_node,
+ "qcom,cdc-ext-spk-gpios", 0);
}
/*
@@ -2730,7 +2761,7 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
if (ret)
goto err;
- ret = msm_populate_dai_link_component_of_node(card);
+ ret = msm_populate_dai_link_component_of_node(pdata, card);
if (ret) {
ret = -EPROBE_DEFER;
goto err;
diff --git a/sound/soc/msm/msmfalcon-common.h b/sound/soc/msm/msmfalcon-common.h
index 5f6b8592acec..3c18852cf897 100644
--- a/sound/soc/msm/msmfalcon-common.h
+++ b/sound/soc/msm/msmfalcon-common.h
@@ -59,6 +59,12 @@ struct tdm_port {
u32 channel;
};
+enum {
+ DIG_CDC,
+ ANA_CDC,
+ CODECS_MAX,
+};
+
extern const struct snd_kcontrol_new msm_common_snd_controls[];
struct msmfalcon_codec {
void* (*get_afe_config_fn)(struct snd_soc_codec *codec,
@@ -78,11 +84,17 @@ struct msm_asoc_mach_data {
struct device_node *us_euro_gpio_p; /* used by pinctrl API */
struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
+ struct device_node *pdm_gpio_p; /* used by pinctrl API */
+ struct device_node *comp_gpio_p; /* used by pinctrl API */
+ struct device_node *sdw_gpio_p; /* used by pinctrl API */
+ struct device_node *dmic_gpio_p; /* used by pinctrl API */
+ struct device_node *ext_spk_gpio_p; /* used by pinctrl API */
struct snd_soc_codec *codec;
struct msmfalcon_codec msmfalcon_codec_fn;
struct snd_info_entry *codec_root;
int spk_ext_pa_gpio;
int mclk_freq;
+ bool native_clk_set;
int lb_mode;
int snd_card_val;
u8 micbias1_cap_mode;
diff --git a/sound/soc/msm/msmfalcon-ext-dai-links.c b/sound/soc/msm/msmfalcon-ext-dai-links.c
index 6f066c5945a9..fc6d52233a33 100644
--- a/sound/soc/msm/msmfalcon-ext-dai-links.c
+++ b/sound/soc/msm/msmfalcon-ext-dai-links.c
@@ -861,6 +861,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = {
.stream_name = "Compress1",
.cpu_dai_name = "MultiMedia4",
.platform_name = "msm-compress-dsp",
+ .async_ops = ASYNC_DPCM_SND_SOC_HW_PARAMS,
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
diff --git a/sound/soc/msm/msmfalcon-internal.c b/sound/soc/msm/msmfalcon-internal.c
index 180ff492e9e9..a40740d02582 100644
--- a/sound/soc/msm/msmfalcon-internal.c
+++ b/sound/soc/msm/msmfalcon-internal.c
@@ -13,11 +13,13 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
#include <sound/pcm_params.h>
#include "qdsp6v2/msm-pcm-routing-v2.h"
-#include "msm-audio-pinctrl.h"
#include "msmfalcon-common.h"
-#include "../codecs/msm8x16/msm8x16-wcd.h"
+#include "../codecs/msmfalcon_cdc/msm-digital-cdc.h"
+#include "../codecs/msmfalcon_cdc/msm-analog-cdc.h"
+#include "../codecs/msm_sdw/msm_sdw.h"
#define __CHIPSET__ "MSMFALCON "
#define MSM_DAILINK_NAME(name) (__CHIPSET__#name)
@@ -30,6 +32,9 @@
#define WCN_CDC_SLIM_RX_CH_MAX 2
#define WCN_CDC_SLIM_TX_CH_MAX 3
+#define WSA8810_NAME_1 "wsa881x.20170211"
+#define WSA8810_NAME_2 "wsa881x.20170212"
+
enum {
INT0_MI2S = 0,
INT1_MI2S,
@@ -176,6 +181,9 @@ static int msm_int_mi2s_snd_startup(struct snd_pcm_substream *substream);
static void msm_int_mi2s_snd_shutdown(struct snd_pcm_substream *substream);
static struct wcd_mbhc_config *mbhc_cfg_ptr;
+static struct snd_info_entry *msm_sdw_codec_root;
+static struct snd_info_entry *msm_dig_codec_root;
+static struct snd_info_entry *pmic_analog_codec_root;
static int int_mi2s_get_bit_format_val(int bit_format)
{
@@ -443,22 +451,25 @@ static const struct snd_soc_dapm_widget msm_int_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Digital Mic4", msm_dmic_event),
};
-static int msm_config_hph_compander_gpio(bool enable)
+static int msm_config_hph_compander_gpio(bool enable,
+ struct snd_soc_codec *codec)
{
+ struct snd_soc_card *card = codec->component.card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
int ret = 0;
pr_debug("%s: %s HPH Compander\n", __func__,
enable ? "Enable" : "Disable");
if (enable) {
- ret = msm_gpioset_activate(CLIENT_WCD, "comp_gpio");
+ ret = msm_cdc_pinctrl_select_active_state(pdata->comp_gpio_p);
if (ret) {
pr_err("%s: gpio set cannot be activated %s\n",
__func__, "comp_gpio");
goto done;
}
} else {
- ret = msm_gpioset_suspend(CLIENT_WCD, "comp_gpio");
+ ret = msm_cdc_pinctrl_select_sleep_state(pdata->comp_gpio_p);
if (ret) {
pr_err("%s: gpio set cannot be de-activated %s\n",
__func__, "comp_gpio");
@@ -509,7 +520,8 @@ static int enable_spk_ext_pa(struct snd_soc_codec *codec, int enable)
enable ? "Enable" : "Disable");
if (enable) {
- ret = msm_gpioset_activate(CLIENT_WCD, "ext_spk_gpio");
+ ret = msm_cdc_pinctrl_select_active_state(
+ pdata->ext_spk_gpio_p);
if (ret) {
pr_err("%s: gpio set cannot be de-activated %s\n",
__func__, "ext_spk_gpio");
@@ -518,7 +530,8 @@ static int enable_spk_ext_pa(struct snd_soc_codec *codec, int enable)
gpio_set_value_cansleep(pdata->spk_ext_pa_gpio, enable);
} else {
gpio_set_value_cansleep(pdata->spk_ext_pa_gpio, enable);
- ret = msm_gpioset_suspend(CLIENT_WCD, "ext_spk_gpio");
+ ret = msm_cdc_pinctrl_select_sleep_state(
+ pdata->ext_spk_gpio_p);
if (ret) {
pr_err("%s: gpio set cannot be de-activated %s\n",
__func__, "ext_spk_gpio");
@@ -528,6 +541,35 @@ static int enable_spk_ext_pa(struct snd_soc_codec *codec, int enable)
return 0;
}
+static int msm_config_sdw_gpio(bool enable, struct snd_soc_codec *codec)
+{
+ struct snd_soc_card *card = codec->component.card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ pr_debug("%s: %s SDW Clk/Data Gpios\n", __func__,
+ enable ? "Enable" : "Disable");
+
+ if (enable) {
+ ret = msm_cdc_pinctrl_select_active_state(pdata->sdw_gpio_p);
+ if (ret) {
+ pr_err("%s: gpio set cannot be activated %s\n",
+ __func__, "sdw_pin");
+ goto done;
+ }
+ } else {
+ ret = msm_cdc_pinctrl_select_sleep_state(pdata->sdw_gpio_p);
+ if (ret) {
+ pr_err("%s: gpio set cannot be de-activated %s\n",
+ __func__, "sdw_pin");
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
static int int_mi2s_get_idx_from_beid(int32_t be_id)
{
int idx = 0;
@@ -672,10 +714,13 @@ static int msm_int_enable_dig_cdc_clk(struct snd_soc_codec *codec,
atomic_read(&pdata->int_mclk0_rsc_ref));
if (enable) {
if (int_mi2s_cfg[INT0_MI2S].sample_rate ==
- SAMPLING_RATE_44P1KHZ)
+ SAMPLING_RATE_44P1KHZ) {
clk_freq_in_hz = NATIVE_MCLK_RATE;
- else
+ pdata->native_clk_set = true;
+ } else {
clk_freq_in_hz = pdata->mclk_freq;
+ pdata->native_clk_set = false;
+ }
if (pdata->digital_cdc_core_clk.clk_freq_in_hz
!= clk_freq_in_hz)
@@ -744,7 +789,7 @@ static int loopback_mclk_put(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0]);
switch (ucontrol->value.integer.value[0]) {
case 1:
- ret = msm_gpioset_activate(CLIENT_WCD, "int_pdm");
+ ret = msm_cdc_pinctrl_select_active_state(pdata->pdm_gpio_p);
if (ret) {
pr_err("%s: failed to enable the pri gpios: %d\n",
__func__, ret);
@@ -761,8 +806,8 @@ static int loopback_mclk_put(struct snd_kcontrol *kcontrol,
pr_err("%s: failed to enable the MCLK: %d\n",
__func__, ret);
mutex_unlock(&pdata->cdc_int_mclk0_mutex);
- ret = msm_gpioset_suspend(CLIENT_WCD,
- "int_pdm");
+ ret = msm_cdc_pinctrl_select_sleep_state(
+ pdata->pdm_gpio_p);
if (ret)
pr_err("%s: failed to disable the pri gpios: %d\n",
__func__, ret);
@@ -772,12 +817,12 @@ static int loopback_mclk_put(struct snd_kcontrol *kcontrol,
}
mutex_unlock(&pdata->cdc_int_mclk0_mutex);
atomic_inc(&pdata->int_mclk0_rsc_ref);
- msm8x16_wcd_mclk_enable(codec, 1, true);
+ msm_anlg_cdc_mclk_enable(codec, 1, true);
break;
case 0:
if (atomic_read(&pdata->int_mclk0_rsc_ref) <= 0)
break;
- msm8x16_wcd_mclk_enable(codec, 0, true);
+ msm_anlg_cdc_mclk_enable(codec, 0, true);
mutex_lock(&pdata->cdc_int_mclk0_mutex);
if ((!atomic_dec_return(&pdata->int_mclk0_rsc_ref)) &&
(atomic_read(&pdata->int_mclk0_enabled))) {
@@ -794,7 +839,7 @@ static int loopback_mclk_put(struct snd_kcontrol *kcontrol,
atomic_set(&pdata->int_mclk0_enabled, false);
}
mutex_unlock(&pdata->cdc_int_mclk0_mutex);
- ret = msm_gpioset_suspend(CLIENT_WCD, "int_pdm");
+ ret = msm_cdc_pinctrl_select_sleep_state(pdata->pdm_gpio_p);
if (ret)
pr_err("%s: failed to disable the pri gpios: %d\n",
__func__, ret);
@@ -893,7 +938,7 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
msm_bt_sample_rate_put),
};
-static const struct snd_kcontrol_new msm_swr_controls[] = {
+static const struct snd_kcontrol_new msm_sdw_controls[] = {
SOC_ENUM_EXT("INT4_MI2S_RX Format", int4_mi2s_rx_format,
int_mi2s_bit_format_get, int_mi2s_bit_format_put),
SOC_ENUM_EXT("INT4_MI2S_RX SampleRate", int4_mi2s_rx_sample_rate,
@@ -919,7 +964,7 @@ static int msm_dmic_event(struct snd_soc_dapm_widget *w,
pr_debug("%s: event = %d\n", __func__, event);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- ret = msm_gpioset_activate(CLIENT_WCD, "dmic_gpio");
+ ret = msm_cdc_pinctrl_select_active_state(pdata->dmic_gpio_p);
if (ret < 0) {
pr_err("%s: gpio set cannot be activated %sd",
__func__, "dmic_gpio");
@@ -927,7 +972,7 @@ static int msm_dmic_event(struct snd_soc_dapm_widget *w,
}
break;
case SND_SOC_DAPM_POST_PMD:
- ret = msm_gpioset_suspend(CLIENT_WCD, "dmic_gpio");
+ ret = msm_cdc_pinctrl_select_sleep_state(pdata->dmic_gpio_p);
if (ret < 0) {
pr_err("%s: gpio set cannot be de-activated %sd",
__func__, "dmic_gpio");
@@ -954,7 +999,7 @@ static int msm_int_mclk0_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMD:
pr_debug("%s: mclk_res_ref = %d\n",
__func__, atomic_read(&pdata->int_mclk0_rsc_ref));
- ret = msm_gpioset_suspend(CLIENT_WCD, "int_pdm");
+ ret = msm_cdc_pinctrl_select_sleep_state(pdata->pdm_gpio_p);
if (ret < 0) {
pr_err("%s: gpio set cannot be de-activated %sd",
__func__, "int_pdm");
@@ -963,7 +1008,7 @@ static int msm_int_mclk0_event(struct snd_soc_dapm_widget *w,
if (atomic_read(&pdata->int_mclk0_rsc_ref) == 0) {
pr_debug("%s: disabling MCLK\n", __func__);
/* disable the codec mclk config*/
- msm8x16_wcd_mclk_enable(codec, 0, true);
+ msm_anlg_cdc_mclk_enable(codec, 0, true);
msm_int_enable_dig_cdc_clk(codec, 0, true);
}
break;
@@ -1098,7 +1143,7 @@ done:
return ret;
}
-static int msm_swr_mi2s_snd_startup(struct snd_pcm_substream *substream)
+static int msm_sdw_mi2s_snd_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
@@ -1113,13 +1158,6 @@ static int msm_swr_mi2s_snd_startup(struct snd_pcm_substream *substream)
__func__, ret);
return ret;
}
- /* Enable the codec mclk config */
- ret = msm_gpioset_activate(CLIENT_WCD, "swr_pin");
- if (ret < 0) {
- pr_err("%s: gpio set cannot be activated %sd",
- __func__, "swr_pin");
- return ret;
- }
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
pr_err("%s: set fmt cpu dai failed; ret=%d\n", __func__, ret);
@@ -1127,7 +1165,7 @@ static int msm_swr_mi2s_snd_startup(struct snd_pcm_substream *substream)
return ret;
}
-static void msm_swr_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
+static void msm_sdw_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
{
int ret;
@@ -1144,9 +1182,11 @@ static int msm_int_mi2s_snd_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_codec *codec = rtd->codec_dais[ANA_CDC]->codec;
int ret = 0;
+ struct msm_asoc_mach_data *pdata = NULL;
+ pdata = snd_soc_card_get_drvdata(codec->component.card);
pr_debug("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -1162,13 +1202,13 @@ static int msm_int_mi2s_snd_startup(struct snd_pcm_substream *substream)
return ret;
}
/* Enable the codec mclk config */
- ret = msm_gpioset_activate(CLIENT_WCD, "int_pdm");
+ ret = msm_cdc_pinctrl_select_active_state(pdata->pdm_gpio_p);
if (ret < 0) {
pr_err("%s: gpio set cannot be activated %s\n",
__func__, "int_pdm");
return ret;
}
- msm8x16_wcd_mclk_enable(codec, 1, true);
+ msm_anlg_cdc_mclk_enable(codec, 1, true);
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
pr_err("%s: set fmt cpu dai failed; ret=%d\n", __func__, ret);
@@ -1249,22 +1289,22 @@ static void *def_msm_int_wcd_mbhc_cal(void)
static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm =
- snd_soc_codec_get_dapm(codec);
+ struct snd_soc_codec *dig_cdc = rtd->codec_dais[DIG_CDC]->codec;
+ struct snd_soc_codec *ana_cdc = rtd->codec_dais[ANA_CDC]->codec;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(ana_cdc);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux;
+ struct snd_card *card;
+ struct snd_info_entry *entry;
int ret = -ENOMEM;
pr_debug("%s(),dev_name%s\n", __func__, dev_name(cpu_dai->dev));
- snd_soc_add_codec_controls(codec, msm_snd_controls,
- ARRAY_SIZE(msm_snd_controls));
-
- snd_soc_add_codec_controls(codec, msm_common_snd_controls,
- ARRAY_SIZE(msm_snd_controls));
+ snd_soc_add_codec_controls(ana_cdc, msm_snd_controls,
+ ARRAY_SIZE(msm_snd_controls));
snd_soc_dapm_new_controls(dapm, msm_int_dapm_widgets,
- ARRAY_SIZE(msm_int_dapm_widgets));
+ ARRAY_SIZE(msm_int_dapm_widgets));
snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
@@ -1285,39 +1325,86 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_sync(dapm);
- msm8x16_wcd_spk_ext_pa_cb(enable_spk_ext_pa, codec);
- msm8x16_wcd_hph_comp_cb(msm_config_hph_compander_gpio, codec);
+ /*
+ * Send speaker configuration only for WSA8810.
+	 * Default configuration is for WSA8815.
+ */
+ if (rtd_aux && rtd_aux->component)
+ if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
+ !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+ msm_sdw_set_spkr_mode(rtd->codec, SPKR_MODE_1);
+ msm_sdw_set_spkr_gain_offset(rtd->codec,
+ RX_GAIN_OFFSET_M1P5_DB);
+ }
+ msm_anlg_cdc_spk_ext_pa_cb(enable_spk_ext_pa, ana_cdc);
+ msm_dig_cdc_hph_comp_cb(msm_config_hph_compander_gpio, dig_cdc);
mbhc_cfg_ptr->calibration = def_msm_int_wcd_mbhc_cal();
if (mbhc_cfg_ptr->calibration) {
- ret = msm8x16_wcd_hs_detect(codec, mbhc_cfg_ptr);
+ ret = msm_anlg_cdc_hs_detect(ana_cdc, mbhc_cfg_ptr);
if (ret) {
- pr_err("%s: msm8x16_wcd_hs_detect failed\n", __func__);
+ pr_err("%s: msm_anlg_cdc_hs_detect failed\n", __func__);
kfree(mbhc_cfg_ptr->calibration);
return ret;
}
}
+ card = rtd->card->snd_card;
+ entry = snd_register_module_info(card->module, "codecs",
+ card->proc_root);
+ if (!entry) {
+ pr_debug("%s: Cannot create codecs module entry\n",
+ __func__);
+ msm_dig_codec_root = NULL;
+ goto done;
+ }
+ msm_dig_codec_root = entry;
+ msm_dig_codec_info_create_codec_entry(msm_dig_codec_root, dig_cdc);
+ entry = snd_register_module_info(card->module, "codecs",
+ card->proc_root);
+ if (!entry) {
+ pr_debug("%s: Cannot create codecs module entry\n",
+ __func__);
+ pmic_analog_codec_root = NULL;
+ goto done;
+ }
+ pmic_analog_codec_root = entry;
+ msm_anlg_codec_info_create_codec_entry(pmic_analog_codec_root, ana_cdc);
+done:
return 0;
}
-static int msm_swr_audrx_init(struct snd_soc_pcm_runtime *rtd)
+static int msm_sdw_audrx_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm =
snd_soc_codec_get_dapm(codec);
+ struct snd_card *card;
+ struct snd_info_entry *entry;
- snd_soc_add_codec_controls(codec, msm_swr_controls,
- ARRAY_SIZE(msm_swr_controls));
+ snd_soc_add_codec_controls(codec, msm_sdw_controls,
+ ARRAY_SIZE(msm_sdw_controls));
- snd_soc_dapm_ignore_suspend(dapm, "AIF1_SWR Playback");
- snd_soc_dapm_ignore_suspend(dapm, "VIfeed_SWR");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1_SDW Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "VIfeed_SDW");
snd_soc_dapm_ignore_suspend(dapm, "SPK1 OUT");
snd_soc_dapm_ignore_suspend(dapm, "SPK2 OUT");
- snd_soc_dapm_ignore_suspend(dapm, "AIF1_SWR VI");
- snd_soc_dapm_ignore_suspend(dapm, "VIINPUT_SWR");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1_SDW VI");
+ snd_soc_dapm_ignore_suspend(dapm, "VIINPUT_SDW");
snd_soc_dapm_sync(dapm);
-
+ msm_sdw_gpio_cb(msm_config_sdw_gpio, codec);
+ card = rtd->card->snd_card;
+ entry = snd_register_module_info(card->module, "codecs",
+ card->proc_root);
+ if (!entry) {
+ pr_debug("%s: Cannot create codecs module entry\n",
+ __func__);
+ msm_sdw_codec_root = NULL;
+ goto done;
+ }
+ msm_sdw_codec_root = entry;
+ msm_sdw_codec_info_create_codec_entry(msm_sdw_codec_root, codec);
+done:
return 0;
}
@@ -1540,9 +1627,42 @@ static struct snd_soc_ops msm_int_mi2s_be_ops = {
.shutdown = msm_int_mi2s_snd_shutdown,
};
-static struct snd_soc_ops msm_swr_mi2s_be_ops = {
- .startup = msm_swr_mi2s_snd_startup,
- .shutdown = msm_swr_mi2s_snd_shutdown,
+static struct snd_soc_ops msm_sdw_mi2s_be_ops = {
+ .startup = msm_sdw_mi2s_snd_startup,
+ .shutdown = msm_sdw_mi2s_snd_shutdown,
+};
+
+struct snd_soc_dai_link_component dlc_rx1[] = {
+ {
+ .of_node = NULL,
+ .dai_name = "msm_dig_cdc_dai_rx1",
+ },
+ {
+ .of_node = NULL,
+ .dai_name = "msm_anlg_cdc_i2s_rx1",
+ },
+};
+
+struct snd_soc_dai_link_component dlc_tx1[] = {
+ {
+ .of_node = NULL,
+ .dai_name = "msm_dig_cdc_dai_tx1",
+ },
+ {
+ .of_node = NULL,
+ .dai_name = "msm_anlg_cdc_i2s_tx1",
+ },
+};
+
+struct snd_soc_dai_link_component dlc_tx2[] = {
+ {
+ .of_node = NULL,
+ .dai_name = "msm_dig_cdc_dai_tx2",
+ },
+ {
+ .of_node = NULL,
+ .dai_name = "msm_anlg_cdc_i2s_tx2",
+ },
};
/* Digital audio interface glue - connects codec <---> CPU */
@@ -1675,6 +1795,7 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.stream_name = "Compress1",
.cpu_dai_name = "MultiMedia4",
.platform_name = "msm-compress-dsp",
+ .async_ops = ASYNC_DPCM_SND_SOC_HW_PARAMS,
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
@@ -2118,11 +2239,11 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.stream_name = "INT5_mi2s Capture",
.cpu_dai_name = "msm-dai-q6-mi2s.12",
.platform_name = "msm-pcm-hostless",
- .codec_name = "msm_swr_codec",
- .codec_dai_name = "msm_swr_vifeedback",
+ .codec_name = "msm_sdw_codec",
+ .codec_dai_name = "msm_sdw_vifeedback",
.be_id = MSM_BACKEND_DAI_INT5_MI2S_TX,
.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
- .ops = &msm_swr_mi2s_be_ops,
+ .ops = &msm_sdw_mi2s_be_ops,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
.dpcm_capture = 1,
@@ -2202,8 +2323,8 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.stream_name = "INT0 MI2S Playback",
.cpu_dai_name = "msm-dai-q6-mi2s.7",
.platform_name = "msm-pcm-routing",
- .codec_name = "cajon_codec",
- .codec_dai_name = "msm8x16_wcd_i2s_rx1",
+ .codecs = dlc_rx1,
+ .num_codecs = CODECS_MAX,
.no_pcm = 1,
.dpcm_playback = 1,
.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
@@ -2215,18 +2336,34 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.ignore_suspend = 1,
},
{
+ .name = LPASS_BE_INT3_MI2S_TX,
+ .stream_name = "INT3 MI2S Capture",
+ .cpu_dai_name = "msm-dai-q6-mi2s.10",
+ .platform_name = "msm-pcm-routing",
+ .codecs = dlc_tx1,
+ .num_codecs = CODECS_MAX,
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
+ ASYNC_DPCM_SND_SOC_HW_PARAMS,
+ .be_id = MSM_BACKEND_DAI_INT3_MI2S_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_int_mi2s_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
.name = LPASS_BE_INT4_MI2S_RX,
.stream_name = "INT4 MI2S Playback",
.cpu_dai_name = "msm-dai-q6-mi2s.11",
.platform_name = "msm-pcm-routing",
- .codec_name = "msm_swr_codec",
- .codec_dai_name = "msm_swr_i2s_rx1",
+ .codec_name = "msm_sdw_codec",
+ .codec_dai_name = "msm_sdw_i2s_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
.be_id = MSM_BACKEND_DAI_INT4_MI2S_RX,
- .init = &msm_swr_audrx_init,
+ .init = &msm_sdw_audrx_init,
.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
- .ops = &msm_swr_mi2s_be_ops,
+ .ops = &msm_sdw_mi2s_be_ops,
.ignore_suspend = 1,
},
{
@@ -2234,8 +2371,8 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.stream_name = "INT2 MI2S Capture",
.cpu_dai_name = "msm-dai-q6-mi2s.9",
.platform_name = "msm-pcm-routing",
- .codec_name = "cajon_codec",
- .codec_dai_name = "msm8x16_wcd_i2s_tx2",
+ .codecs = dlc_tx2,
+ .num_codecs = CODECS_MAX,
.no_pcm = 1,
.dpcm_capture = 1,
.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
@@ -2246,22 +2383,6 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.ignore_suspend = 1,
},
{
- .name = LPASS_BE_INT3_MI2S_TX,
- .stream_name = "INT3 MI2S Capture",
- .cpu_dai_name = "msm-dai-q6-mi2s.10",
- .platform_name = "msm-pcm-routing",
- .codec_name = "cajon_codec",
- .codec_dai_name = "msm8x16_wcd_i2s_tx1",
- .no_pcm = 1,
- .dpcm_capture = 1,
- .async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
- ASYNC_DPCM_SND_SOC_HW_PARAMS,
- .be_id = MSM_BACKEND_DAI_INT3_MI2S_TX,
- .be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
- .ops = &msm_int_mi2s_be_ops,
- .ignore_suspend = 1,
- },
- {
.name = LPASS_BE_AFE_PCM_RX,
.stream_name = "AFE Playback",
.cpu_dai_name = "msm-dai-q6-dev.224",
@@ -2913,8 +3034,7 @@ static int msm_internal_init(struct platform_device *pdev,
AFE_API_VERSION_I2S_CONFIG;
pdata->digital_cdc_core_clk.clk_id =
Q6AFE_LPASS_CLK_ID_INT_MCLK_0;
- pdata->digital_cdc_core_clk.clk_freq_in_hz =
- pdata->mclk_freq;
+ pdata->digital_cdc_core_clk.clk_freq_in_hz = 0;
pdata->digital_cdc_core_clk.clk_attri =
Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO;
pdata->digital_cdc_core_clk.clk_root =
diff --git a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c b/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
index dfa4bb23c45d..15845b2e6dc9 100644
--- a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
+++ b/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
@@ -1302,9 +1302,9 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
if (((u32 *)_sec_blob[target[0]])[1] != target[1]) {
eagle_ioctl_dbg("%s: request new size for already allocated license index %u",
__func__, target[0]);
- kfree(_sec_blob[target[0]]);
- _sec_blob[target[0]] = NULL;
}
+ kfree(_sec_blob[target[0]]);
+ _sec_blob[target[0]] = NULL;
}
eagle_ioctl_dbg("%s: allocating %u bytes for license index %u",
__func__, target[1], target[0]);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 01902f7f571d..8c545560cf10 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1481,6 +1481,7 @@ static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
{
u32 session_id = 0;
u16 path_type;
+ struct media_format_info voc_be_media_format;
pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
@@ -1513,8 +1514,22 @@ static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
if (set) {
if (msm_bedais[reg].active) {
voc_set_route_flag(session_id, path_type, 1);
+
+ memset(&voc_be_media_format, 0,
+ sizeof(struct media_format_info));
+
+ voc_be_media_format.port_id = msm_bedais[reg].port_id;
+ voc_be_media_format.num_channels =
+ msm_bedais[reg].channel;
+ voc_be_media_format.sample_rate =
+ msm_bedais[reg].sample_rate;
+ voc_be_media_format.bits_per_sample =
+ msm_bedais[reg].format;
+ /* Defaulting this to 1 for voice call usecases */
+ voc_be_media_format.channel_mapping[0] = 1;
+
voc_set_device_config(session_id, path_type,
- msm_bedais[reg].channel, msm_bedais[reg].port_id);
+ &voc_be_media_format);
if (voc_get_route_flag(session_id, TX_PATH) &&
voc_get_route_flag(session_id, RX_PATH))
@@ -2381,9 +2396,9 @@ static int msm_routing_ext_ec_put(struct snd_kcontrol *kcontrol,
}
pr_debug("%s: val = %d ext_ec_ref_port_id = 0x%0x state = %d\n",
- __func__, msm_route_ext_ec_ref, ext_ec_ref_port_id, state);
+ __func__, msm_route_ext_ec_ref, ext_ec_ref_port_id, state);
- if (!voc_set_ext_ec_ref(ext_ec_ref_port_id, state)) {
+ if (!voc_set_ext_ec_ref_port_id(ext_ec_ref_port_id, state)) {
mutex_unlock(&routing_lock);
snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e, update);
} else {
@@ -11433,6 +11448,7 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
uint16_t bits_per_sample = 16, voc_path_type;
struct msm_pcm_routing_fdai_data *fdai;
u32 session_id;
+ struct media_format_info voc_be_media_format;
pr_debug("%s: substream->pcm->id:%s\n",
__func__, substream->pcm->id);
@@ -11545,8 +11561,8 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MAX) {
session_id = msm_pcm_routing_get_voc_sessionid(i);
if (session_id) {
- pr_debug("%s voice session_id: 0x%x",
- __func__, session_id);
+ pr_debug("%s voice session_id: 0x%x\n", __func__,
+ session_id);
if (session_type == SESSION_TYPE_TX)
voc_path_type = TX_PATH;
@@ -11554,8 +11570,19 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
voc_path_type = RX_PATH;
voc_set_route_flag(session_id, voc_path_type, 1);
- voc_set_device_config(session_id, voc_path_type,
- bedai->channel, bedai->port_id);
+
+ memset(&voc_be_media_format, 0,
+ sizeof(struct media_format_info));
+
+ voc_be_media_format.port_id = bedai->port_id;
+ voc_be_media_format.num_channels = bedai->channel;
+ voc_be_media_format.sample_rate = bedai->sample_rate;
+ voc_be_media_format.bits_per_sample = bedai->format;
+ /* Defaulting this to 1 for voice call usecases */
+ voc_be_media_format.channel_mapping[0] = 1;
+
+ voc_set_device_config(session_id, voc_path_type,
+ &voc_be_media_format);
if (voc_get_route_flag(session_id, RX_PATH) &&
voc_get_route_flag(session_id, TX_PATH))
@@ -11563,6 +11590,27 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
}
}
+ /* Check if backend is an external ec ref port and set as needed */
+ if (unlikely(bedai->port_id == voc_get_ext_ec_ref_port_id())) {
+
+ memset(&voc_be_media_format, 0,
+ sizeof(struct media_format_info));
+
+ /* Get format info for ec ref port from msm_bedais[] */
+ voc_be_media_format.port_id = bedai->port_id;
+ voc_be_media_format.num_channels = bedai->channel;
+ voc_be_media_format.bits_per_sample = bedai->format;
+ voc_be_media_format.sample_rate = bedai->sample_rate;
+ /* Defaulting this to 1 for voice call usecases */
+ voc_be_media_format.channel_mapping[0] = 1;
+ voc_set_ext_ec_ref_media_fmt_info(&voc_be_media_format);
+ pr_debug("%s: EC Ref media format info set to port_id=%d, num_channels=%d, bits_per_sample=%d, sample_rate=%d\n",
+ __func__, voc_be_media_format.port_id,
+ voc_be_media_format.num_channels,
+ voc_be_media_format.bits_per_sample,
+ voc_be_media_format.sample_rate);
+ }
+
done:
mutex_unlock(&routing_lock);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index 6570819c2b31..c444a27c06e6 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -823,6 +823,11 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
(sizeof(buf_node->frame.frm_hdr) +
sizeof(buf_node->frame.pktlen));
}
+ if (ret) {
+ pr_err("%s: copy from user failed %d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
list_add_tail(&buf_node->list, &prtd->in_queue);
spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 38c51eb32f4d..19105ffd9d4a 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -8326,7 +8326,7 @@ int q6asm_send_cal(struct audio_client *ac)
q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
sizeof(struct asm_stream_cmd_set_pp_params_v2)), TRUE);
- atomic_set(&ac->cmd_state, 1);
+ atomic_set(&ac->cmd_state, -1);
hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
payload_params.data_payload_addr_lsw =
lower_32_bits(cal_block->cal_data.paddr);
@@ -8352,13 +8352,13 @@ int q6asm_send_cal(struct audio_client *ac)
goto free;
}
rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state) <= 0), 5 * HZ);
+ (atomic_read(&ac->cmd_state) >= 0), 5 * HZ);
if (!rc) {
pr_err("%s: timeout, audio audstrm cal send\n", __func__);
rc = -ETIMEDOUT;
goto free;
}
- if (atomic_read(&ac->cmd_state) < 0) {
+ if (atomic_read(&ac->cmd_state) > 0) {
pr_err("%s: DSP returned error[%d] audio audstrm cal send\n",
__func__, atomic_read(&ac->cmd_state));
rc = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index b242a8fe6017..5cff3dd1ed4f 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -78,7 +78,10 @@ static int voice_send_cvp_register_cal_cmd(struct voice_data *v);
static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v);
static int voice_send_cvp_register_vol_cal_cmd(struct voice_data *v);
static int voice_send_cvp_deregister_vol_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v);
static int voice_send_cvp_device_channels_cmd(struct voice_data *v);
+static int voice_send_cvp_media_format_cmd(struct voice_data *v,
+ uint32_t param_type);
static int voice_send_cvp_topology_commit_cmd(struct voice_data *v);
static int voice_cvs_stop_playback(struct voice_data *v);
@@ -2398,7 +2401,7 @@ static int voice_send_set_device_cmd(struct voice_data *v)
cvp_setdev_cmd.cvp_set_device_v2.vocproc_mode =
VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING;
cvp_setdev_cmd.cvp_set_device_v2.ec_ref_port_id =
- common.ec_port_id;
+ common.ec_media_fmt_info.port_id;
} else {
cvp_setdev_cmd.cvp_set_device_v2.vocproc_mode =
VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING;
@@ -2752,7 +2755,7 @@ static int voice_send_cvp_create_cmd(struct voice_data *v)
cvp_session_cmd.cvp_session.vocproc_mode =
VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING;
cvp_session_cmd.cvp_session.ec_ref_port_id =
- common.ec_port_id;
+ common.ec_media_fmt_info.port_id;
} else {
cvp_session_cmd.cvp_session.vocproc_mode =
VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING;
@@ -3826,10 +3829,10 @@ static int voice_setup_vocproc(struct voice_data *v)
goto fail;
}
- ret = voice_send_cvp_device_channels_cmd(v);
+ ret = voice_send_cvp_media_fmt_info_cmd(v);
if (ret < 0) {
- pr_err("%s: Set device channels failed err:%d\n",
- __func__, ret);
+ pr_err("%s: Set media format info failed err:%d\n", __func__,
+ ret);
goto fail;
}
@@ -3983,6 +3986,158 @@ done:
return ret;
}
+static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v)
+{
+ int ret;
+
+ if (voice_get_cvd_int_version(common.cvd_version) >=
+ CVD_INT_VERSION_2_3) {
+ ret = voice_send_cvp_media_format_cmd(v, RX_PATH);
+ if (ret < 0)
+ goto done;
+
+ ret = voice_send_cvp_media_format_cmd(v, TX_PATH);
+ if (ret < 0)
+ goto done;
+
+ if (common.ec_ref_ext)
+ ret = voice_send_cvp_media_format_cmd(v, EC_REF_PATH);
+ } else {
+ ret = voice_send_cvp_device_channels_cmd(v);
+ }
+
+done:
+ return ret;
+}
+
+static int voice_send_cvp_media_format_cmd(struct voice_data *v,
+ uint32_t param_type)
+{
+ int ret = 0;
+ struct cvp_set_media_format_cmd cvp_set_media_format_cmd;
+ void *apr_cvp;
+ u16 cvp_handle;
+ struct vss_icommon_param_data_t *media_fmt_param_data =
+ &cvp_set_media_format_cmd.cvp_set_param_v2.param_data;
+ struct vss_param_endpoint_media_format_info_t *media_fmt_info =
+ &media_fmt_param_data->media_format_info;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ apr_cvp = common.apr_q6_cvp;
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ cvp_handle = voice_get_cvp_handle(v);
+ memset(&cvp_set_media_format_cmd, 0, sizeof(cvp_set_media_format_cmd));
+
+ /* Fill header data */
+ cvp_set_media_format_cmd.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_set_media_format_cmd.hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_set_media_format_cmd) - APR_HDR_SIZE);
+ cvp_set_media_format_cmd.hdr.src_svc = 0;
+ cvp_set_media_format_cmd.hdr.src_domain = APR_DOMAIN_APPS;
+ cvp_set_media_format_cmd.hdr.src_port =
+ voice_get_idx_for_session(v->session_id);
+ cvp_set_media_format_cmd.hdr.dest_svc = 0;
+ cvp_set_media_format_cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+ cvp_set_media_format_cmd.hdr.dest_port = cvp_handle;
+ cvp_set_media_format_cmd.hdr.token = VOC_SET_MEDIA_FORMAT_PARAM_TOKEN;
+ cvp_set_media_format_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2;
+
+ /* Fill param data */
+ cvp_set_media_format_cmd.cvp_set_param_v2.mem_size =
+ sizeof(struct vss_icommon_param_data_t);
+ media_fmt_param_data->module_id = VSS_MODULE_CVD_GENERIC;
+ media_fmt_param_data->param_size =
+ sizeof(struct vss_param_endpoint_media_format_info_t);
+
+ /* Fill device specific data */
+ switch (param_type) {
+ case RX_PATH:
+ media_fmt_param_data->param_id =
+ VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO;
+ media_fmt_info->port_id = v->dev_rx.port_id;
+ media_fmt_info->num_channels = v->dev_rx.no_of_channels;
+ media_fmt_info->bits_per_sample = v->dev_rx.bits_per_sample;
+ media_fmt_info->sample_rate = v->dev_rx.sample_rate;
+ memcpy(&media_fmt_info->channel_mapping,
+ &v->dev_rx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+ break;
+
+ case TX_PATH:
+ media_fmt_param_data->param_id =
+ VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO;
+ media_fmt_info->port_id = v->dev_tx.port_id;
+ media_fmt_info->num_channels = v->dev_tx.no_of_channels;
+ media_fmt_info->bits_per_sample = v->dev_tx.bits_per_sample;
+ media_fmt_info->sample_rate = v->dev_tx.sample_rate;
+ memcpy(&media_fmt_info->channel_mapping,
+ &v->dev_tx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+ break;
+
+ case EC_REF_PATH:
+ media_fmt_param_data->param_id =
+ VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO;
+ media_fmt_info->port_id = common.ec_media_fmt_info.port_id;
+ media_fmt_info->num_channels =
+ common.ec_media_fmt_info.num_channels;
+ media_fmt_info->bits_per_sample =
+ common.ec_media_fmt_info.bits_per_sample;
+ media_fmt_info->sample_rate =
+ common.ec_media_fmt_info.sample_rate;
+ memcpy(&media_fmt_info->channel_mapping,
+ &common.ec_media_fmt_info.channel_mapping,
+ VSS_CHANNEL_MAPPING_SIZE);
+ break;
+
+ default:
+ pr_err("%s: Invalid param type %d\n", __func__, param_type);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Send command */
+ v->cvp_state = CMD_STATUS_FAIL;
+ v->async_err = 0;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_set_media_format_cmd);
+ if (ret < 0) {
+ pr_err("%s: Fail in sending VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (v->async_err > 0) {
+ pr_err("%s: DSP returned error[%s] handle = %d\n", __func__,
+ adsp_err_get_err_str(v->async_err), cvp_handle);
+ ret = adsp_err_get_lnx_err_code(v->async_err);
+ goto done;
+ }
+
+done:
+ return ret;
+}
+
static int voice_send_cvp_topology_commit_cmd(struct voice_data *v)
{
int ret = 0;
@@ -5743,7 +5898,7 @@ int voc_set_rx_vol_step(uint32_t session_id, uint32_t dir, uint32_t vol_step,
}
int voc_set_device_config(uint32_t session_id, uint8_t path_dir,
- uint8_t no_of_channels, uint32_t port_id)
+ struct media_format_info *finfo)
{
struct voice_data *v = voice_get_session(session_id);
@@ -5753,22 +5908,55 @@ int voc_set_device_config(uint32_t session_id, uint8_t path_dir,
return -EINVAL;
}
- pr_debug("%s: path_dir=%d port_id=%x, channels=%d\n",
- __func__, path_dir, port_id, no_of_channels);
+ pr_debug("%s: path_dir=%d port_id=%x, channels=%d, sample_rate=%d, bits_per_sample=%d\n",
+ __func__, path_dir, finfo->port_id, finfo->num_channels,
+ finfo->sample_rate, finfo->bits_per_sample);
mutex_lock(&v->lock);
- if (path_dir == RX_PATH) {
- v->dev_rx.port_id = q6audio_get_port_id(port_id);
- v->dev_rx.no_of_channels = no_of_channels;
- } else {
- v->dev_tx.port_id = q6audio_get_port_id(port_id);
- v->dev_tx.no_of_channels = no_of_channels;
+ switch (path_dir) {
+ case RX_PATH:
+ v->dev_rx.port_id = q6audio_get_port_id(finfo->port_id);
+ v->dev_rx.no_of_channels = finfo->num_channels;
+ v->dev_rx.sample_rate = finfo->sample_rate;
+ v->dev_rx.bits_per_sample = finfo->bits_per_sample;
+ memcpy(&v->dev_rx.channel_mapping, &finfo->channel_mapping,
+ VSS_CHANNEL_MAPPING_SIZE);
+ break;
+ case TX_PATH:
+ v->dev_tx.port_id = q6audio_get_port_id(finfo->port_id);
+ v->dev_tx.no_of_channels = finfo->num_channels;
+ v->dev_tx.sample_rate = finfo->sample_rate;
+ v->dev_tx.bits_per_sample = finfo->bits_per_sample;
+ memcpy(&v->dev_tx.channel_mapping, &finfo->channel_mapping,
+ VSS_CHANNEL_MAPPING_SIZE);
+ break;
+ default:
+ pr_err("%s: Invalid path_dir %d\n", __func__, path_dir);
+ return -EINVAL;
}
+
mutex_unlock(&v->lock);
return 0;
}
+int voc_set_ext_ec_ref_media_fmt_info(struct media_format_info *finfo)
+{
+ mutex_lock(&common.common_lock);
+ if (common.ec_ref_ext) {
+ common.ec_media_fmt_info.num_channels = finfo->num_channels;
+ common.ec_media_fmt_info.bits_per_sample =
+ finfo->bits_per_sample;
+ common.ec_media_fmt_info.sample_rate = finfo->sample_rate;
+ memcpy(&common.ec_media_fmt_info.channel_mapping,
+ &finfo->channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+ } else {
+ pr_debug("%s: Ext Ec Ref not active, returning", __func__);
+ }
+ mutex_unlock(&common.common_lock);
+ return 0;
+}
+
int voc_set_route_flag(uint32_t session_id, uint8_t path_dir, uint8_t set)
{
struct voice_data *v = voice_get_session(session_id);
@@ -5982,9 +6170,9 @@ int voc_enable_device(uint32_t session_id)
goto done;
}
- ret = voice_send_cvp_device_channels_cmd(v);
+ ret = voice_send_cvp_media_fmt_info_cmd(v);
if (ret < 0) {
- pr_err("%s: Set device channels failed\n", __func__);
+ pr_err("%s: Set format failed err:%d\n", __func__, ret);
goto done;
}
@@ -6179,7 +6367,7 @@ fail:
return ret;
}
-int voc_set_ext_ec_ref(uint16_t port_id, bool state)
+int voc_set_ext_ec_ref_port_id(uint16_t port_id, bool state)
{
int ret = 0;
@@ -6190,17 +6378,25 @@ int voc_set_ext_ec_ref(uint16_t port_id, bool state)
ret = -EINVAL;
goto exit;
}
- common.ec_port_id = port_id;
common.ec_ref_ext = true;
} else {
common.ec_ref_ext = false;
- common.ec_port_id = port_id;
}
+	/* Cache EC Format Info in common */
+ common.ec_media_fmt_info.port_id = port_id;
exit:
mutex_unlock(&common.common_lock);
return ret;
}
+int voc_get_ext_ec_ref_port_id(void)
+{
+ if (common.ec_ref_ext)
+ return common.ec_media_fmt_info.port_id;
+ else
+ return AFE_PORT_INVALID;
+}
+
void voc_register_mvs_cb(ul_cb_fn ul_cb,
dl_cb_fn dl_cb,
voip_ssr_cb ssr_cb,
@@ -6557,18 +6753,19 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
v->async_err = ptr[1];
wake_up(&v->cvs_wait);
break;
- case VOICE_CMD_SET_PARAM:
- pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+ case VSS_ICOMMON_CMD_SET_PARAM_V2:
+ pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+ __func__);
rtac_make_voice_callback(RTAC_CVS, ptr,
data->payload_size);
break;
- case VOICE_CMD_GET_PARAM:
- pr_debug("%s: VOICE_CMD_GET_PARAM\n",
- __func__);
+ case VSS_ICOMMON_CMD_GET_PARAM_V2:
+ pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
+ __func__);
/* Should only come here if there is an APR */
/* error or malformed APR packet. Otherwise */
/* response will be returned as */
- /* VOICE_EVT_GET_PARAM_ACK */
+ /* VSS_ICOMMON_RSP_GET_PARAM */
if (ptr[1] != 0) {
pr_err("%s: CVP get param error = %d, resuming\n",
__func__, ptr[1]);
@@ -6695,12 +6892,12 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n");
} else if (data->opcode == VSS_ISTREAM_EVT_READY) {
pr_debug("Recd VSS_ISTREAM_EVT_READY\n");
- } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
- pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+ } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+ pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
ptr = data->payload;
if (ptr[0] != 0) {
- pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
- __func__, ptr[0]);
+ pr_err("%s: VSS_ICOMMON_RSP_GET_PARAM returned error = 0x%x\n",
+ __func__, ptr[0]);
}
rtac_make_voice_callback(RTAC_CVS, data->payload,
data->payload_size);
@@ -6837,18 +7034,35 @@ static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv)
break;
case VSS_IVPCM_EVT_PUSH_BUFFER_V2:
break;
- case VOICE_CMD_SET_PARAM:
- pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
- rtac_make_voice_callback(RTAC_CVP, ptr,
- data->payload_size);
+ case VSS_ICOMMON_CMD_SET_PARAM_V2:
+ switch (data->token) {
+ case VOC_SET_MEDIA_FORMAT_PARAM_TOKEN:
+ pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by voice_send_cvp_media_format_cmd\n",
+ __func__);
+ v->cvp_state = CMD_STATUS_SUCCESS;
+ v->async_err = ptr[1];
+ wake_up(&v->cvp_wait);
+ break;
+ case VOC_RTAC_SET_PARAM_TOKEN:
+ pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by rtac\n",
+ __func__);
+ rtac_make_voice_callback(
+ RTAC_CVP, ptr,
+ data->payload_size);
+ break;
+ default:
+ pr_debug("%s: invalid token for command VSS_ICOMMON_CMD_SET_PARAM_V2: %d\n",
+ __func__, data->token);
+ break;
+ }
break;
- case VOICE_CMD_GET_PARAM:
- pr_debug("%s: VOICE_CMD_GET_PARAM\n",
- __func__);
+ case VSS_ICOMMON_CMD_GET_PARAM_V2:
+ pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
+ __func__);
/* Should only come here if there is an APR */
/* error or malformed APR packet. Otherwise */
/* response will be returned as */
- /* VOICE_EVT_GET_PARAM_ACK */
+ /* VSS_ICOMMON_RSP_GET_PARAM */
if (ptr[1] != 0) {
pr_err("%s: CVP get param error = %d, resuming\n",
__func__, ptr[1]);
@@ -6909,12 +7123,12 @@ static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv)
break;
}
}
- } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
- pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+ } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+ pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
ptr = data->payload;
if (ptr[0] != 0) {
- pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
- __func__, ptr[0]);
+ pr_err("%s: VSS_ICOMMON_RSP_GET_PARAM returned error = 0x%x\n",
+ __func__, ptr[0]);
}
rtac_make_voice_callback(RTAC_CVP, data->payload,
data->payload_size);
@@ -8335,7 +8549,16 @@ static int __init voice_init(void)
common.default_vol_step_val = 0;
common.default_vol_ramp_duration_ms = DEFAULT_VOLUME_RAMP_DURATION;
common.default_mute_ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION;
+
+ /* Initialize EC Ref media format info */
common.ec_ref_ext = false;
+ common.ec_media_fmt_info.port_id = AFE_PORT_INVALID;
+ common.ec_media_fmt_info.num_channels = 0;
+ common.ec_media_fmt_info.bits_per_sample = 16;
+ common.ec_media_fmt_info.sample_rate = 8000;
+ memset(&common.ec_media_fmt_info.channel_mapping, 0,
+ VSS_CHANNEL_MAPPING_SIZE);
+
/* Initialize MVS info. */
common.mvs_info.network_type = VSS_NETWORK_ID_DEFAULT;
@@ -8373,8 +8596,16 @@ static int __init voice_init(void)
common.voice[i].dev_rx.port_id = 0x100A;
common.voice[i].dev_tx.dev_id = 0;
common.voice[i].dev_rx.dev_id = 0;
- common.voice[i].dev_rx.no_of_channels = 0;
common.voice[i].dev_tx.no_of_channels = 0;
+ common.voice[i].dev_rx.no_of_channels = 0;
+ common.voice[i].dev_tx.sample_rate = 8000;
+ common.voice[i].dev_rx.sample_rate = 8000;
+ common.voice[i].dev_tx.bits_per_sample = 16;
+ common.voice[i].dev_rx.bits_per_sample = 16;
+ memset(&common.voice[i].dev_tx.channel_mapping, 0,
+ VSS_CHANNEL_MAPPING_SIZE);
+ memset(&common.voice[i].dev_rx.channel_mapping, 0,
+ VSS_CHANNEL_MAPPING_SIZE);
common.voice[i].sidetone_gain = 0x512;
common.voice[i].dtmf_rx_detect_en = 0;
common.voice[i].lch_mode = 0;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 834fe7c05306..9c3ec62a980d 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -22,6 +22,8 @@
#define SESSION_NAME_LEN 20
#define NUM_OF_MEMORY_BLOCKS 1
#define NUM_OF_BUFFERS 2
+#define VSS_NUM_CHANNELS_MAX 8
+#define VSS_CHANNEL_MAPPING_SIZE (sizeof(uint8_t) * VSS_NUM_CHANNELS_MAX)
/*
* BUFFER BLOCK SIZE based on
* the supported page size
@@ -98,7 +100,9 @@ struct stream_data {
/* Device information payload structure */
struct device_data {
uint32_t dev_mute;
- uint32_t sample;
+ uint32_t sample_rate;
+ uint16_t bits_per_sample;
+ uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
uint32_t enabled;
uint32_t dev_id;
uint32_t port_id;
@@ -108,6 +112,25 @@ struct device_data {
uint32_t no_of_channels;
};
+/*
+ * Format information structure to match
+ * vss_param_endpoint_media_format_info_t
+ */
+struct media_format_info {
+ uint32_t port_id;
+ uint16_t num_channels;
+ uint16_t bits_per_sample;
+ uint32_t sample_rate;
+ uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
+};
+
+enum {
+ VOC_NO_SET_PARAM_TOKEN = 0,
+ VOC_RTAC_SET_PARAM_TOKEN,
+ VOC_SET_MEDIA_FORMAT_PARAM_TOKEN,
+ VOC_SET_PARAM_TOKEN_MAX
+};
+
struct voice_dev_route_state {
u16 rx_route_flag;
u16 tx_route_flag;
@@ -190,6 +213,81 @@ struct vss_unmap_memory_cmd {
struct vss_icommon_cmd_unmap_memory_t vss_unmap_mem;
} __packed;
+struct vss_param_endpoint_media_format_info_t {
+ /* AFE port ID to which this media format corresponds to. */
+ uint32_t port_id;
+ /*
+ * Number of channels of data.
+ * Supported values: 1 to 8
+ */
+ uint16_t num_channels;
+ /*
+ * Bits per sample of data.
+ * Supported values: 16 and 24
+ */
+ uint16_t bits_per_sample;
+ /*
+ * Sampling rate in Hz.
+ * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+ * 44100, 48000, 88200, 96000, 176400, and 192000
+ */
+ uint32_t sample_rate;
+ /*
+ * The channel[i] mapping describes channel i. Each element i
+ * of the array describes channel i inside the data buffer. An
+ * unused or unknown channel is set to 0.
+ */
+ uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
+} __packed;
+
+struct vss_icommon_param_data_t {
+ /* Valid ID of the module. */
+ uint32_t module_id;
+ /* Valid ID of the parameter. */
+ uint32_t param_id;
+ /*
+ * Data size of the structure relating to the param_id/module_id
+ * combination in uint8_t bytes.
+ */
+ uint16_t param_size;
+ /* This field must be set to zero. */
+ uint16_t reserved;
+ /*
+ * Parameter data payload when inband. Should have size param_size.
+ * Bit size of payload must be a multiple of 4.
+ */
+ union {
+ struct vss_param_endpoint_media_format_info_t media_format_info;
+ };
+} __packed;
+
+/* Payload structure for the VSS_ICOMMON_CMD_SET_PARAM_V2 command. */
+struct vss_icommon_cmd_set_param_v2_t {
+ /*
+ * Pointer to the unique identifier for an address (physical/virtual).
+ *
+ * If the parameter data payload is within the message payload
+ * (in-band), set this field to 0. The parameter data begins at the
+ * specified data payload address.
+ *
+ * If the parameter data is out-of-band, this field is the handle to
+ * the physical address in the shared memory that holds the parameter
+ * data.
+ */
+ uint32_t mem_handle;
+ /*
+ * Location of the parameter data payload.
+ *
+ * The payload is an array of vss_icommon_param_data_t. If the
+ * mem_handle is 0, this field is ignored.
+ */
+ uint64_t mem_address;
+ /* Size of the parameter data payload in bytes. */
+ uint32_t mem_size;
+ /* Parameter data payload when the data is inband. */
+ struct vss_icommon_param_data_t param_data;
+} __packed;
+
/* TO MVM commands */
#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x000110FF
/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
@@ -577,6 +675,14 @@ struct vss_imemory_cmd_unmap_t {
#define VSS_IRECORD_MODE_TX_RX_MIXING 0x00010F7B
/* Select mixed Tx and Rx paths. */
+#define VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO 0x00013253
+
+#define VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO 0x00013254
+
+#define VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO 0x00013255
+
+#define VSS_MODULE_CVD_GENERIC 0x0001316E
+
#define VSS_ISTREAM_EVT_NOT_READY 0x000110FD
#define VSS_ISTREAM_EVT_READY 0x000110FC
@@ -1378,6 +1484,11 @@ struct cvp_set_dev_channels_cmd {
struct vss_ivocproc_cmd_topology_set_dev_channels_t cvp_set_channels;
} __packed;
+struct cvp_set_media_format_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_param_v2_t cvp_set_param_v2;
+} __packed;
+
struct cvp_set_vp3_data_cmd {
struct apr_hdr hdr;
} __packed;
@@ -1613,7 +1724,7 @@ struct common_data {
uint32_t default_vol_ramp_duration_ms;
uint32_t default_mute_ramp_duration_ms;
bool ec_ref_ext;
- uint16_t ec_port_id;
+ struct media_format_info ec_media_fmt_info;
/* APR to MVM in the Q6 */
void *apr_q6_mvm;
@@ -1685,9 +1796,9 @@ enum {
enum {
RX_PATH = 0,
TX_PATH,
+ EC_REF_PATH,
};
-
#define VOC_PATH_PASSIVE 0
#define VOC_PATH_FULL 1
#define VOC_PATH_VOLTE_PASSIVE 2
@@ -1773,7 +1884,9 @@ uint32_t voc_get_session_id(char *name);
int voc_start_playback(uint32_t set, uint16_t port_id);
int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id);
int voice_get_idx_for_session(u32 session_id);
-int voc_set_ext_ec_ref(uint16_t port_id, bool state);
+int voc_set_ext_ec_ref_port_id(uint16_t port_id, bool state);
+int voc_get_ext_ec_ref_port_id(void);
+int voc_set_ext_ec_ref_media_fmt_info(struct media_format_info *finfo);
int voc_update_amr_vocoder_rate(uint32_t session_id);
int voc_disable_device(uint32_t session_id);
int voc_enable_device(uint32_t session_id);
@@ -1781,7 +1894,7 @@ void voc_set_destroy_cvd_flag(bool is_destroy_cvd);
void voc_set_vote_bms_flag(bool is_vote_bms);
int voc_disable_topology(uint32_t session_id, uint32_t disable);
int voc_set_device_config(uint32_t session_id, uint8_t path_dir,
- uint8_t no_of_channels, uint32_t dev_port_id);
+ struct media_format_info *finfo);
uint32_t voice_get_topology(uint32_t topology_idx);
int voc_set_sound_focus(struct sound_focus_param sound_focus_param);
int voc_get_sound_focus(struct sound_focus_param *soundFocusData);
diff --git a/sound/soc/msm/qdsp6v2/rtac.c b/sound/soc/msm/qdsp6v2/rtac.c
index 188f0d3e8c5d..82551fb8ed71 100644
--- a/sound/soc/msm/qdsp6v2/rtac.c
+++ b/sound/soc/msm/qdsp6v2/rtac.c
@@ -1484,7 +1484,7 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
goto err;
}
- if (opcode == VOICE_CMD_SET_PARAM) {
+ if (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) {
/* set payload size to in-band payload */
/* set data size to actual out of band payload size */
data_size = payload_size - 4 * sizeof(u32);
@@ -1537,7 +1537,9 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
voice_params.dest_svc = 0;
voice_params.dest_domain = APR_DOMAIN_MODEM;
voice_params.dest_port = (u16)dest_port;
- voice_params.token = 0;
+ voice_params.token = (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) ?
+ VOC_RTAC_SET_PARAM_TOKEN :
+ 0;
voice_params.opcode = opcode;
/* fill for out-of-band */
@@ -1582,7 +1584,7 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
goto err;
}
- if (opcode == VOICE_CMD_GET_PARAM) {
+ if (opcode == VSS_ICOMMON_CMD_GET_PARAM_V2) {
bytes_returned = ((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
@@ -1675,20 +1677,20 @@ static long rtac_ioctl_shared(struct file *f,
ASM_STREAM_CMD_SET_PP_PARAMS_V2);
break;
case AUDIO_GET_RTAC_CVS_CAL:
- result = send_voice_apr(RTAC_CVS, (void *)arg,
- VOICE_CMD_GET_PARAM);
+ result = send_voice_apr(RTAC_CVS, (void *) arg,
+ VSS_ICOMMON_CMD_GET_PARAM_V2);
break;
case AUDIO_SET_RTAC_CVS_CAL:
- result = send_voice_apr(RTAC_CVS, (void *)arg,
- VOICE_CMD_SET_PARAM);
+ result = send_voice_apr(RTAC_CVS, (void *) arg,
+ VSS_ICOMMON_CMD_SET_PARAM_V2);
break;
case AUDIO_GET_RTAC_CVP_CAL:
- result = send_voice_apr(RTAC_CVP, (void *)arg,
- VOICE_CMD_GET_PARAM);
+ result = send_voice_apr(RTAC_CVP, (void *) arg,
+ VSS_ICOMMON_CMD_GET_PARAM_V2);
break;
case AUDIO_SET_RTAC_CVP_CAL:
- result = send_voice_apr(RTAC_CVP, (void *)arg,
- VOICE_CMD_SET_PARAM);
+ result = send_voice_apr(RTAC_CVP, (void *) arg,
+ VSS_ICOMMON_CMD_SET_PARAM_V2);
break;
case AUDIO_GET_RTAC_AFE_CAL:
result = send_rtac_afe_apr((void *)arg,
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1f6c247f773a..76720b2a1f0e 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -643,6 +643,7 @@ static int usb_audio_probe(struct usb_interface *intf,
usb_chip[chip->index] = chip;
chip->num_interfaces++;
usb_set_intfdata(intf, chip);
+ intf->needs_remote_wakeup = 1;
usb_enable_autosuspend(chip->dev);
atomic_dec(&chip->active);
mutex_unlock(&register_mutex);
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 204cc074adb9..41aa3355e920 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -55,7 +55,6 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
if (err < 0) {
line6pcm->impulse_volume = 0;
- line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
return err;
}
} else {
@@ -211,7 +210,9 @@ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction,
spin_lock_irqsave(&pstr->lock, flags);
clear_bit(type, &pstr->running);
if (!pstr->running) {
+ spin_unlock_irqrestore(&pstr->lock, flags);
line6_unlink_audio_urbs(line6pcm, pstr);
+ spin_lock_irqsave(&pstr->lock, flags);
if (direction == SNDRV_PCM_STREAM_CAPTURE) {
line6pcm->prev_fbuf = NULL;
line6pcm->prev_fsize = 0;
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index daf81d169a42..45dd34874f43 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -244,8 +244,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
static ssize_t serial_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%u\n", pod->serial_number);
}
@@ -256,8 +256,8 @@ static ssize_t serial_number_show(struct device *dev,
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
pod->firmware_version % 100);
@@ -269,8 +269,8 @@ static ssize_t firmware_version_show(struct device *dev,
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%d\n", pod->device_id);
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index db11ecf0b74d..3039e907f1f8 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1129,6 +1129,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
{
/* devices which do not support reading the sample rate. */
switch (chip->usb_id) {
+ case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */
case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
@@ -1139,7 +1140,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 8ce87195548e..2cdf4901b8c7 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
#include <linux/uaccess.h>
#include <sound/pcm.h>
#include <sound/core.h>
@@ -76,6 +77,8 @@ struct intf_info {
struct uaudio_dev {
struct usb_device *udev;
+ /* audio control interface */
+ struct usb_host_interface *ctrl_intf;
unsigned int card_num;
atomic_t in_use;
struct kref kref;
@@ -400,8 +403,8 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
struct uac_format_type_i_discrete_descriptor *fmt_v1;
struct uac_format_type_i_ext_descriptor *fmt_v2;
struct uac1_as_header_descriptor *as;
- struct uac1_ac_header_descriptor *ac;
int protocol;
+ void *hdr_ptr;
u8 *xfer_buf;
u32 len, mult, remainder;
unsigned long va, tr_data_va = 0, tr_sync_va = 0, dcba_va = 0,
@@ -436,6 +439,19 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
goto err;
}
+ if (!uadev[card_num].ctrl_intf) {
+ pr_err("%s: audio ctrl intf info not cached\n", __func__);
+ goto err;
+ }
+
+ hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
+ uadev[card_num].ctrl_intf->extralen,
+ NULL, UAC_HEADER);
+ if (!hdr_ptr) {
+ pr_err("%s: no UAC_HEADER desc\n", __func__);
+ goto err;
+ }
+
if (protocol == UAC_VERSION_1) {
as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
UAC_AS_GENERAL);
@@ -449,26 +465,23 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
fmt_v1 = (struct uac_format_type_i_discrete_descriptor *)fmt;
resp->usb_audio_subslot_size = fmt_v1->bSubframeSize;
resp->usb_audio_subslot_size_valid = 1;
+
+ resp->usb_audio_spec_revision =
+ ((struct uac1_ac_header_descriptor *)hdr_ptr)->bcdADC;
+ resp->usb_audio_spec_revision_valid = 1;
} else if (protocol == UAC_VERSION_2) {
fmt_v2 = (struct uac_format_type_i_ext_descriptor *)fmt;
resp->usb_audio_subslot_size = fmt_v2->bSubslotSize;
resp->usb_audio_subslot_size_valid = 1;
+
+ resp->usb_audio_spec_revision =
+ ((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
+ resp->usb_audio_spec_revision_valid = 1;
} else {
pr_err("%s: unknown protocol version %x\n", __func__, protocol);
goto err;
}
- ac = snd_usb_find_csint_desc(alts->extra,
- alts->extralen,
- NULL, UAC_HEADER);
- if (!ac) {
- pr_err("%s: %u:%d : no UAC_HEADER desc\n", __func__,
- subs->interface, subs->altset_idx);
- goto err;
- }
- resp->usb_audio_spec_revision = ac->bcdADC;
- resp->usb_audio_spec_revision_valid = 1;
-
resp->slot_id = subs->dev->slot_id;
resp->slot_id_valid = 1;
@@ -919,6 +932,7 @@ static int handle_uaudio_stream_req(void *req_h, void *req)
subs->pcm_format = pcm_format;
subs->channels = req_msg->number_of_ch;
subs->cur_rate = req_msg->bit_rate;
+ uadev[pcm_card_num].ctrl_intf = chip->ctrl_intf;
ret = snd_usb_enable_audio_stream(subs, req_msg->enable);
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 5480e4e424eb..f1d742682317 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -37,12 +37,14 @@
static int target_fd;
static char target_fname[W_MAX_PATH];
+static unsigned long long filesize;
static int hv_start_fcopy(struct hv_start_fcopy *smsg)
{
int error = HV_E_FAIL;
char *q, *p;
+ filesize = 0;
p = (char *)smsg->path_name;
snprintf(target_fname, sizeof(target_fname), "%s/%s",
(char *)smsg->path_name, (char *)smsg->file_name);
@@ -98,14 +100,26 @@ done:
static int hv_copy_data(struct hv_do_fcopy *cpmsg)
{
ssize_t bytes_written;
+ int ret = 0;
bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size,
cpmsg->offset);
- if (bytes_written != cpmsg->size)
- return HV_E_FAIL;
+ filesize += cpmsg->size;
+ if (bytes_written != cpmsg->size) {
+ switch (errno) {
+ case ENOSPC:
+ ret = HV_ERROR_DISK_FULL;
+ break;
+ default:
+ ret = HV_E_FAIL;
+ break;
+ }
+ syslog(LOG_ERR, "pwrite failed to write %llu bytes: %ld (%s)",
+ filesize, (long)bytes_written, strerror(errno));
+ }
- return 0;
+ return ret;
}
static int hv_copy_finished(void)
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index e5c1f2e21f87..de3965c4e4aa 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -501,7 +501,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
- bool have_timing_info;
+ bool have_timing_info, need_immediate = false;
struct perf_evsel *evsel, *intel_pt_evsel = NULL;
const struct cpu_map *cpus = evlist->cpus;
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
@@ -655,6 +655,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
ptr->have_sched_switch = 3;
} else {
opts->record_switch_events = true;
+ need_immediate = true;
if (cpu_wide)
ptr->have_sched_switch = 3;
else
@@ -700,6 +701,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
tracking_evsel->attr.freq = 0;
tracking_evsel->attr.sample_period = 1;
+ if (need_immediate)
+ tracking_evsel->immediate = true;
+
/* In per-cpu case, always need the time of mmap events etc */
if (!cpu_map__empty(cpus)) {
perf_evsel__set_sample_bit(tracking_evsel, TIME);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 51cf8256c6cd..f0d1c8ff8e8a 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
@@ -1246,6 +1247,7 @@ static int nfit_test_probe(struct platform_device *pdev)
if (nfit_test->setup != nfit_test0_setup)
return 0;
+ flush_work(&acpi_desc->work);
nfit_test->setup_hotplug = 1;
nfit_test->setup(nfit_test);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 510df220d1b5..336ed267c407 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -142,6 +142,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
put_cpu();
return 0;
}
+EXPORT_SYMBOL_GPL(vcpu_load);
void vcpu_put(struct kvm_vcpu *vcpu)
{
@@ -151,6 +152,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
preempt_enable();
mutex_unlock(&vcpu->mutex);
}
+EXPORT_SYMBOL_GPL(vcpu_put);
static void ack_flush(void *_completed)
{