-rw-r--r--Documentation/devicetree/bindings/arm/msm/msm.txt4
-rw-r--r--Documentation/devicetree/bindings/gpu/adreno.txt4
-rw-r--r--Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/msm_pcie.txt4
-rw-r--r--Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt2
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt4
-rw-r--r--Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt3
-rw-r--r--Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt7
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/msm-early-cam.txt110
-rw-r--r--Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt18
-rw-r--r--Makefile5
-rw-r--r--android/configs/android-base.cfg1
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/qcom/Makefile1
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts10
-rw-r--r--arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8098-v2.1-svr20.dts22
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/fg-gen3-batterydata-svr-v2-3200mah.dtsi84
-rw-r--r--arch/arm/boot/dts/qcom/msm-pm660.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm-pm660l.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmi8998.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi106
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi106
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi16
-rw-r--r--arch/arm/boot/dts/qcom/msm8996.dtsi84
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts3
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera-sensor-svr20.dtsi399
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-svr20-pinctrl.dtsi3386
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-svr20.dtsi409
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-pm.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/sdm630.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi79
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi79
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mtp.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi28
-rw-r--r--arch/arm/boot/dts/qcom/sdm660.dtsi11
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts84
-rw-r--r--arch/arm/configs/s3c2410_defconfig6
-rw-r--r--arch/arm/configs/sdm660-perf_defconfig1
-rw-r--r--arch/arm/configs/sdm660_defconfig1
-rw-r--r--arch/arm/include/asm/elf.h8
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi6
-rw-r--r--arch/arm64/configs/msm-perf_defconfig2
-rw-r--r--arch/arm64/configs/msm_defconfig2
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig4
-rw-r--r--arch/arm64/configs/msmcortex_defconfig4
-rw-r--r--arch/arm64/configs/sdm660-perf_defconfig6
-rw-r--r--arch/arm64/configs/sdm660_defconfig6
-rw-r--r--arch/arm64/include/asm/elf.h12
-rw-r--r--arch/arm64/include/asm/io.h4
-rw-r--r--arch/arm64/mm/fault.c8
-rw-r--r--arch/mips/include/asm/branch.h5
-rw-r--r--arch/mips/kernel/branch.c38
-rw-r--r--arch/mips/kernel/proc.c2
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/syscall.c15
-rw-r--r--arch/mips/math-emu/cp1emu.c38
-rw-r--r--arch/openrisc/kernel/vmlinux.lds.S2
-rw-r--r--arch/parisc/include/asm/dma-mapping.h11
-rw-r--r--arch/parisc/include/asm/mmu_context.h15
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/include/asm/atomic.h4
-rw-r--r--arch/powerpc/include/asm/elf.h13
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/topology.h14
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c41
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S41
-rw-r--r--arch/powerpc/lib/sstep.c19
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c1
-rw-r--r--arch/s390/include/asm/elf.h15
-rw-r--r--arch/s390/include/asm/syscall.h6
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c2
-rw-r--r--arch/x86/include/asm/elf.h13
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/include/asm/pat.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h3
-rw-r--r--arch/x86/kernel/acpi/boot.c8
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c3
-rw-r--r--arch/x86/kernel/setup.c7
-rw-r--r--arch/x86/kvm/cpuid.c13
-rw-r--r--arch/x86/kvm/cpuid.h9
-rw-r--r--arch/x86/kvm/vmx.c19
-rw-r--r--arch/x86/lib/copy_user_64.S7
-rw-r--r--arch/x86/mm/pat.c28
-rw-r--r--arch/x86/tools/relocs.c3
-rw-r--r--crypto/authencesn.c5
-rw-r--r--drivers/acpi/glue.c10
-rw-r--r--drivers/android/Makefile2
-rw-r--r--drivers/android/binder.c4303
-rw-r--r--drivers/android/binder_alloc.c802
-rw-r--r--drivers/android/binder_alloc.h163
-rw-r--r--drivers/android/binder_trace.h41
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/platform.c11
-rw-r--r--drivers/base/power/domain.c14
-rw-r--r--drivers/base/power/sysfs.c2
-rw-r--r--drivers/base/power/wakeup.c30
-rw-r--r--drivers/block/xen-blkback/blkback.c3
-rw-r--r--drivers/block/xen-blkback/xenbus.c8
-rw-r--r--drivers/bluetooth/btfm_slim.c14
-rw-r--r--drivers/bluetooth/btfm_slim_codec.c60
-rw-r--r--drivers/bluetooth/btfm_slim_wcn3990.c24
-rw-r--r--drivers/bluetooth/btfm_slim_wcn3990.h3
-rw-r--r--drivers/char/adsprpc.c34
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c5
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c5
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c7
-rw-r--r--drivers/char/tpm/tpm-chip.c196
-rw-r--r--drivers/char/tpm/tpm-dev.c15
-rw-r--r--drivers/char/tpm/tpm-interface.c50
-rw-r--r--drivers/char/tpm/tpm-sysfs.c20
-rw-r--r--drivers/char/tpm/tpm.h20
-rw-r--r--drivers/char/tpm/tpm2-cmd.c8
-rw-r--r--drivers/char/tpm/tpm_atmel.c14
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c16
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c6
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c22
-rw-r--r--drivers/char/tpm/tpm_infineon.c22
-rw-r--r--drivers/char/tpm/tpm_nsc.c20
-rw-r--r--drivers/char/tpm/tpm_tis.c16
-rw-r--r--drivers/clk/msm/clock-osm.c28
-rw-r--r--drivers/clk/qcom/clk-cpu-osm.c27
-rw-r--r--drivers/cpufreq/cpufreq_interactive.c9
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/caam/caamhash.c2
-rw-r--r--drivers/crypto/caam/key_gen.c2
-rw-r--r--drivers/crypto/msm/ice.c5
-rw-r--r--drivers/crypto/msm/qce50.c20
-rw-r--r--drivers/crypto/msm/qce50.h4
-rw-r--r--drivers/crypto/msm/qcrypto.c14
-rw-r--r--drivers/crypto/talitos.c7
-rw-r--r--drivers/dma/ioat/hw.h2
-rw-r--r--drivers/dma/ioat/init.c11
-rw-r--r--drivers/dma/ti-dma-crossbar.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c3
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c41
-rw-r--r--drivers/gpu/drm/drm_edid.c54
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c18
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c17
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c3
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c197
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h38
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c51
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c91
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c33
-rw-r--r--drivers/gpu/drm/msm/sde/sde_color_processing.c4
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c44
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.h34
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c9
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.c110
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys.h2
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c19
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_mdss.h3
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c28
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.c101
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.h10
-rw-r--r--drivers/gpu/drm/msm/sde_edid_parser.c6
-rw-r--r--drivers/gpu/drm/msm/sde_hdcp_1x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c13
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c185
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c11
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c11
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/msm/adreno.c30
-rw-r--r--drivers/gpu/msm/adreno.h47
-rw-r--r--drivers/gpu/msm/adreno_a3xx.c6
-rw-r--r--drivers/gpu/msm/adreno_a4xx.c6
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c11
-rw-r--r--drivers/gpu/msm/adreno_a5xx_preempt.c43
-rw-r--r--drivers/gpu/msm/adreno_debugfs.c6
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c4
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.c7
-rw-r--r--drivers/gpu/msm/adreno_trace.h25
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.c46
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.h4
-rw-r--r--drivers/gpu/msm/kgsl_pool.c22
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c17
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.h20
-rw-r--r--drivers/gpu/msm/kgsl_sync.c4
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/iio/adc/qcom-rradc.c78
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c9
-rw-r--r--drivers/input/serio/i8042.c12
-rw-r--r--drivers/input/touchscreen/st/fts.c19
-rw-r--r--drivers/iommu/arm-smmu.c3
-rw-r--r--drivers/irqchip/irq-gic-v3.c3
-rw-r--r--drivers/irqchip/irq-keystone.c28
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/isdn/i4l/isdn_common.c1
-rw-r--r--drivers/isdn/i4l/isdn_net.c5
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/leds/leds-qpnp-flash-v2.c43
-rw-r--r--drivers/leds/leds-qpnp-wled.c409
-rw-r--r--drivers/leds/leds-qpnp.c29
-rw-r--r--drivers/mailbox/mailbox.c13
-rw-r--r--drivers/md/md.c4
-rw-r--r--drivers/md/raid1.c5
-rw-r--r--drivers/md/raid5.c11
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c2
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c9
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c31
-rw-r--r--drivers/media/platform/msm/ais/fd/msm_fd_dev.c27
-rw-r--r--drivers/media/platform/msm/ais/fd/msm_fd_dev.h1
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp.h1
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp47.c4
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c6
-rw-r--r--drivers/media/platform/msm/ais/sensor/cci/Makefile1
-rw-r--r--drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c279
-rw-r--r--drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.h53
-rw-r--r--drivers/media/platform/msm/ais/sensor/msm_sensor_init.c33
-rw-r--r--drivers/media/platform/msm/ais/sensor/ois/msm_ois.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c21
-rw-r--r--drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h3
-rw-r--r--drivers/media/platform/msm/camera_v2/msm.c5
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c23
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c6
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c17
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c8
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/misc/enclosure.c14
-rw-r--r--drivers/misc/qseecom.c5
-rw-r--r--drivers/mmc/card/block.c9
-rw-r--r--drivers/net/can/spi/rh850.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c7
-rw-r--r--drivers/net/ethernet/msm/msm_rmnet_mhi.c9
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c1
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/usb/kaweth.c10
-rw-r--r--drivers/net/vrf.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c173
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h90
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h33
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c5
-rw-r--r--drivers/net/wireless/cnss/cnss_pci.c370
-rw-r--r--drivers/net/wireless/cnss2/debug.c330
-rw-r--r--drivers/net/wireless/cnss2/main.c29
-rw-r--r--drivers/net/wireless/cnss2/main.h6
-rw-r--r--drivers/net/wireless/cnss2/pci.c37
-rw-r--r--drivers/net/wireless/cnss2/qmi.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c2
-rw-r--r--drivers/net/wireless/wcnss/wcnss_vreg.c189
-rw-r--r--drivers/net/wireless/wcnss/wcnss_wlan.c329
-rw-r--r--drivers/nfc/fdp/i2c.c6
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c7
-rw-r--r--drivers/nfc/nfcmrvl/main.c35
-rw-r--r--drivers/nfc/nfcmrvl/uart.c8
-rw-r--r--drivers/nvdimm/btt.c9
-rw-r--r--drivers/nvmem/imx-ocotp.c2
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/parisc/ccio-dma.c12
-rw-r--r--drivers/parisc/dino.c5
-rw-r--r--drivers/parisc/lba_pci.c6
-rw-r--r--drivers/parisc/sba_iommu.c14
-rw-r--r--drivers/pci/host/pci-msm.c43
-rw-r--r--drivers/pci/pci-driver.c1
-rw-r--r--drivers/perf/arm_pmu.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c3
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c2
-rw-r--r--drivers/platform/msm/gsi/gsi.c14
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c62
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_rt.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c9
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c43
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c5
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h25
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c466
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c22
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c14
-rw-r--r--drivers/platform/msm/mhi/mhi_iface.c2
-rw-r--r--drivers/platform/msm/mhi/mhi_init.c4
-rw-r--r--drivers/platform/msm/mhi/mhi_mmio_ops.c5
-rw-r--r--drivers/platform/msm/mhi/mhi_sys.c4
-rw-r--r--drivers/platform/msm/mhi_uci/mhi_uci.c3
-rw-r--r--drivers/platform/msm/qpnp-revid.c11
-rw-r--r--drivers/platform/msm/usb_bam.c148
-rw-r--r--drivers/power/power_supply_sysfs.c6
-rw-r--r--drivers/power/supply/qcom/fg-core.h37
-rw-r--r--drivers/power/supply/qcom/fg-util.c34
-rw-r--r--drivers/power/supply/qcom/pmic-voter.c17
-rw-r--r--drivers/power/supply/qcom/qpnp-fg-gen3.c875
-rw-r--r--drivers/power/supply/qcom/qpnp-qnovo.c384
-rw-r--r--drivers/power/supply/qcom/qpnp-smb2.c88
-rw-r--r--drivers/power/supply/qcom/smb-lib.c296
-rw-r--r--drivers/power/supply/qcom/smb-lib.h23
-rw-r--r--drivers/power/supply/qcom/smb138x-charger.c70
-rw-r--r--drivers/power/supply/qcom/step-chg-jeita.c328
-rw-r--r--drivers/power/supply/qcom/step-chg-jeita.h2
-rw-r--r--drivers/pwm/pwm-qpnp.c67
-rw-r--r--drivers/regulator/qpnp-lcdb-regulator.c84
-rw-r--r--drivers/scsi/fnic/fnic.h1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c16
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c33
-rw-r--r--drivers/scsi/snic/snic_main.c3
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c7
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c1
-rw-r--r--drivers/soc/qcom/icnss.c53
-rw-r--r--drivers/soc/qcom/jtagv8-etm.c13
-rw-r--r--drivers/soc/qcom/jtagv8.c7
-rw-r--r--drivers/soc/qcom/qbt1000.c101
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion.c84
-rw-r--r--drivers/soc/qcom/qpnp-haptic.c14
-rw-r--r--drivers/soc/qcom/scm.c95
-rw-r--r--drivers/soc/qcom/scm_qcpe.c23
-rw-r--r--drivers/soc/qcom/secure_buffer.c5
-rw-r--r--drivers/spi/spi-dw.c5
-rw-r--r--drivers/spmi/spmi.c12
-rw-r--r--drivers/staging/android/ion/ion.c16
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c24
-rw-r--r--drivers/staging/android/ion/ion_priv.h6
-rw-r--r--drivers/staging/android/lowmemorykiller.c4
-rw-r--r--drivers/staging/comedi/comedi_fops.c7
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c41
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c19
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/target_core_transport.c9
-rw-r--r--drivers/thermal/cpu_cooling.c4
-rw-r--r--drivers/tty/serial/msm_serial_hs.c5
-rw-r--r--drivers/tty/vt/vt.c6
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c66
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/ep0.c2
-rw-r--r--drivers/usb/gadget/function/f_ccid.c15
-rw-r--r--drivers/usb/gadget/function/f_gsi.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c6
-rw-r--r--drivers/usb/gadget/function/f_rndis.c6
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-ring.c11
-rw-r--r--drivers/usb/pd/policy_engine.c52
-rw-r--r--drivers/usb/pd/qpnp-pdphy.c34
-rw-r--r--drivers/usb/pd/usbpd.h14
-rw-r--r--drivers/usb/phy/phy-msm-ssusb-qmp.c10
-rw-r--r--drivers/usb/renesas_usbhs/common.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c31
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/isd200.c5
-rw-r--r--drivers/usb/usbip/stub_main.c4
-rw-r--r--drivers/usb/usbip/stub_tx.c4
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c5
-rw-r--r--drivers/vfio/vfio.c46
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c5
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.c19
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_edid.c13
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c39
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c7
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c37
-rw-r--r--fs/binfmt_elf.c59
-rw-r--r--fs/btrfs/inode.c11
-rw-r--r--fs/ceph/dir.c5
-rw-r--r--fs/dcache.c27
-rw-r--r--fs/debugfs/inode.c10
-rw-r--r--fs/exec.c11
-rw-r--r--fs/ext4/sysfs.c2
-rw-r--r--fs/f2fs/acl.c2
-rw-r--r--fs/fcntl.c14
-rw-r--r--fs/gfs2/glock.c11
-rw-r--r--fs/gfs2/incore.h1
-rw-r--r--fs/mount.h1
-rw-r--r--fs/namei.c8
-rw-r--r--fs/namespace.c1
-rw-r--r--fs/nfs/dir.c12
-rw-r--r--fs/nfs/inode.c4
-rw-r--r--fs/notify/fsnotify.c8
-rw-r--r--fs/open.c6
-rw-r--r--fs/pnode.c212
-rw-r--r--fs/pstore/ram.c5
-rw-r--r--fs/pstore/ram_core.c25
-rw-r--r--fs/sdcardfs/inode.c6
-rw-r--r--fs/sdcardfs/main.c47
-rw-r--r--fs/seq_file.c7
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/xfs/xfs_aops.c21
-rw-r--r--include/drm/drm_crtc.h6
-rw-r--r--include/drm/drm_edid.h9
-rw-r--r--include/linux/dcache.h6
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/fcntl.h6
-rw-r--r--include/linux/fsnotify.h31
-rw-r--r--include/linux/ipa.h25
-rw-r--r--include/linux/msm_audio_ion.h12
-rw-r--r--include/linux/phy.h4
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/pstore_ram.h11
-rw-r--r--include/linux/qpnp/qpnp-revid.h1
-rw-r--r--include/linux/seq_file.h13
-rw-r--r--include/linux/tick.h1
-rw-r--r--include/linux/usb/hcd.h4
-rw-r--r--include/linux/usb_bam.h6
-rw-r--r--include/linux/vfio.h2
-rw-r--r--include/linux/wcnss_wlan.h8
-rw-r--r--include/net/ip6_route.h8
-rw-r--r--include/soc/qcom/scm.h8
-rw-r--r--include/sound/apr_audio-v2.h2
-rw-r--r--include/sound/q6asm-v2.h4
-rw-r--r--include/sound/q6core.h16
-rw-r--r--include/target/iscsi/iscsi_target_core.h9
-rw-r--r--include/trace/events/sched.h42
-rw-r--r--include/uapi/drm/msm_drm.h6
-rw-r--r--include/uapi/linux/android/binder.h63
-rw-r--r--include/uapi/linux/ipa_qmi_service_v01.h303
-rw-r--r--include/uapi/sound/compress_offload.h7
-rw-r--r--ipc/mqueue.c4
-rw-r--r--kernel/bpf/verifier.c5
-rw-r--r--kernel/events/core.c73
-rw-r--r--kernel/events/hw_breakpoint.c2
-rw-r--r--kernel/extable.c2
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/resource.c13
-rw-r--r--kernel/sched/core.c36
-rw-r--r--kernel/sched/cpufreq_schedutil.c154
-rw-r--r--kernel/sched/fair.c408
-rw-r--r--kernel/sched/rt.c15
-rw-r--r--kernel/sched/walt.c8
-rw-r--r--kernel/sched/walt.h2
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/time/alarmtimer.c3
-rw-r--r--kernel/time/tick-sched.c12
-rw-r--r--kernel/trace/ftrace.c2
-rw-r--r--kernel/trace/trace.c101
-rw-r--r--kernel/trace/trace_kprobe.c21
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/8021q/vlan.c5
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_dev.c20
-rw-r--r--net/bluetooth/smp.c35
-rw-r--r--net/core/dev.c45
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv6/addrconf.c27
-rw-r--r--net/ipv6/ip6_fib.c5
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/key/af_key.c93
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c19
-rw-r--r--net/nfc/core.c31
-rw-r--r--net/nfc/llcp_sock.c9
-rw-r--r--net/nfc/nci/core.c3
-rw-r--r--net/nfc/netlink.c4
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rmnet_data/rmnet_data_handlers.c10
-rw-r--r--net/sched/sch_api.c3
-rw-r--r--net/sched/sch_hhf.c8
-rw-r--r--net/sched/sch_mq.c10
-rw-r--r--net/sched/sch_mqprio.c19
-rw-r--r--net/sched/sch_sfq.c3
-rw-r--r--net/wireless/db.txt12
-rw-r--r--net/wireless/nl80211.c8
-rw-r--r--net/xfrm/xfrm_policy.c9
-rwxr-xr-xscripts/checkpatch.pl6
-rw-r--r--security/keys/encrypted-keys/encrypted.c2
-rw-r--r--sound/soc/codecs/nau8825.c3
-rw-r--r--sound/soc/codecs/nau8825.h3
-rw-r--r--sound/soc/codecs/tlv320aic3x.c13
-rw-r--r--sound/soc/codecs/wcd-dsp-mgr.c23
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c9
-rw-r--r--sound/soc/msm/apq8096-auto.c7
-rw-r--r--sound/soc/msm/msm-dai-fe.c15
-rw-r--r--sound/soc/msm/msm8996.c9
-rw-r--r--sound/soc/msm/msm8998.c28
-rw-r--r--sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c60
-rw-r--r--sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c177
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c3
-rw-r--r--sound/soc/msm/qdsp6v2/msm-lsm-client.c14
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c12
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c165
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h4
-rw-r--r--sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c455
-rw-r--r--sound/soc/msm/qdsp6v2/q6adm.c16
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c36
-rw-r--r--sound/soc/msm/qdsp6v2/q6core.c62
-rw-r--r--sound/soc/msm/sdm660-ext-dai-links.c6
-rw-r--r--sound/soc/msm/sdm660-internal.c6
-rw-r--r--sound/soc/soc-core.c3
-rw-r--r--sound/usb/endpoint.c3
-rw-r--r--tools/include/linux/compiler.h9
-rw-r--r--tools/lib/lockdep/uinclude/linux/lockdep.h2
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c4
-rw-r--r--tools/perf/Makefile.perf4
-rw-r--r--tools/perf/arch/x86/tests/intel-cqm.c2
-rw-r--r--tools/perf/arch/x86/util/dwarf-regs.c8
-rw-r--r--tools/perf/bench/numa.c6
-rw-r--r--tools/perf/builtin-script.c70
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c1
-rw-r--r--tools/perf/tests/parse-events.c8
-rw-r--r--tools/perf/ui/browser.c2
-rw-r--r--tools/perf/ui/browsers/annotate.c4
-rw-r--r--tools/perf/util/event.c12
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c121
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c26
-rw-r--r--tools/perf/util/parse-events.c60
-rw-r--r--tools/perf/util/pmu.c2
-rw-r--r--tools/perf/util/scripting-engines/Build2
-rw-r--r--tools/perf/util/strfilter.c1
-rw-r--r--tools/perf/util/string.c2
-rw-r--r--tools/perf/util/symbol-elf.c6
-rw-r--r--tools/perf/util/thread.c2
-rw-r--r--tools/perf/util/thread_map.c10
-rw-r--r--tools/testing/selftests/capabilities/test_execve.c7
-rw-r--r--virt/kvm/vfio.c27
568 files changed, 19550 insertions, 5145 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 41cbb91351bb..d58b98f9a702 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -169,6 +169,9 @@ Generic board variants:
- RUMI device:
compatible = "qcom,rumi"
+- SVR device:
+ compatible = "qcom,svr"
+
Boards (SoC type + board variant):
@@ -202,6 +205,7 @@ compatible = "qcom,apqtitanium-mtp"
compatible = "qcom,apq8098-cdp"
compatible = "qcom,apq8098-mtp"
compatible = "qcom,apq8098-qrd"
+compatible = "qcom,apq8098-svr"
compatible = "qcom,mdm9630-cdp"
compatible = "qcom,mdm9630-mtp"
compatible = "qcom,mdm9630-sim"
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index d8c3a7c35465..80813dd1b3e5 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -93,6 +93,7 @@ Optional Properties:
- qcom,chipid: If it exists this property is used to replace
the chip identification read from the GPU hardware.
This is used to override faulty hardware readings.
+- qcom,disable-wake-on-touch: Boolean. Disables the GPU power up on a touch input event.
- qcom,disable-busy-time-burst:
Boolean. Disables the busy time burst to avoid switching
of power level for large frames based on the busy time limit.
@@ -141,6 +142,9 @@ Optional Properties:
rendering thread is running on masked CPUs.
Bit 0 is for CPU-0, bit 1 is for CPU-1...
+- qcom,l2pc-update-queue:
+ Disables L2PC on masked CPUs at queue time when it's true.
+
- qcom,snapshot-size:
Specify the size of snapshot in bytes. This will override
snapshot size defined in the driver code.
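
For orientation only, a minimal sketch of how the two flags added above might sit in a GPU node; the &msm_gpu label is illustrative and both properties are assumed to be bare booleans based on their descriptions, so this is not taken verbatim from this patch:

	&msm_gpu {
		/* assumed bare boolean: keep the GPU powered down on touch input events */
		qcom,disable-wake-on-touch;
		/* assumed bare boolean: disable L2PC on the masked CPUs at queue time */
		qcom,l2pc-update-queue;
	};
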
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
index 1e6aac56c44e..42e97f765bee 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
@@ -78,6 +78,8 @@ Optional properties for WLED:
- qcom,lcd-psm-ctrl : A boolean property to specify if PSM needs to be
controlled dynamically when WLED module is enabled
or disabled.
+- qcom,auto-calibration-enable : A boolean property which enables auto-calibration
+ of the WLED sink configuration.
Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
- qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320.
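
The msm-pmi8998.dtsi and msm-pm660l.dtsi hunks later in this patch enable the new flag; as a minimal sketch (the node label is illustrative):

	&pmi8998_wled {
		qcom,loop-auto-gm-en;
		/* bare boolean: let the driver auto-calibrate the WLED sink configuration */
		qcom,auto-calibration-enable;
	};
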
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index fc019bda50a7..bf3ad8a71c26 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -97,6 +97,9 @@ Optional Properties:
and assign for each endpoint.
- qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to become
stable after power on, before de-assert the PERST to the endpoint.
+ - qcom,switch-latency: The time (unit: ms) to wait for the PCIe endpoint's link
+ training with switch downstream port after the link between switch upstream
+ port and RC is up.
- qcom,wr-halt-size: With base 2, this exponent determines the size of the
data that PCIe core will halt on for each write transaction.
- qcom,cpl-timeout: Completion timeout value. This value specifies the time range
@@ -276,6 +279,7 @@ Example:
qcom,smmu-exist;
qcom,smmu-sid-base = <0x1480>;
qcom,ep-latency = <100>;
+ qcom,switch-latency = <100>;
qcom,wr-halt-size = <0xa>; /* 1KB */
qcom,cpl-timeout = <0x2>;
diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
index babc4523a29a..dd14890123e6 100644
--- a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
+++ b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
@@ -9,6 +9,8 @@ Required properties:
Optional property:
- qcom,fab-id-valid: Use this property when support to read Fab
identification from REV ID peripheral is available.
+- qcom,tp-rev-valid: Use this property when support to read TP
+  revision identification from REV ID peripheral is available.
Example:
qcom,revid@100 {
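
A minimal sketch of the new flag alongside the existing one, mirroring the msm-pm660.dtsi hunk later in this patch:

	qcom,revid@100 {
		compatible = "qcom,qpnp-revid";
		reg = <0x100 0x100>;
		qcom,fab-id-valid;
		/* bare boolean: also expose the TP revision read from the REV ID peripheral */
		qcom,tp-rev-valid;
	};
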
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 3527779ef93c..f01eae10bf4f 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -178,6 +178,10 @@ Charger specific properties:
Definition: WD bark-timeout in seconds. The possible values are
16, 32, 64, 128. If not defined it defaults to 64.
+- qcom,sw-jeita-enable
+ Usage: optional
+ Value type: bool
+  Definition: Boolean flag which, when present, enables SW compensation for JEITA.
=============================================
Second Level Nodes - SMB2 Charger Peripherals
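
As a minimal sketch of the new knob inside an SMB2 charger node (the node name and compatible shown are illustrative, not taken from this patch):

	qcom,qpnp-smb2 {
		compatible = "qcom,qpnp-smb2";
		/* bare boolean: enable software JEITA compensation in the charger driver */
		qcom,sw-jeita-enable;
	};
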
diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
index f419655722d4..376af82381f2 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
@@ -11,6 +11,8 @@ Required properties:
Optional properties:
- qcom,fastrpc-glink: Flag to use glink instead of smd for IPC
+- qcom,fastrpc-vmid-heap-shared: Flag for the dynamic heap feature, to
+  share an HLOS memory buffer with the ADSP
Optional subnodes:
- qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context
@@ -25,6 +27,7 @@ Example:
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-adsp";
qcom,fastrpc-glink;
+ qcom,fastrpc-vmid-heap-shared;
qcom,msm_fastrpc_compute_cb_1 {
compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
index ed383ce9ea8f..9798ac60b493 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
@@ -209,6 +209,12 @@ Properties below are specific to BOOST subnode only.
Definition: Current limit (in mA) of the BOOST rail.
Possible values are 200 to 1600mA in 200mA steps.
+- qcom,bst-headroom-mv
+ Usage: optional
+ Value type: <u16>
+ Definition: Headroom of the boost (in mV). The minimum headroom is
+ 200mV and if not specified defaults to 200mV.
+
=======
Example
=======
@@ -250,5 +256,6 @@ pm660l_lcdb: qpnp-lcdb@ec00 {
qcom,bst-pd-strength = <1>;
qcom,bst-ps = <1>;
qcom,bst-ps-threshold-ma = <50>;
+ qcom,bst-headroom-mv = <200>;
};
};
diff --git a/Documentation/devicetree/bindings/soc/qcom/msm-early-cam.txt b/Documentation/devicetree/bindings/soc/qcom/msm-early-cam.txt
new file mode 100644
index 000000000000..388426f44524
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/msm-early-cam.txt
@@ -0,0 +1,110 @@
+* Qualcomm Technologies Inc MSM Early Camera
+
+[Root level node]
+==================
+Required properties:
+- compatible: Must be "qcom,early-cam".
+
+[Subnode]
+==========
+- qcom,early-cam-input-profile-#: Defines child nodes for the profiles supported
+ by early camera driver. Each profile should have properties
+ "mmagic-supply", "gdscr-supply", "vfe0-vdd-supply",
+ "qcom,cam-vreg-name", "clocks", "clock-names",
+ "qcom,clock-rates".
+Required properties:
+- mmagic-supply : should contain mmagic regulator used for mmagic clocks.
+- gdscr-supply : should contain gdsr regulator used for cci clocks.
+- vfe0-vdd-supply: phandle to vfe0 regulator.
+- qcom,cam-vreg-name : name of the voltage regulators required for the device.
+- clocks: List of clock handles (phandle and clock-specifier pairs) for the
+  clocks required by the early camera driver.
+- clock-names: name of the clock used by the driver.
+- qcom,clock-rates: clock rate in Hz.
+Example:
+
+ qcom,early-cam {
+ cell-index = <0>;
+ compatible = "qcom,early-cam";
+ status = "ok";
+ mmagic-supply = <&gdsc_mmagic_camss>;
+ gdscr-supply = <&gdsc_camss_top>;
+ vfe0-vdd-supply = <&gdsc_vfe0>;
+ qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd";
+ clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>,
+ <&clock_mmss clk_camss_top_ahb_clk>,
+ <&clock_mmss clk_cci_clk_src>,
+ <&clock_mmss clk_camss_cci_ahb_clk>,
+ <&clock_mmss clk_camss_cci_clk>,
+ <&clock_mmss clk_camss_ahb_clk>,
+ <&clock_mmss clk_mmagic_camss_axi_clk>,
+ <&clock_mmss clk_camss_vfe_ahb_clk>,
+ <&clock_mmss clk_camss_vfe0_ahb_clk>,
+ <&clock_mmss clk_camss_vfe_axi_clk>,
+ <&clock_mmss clk_camss_vfe0_stream_clk>,
+ <&clock_mmss clk_smmu_vfe_axi_clk>,
+ <&clock_mmss clk_smmu_vfe_ahb_clk>,
+ <&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_vfe0_clk_src>,
+ <&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_camss_csi2_ahb_clk>,
+ <&clock_mmss clk_camss_csi2_clk>,
+ <&clock_mmss clk_camss_csi2phy_clk>,
+ <&clock_mmss clk_csi2phytimer_clk_src>,
+ <&clock_mmss clk_camss_csi2phytimer_clk>,
+ <&clock_mmss clk_camss_csi2rdi_clk>,
+ <&clock_mmss clk_camss_ispif_ahb_clk>,
+ <&clock_mmss clk_camss_vfe0_clk>;
+ clock-names =
+ "mmss_mmagic_ahb_clk",
+ "camss_top_ahb_clk",
+ "cci_clk_src",
+ "camss_cci_ahb_clk",
+ "camss_cci_clk",
+ "camss_ahb_clk",
+ "mmagic_camss_axi_clk",
+ "camss_vfe_ahb_clk",
+ "camss_vfe0_ahb_clk",
+ "camss_vfe_axi_clk",
+ "camss_vfe0_stream_clk",
+ "smmu_vfe_axi_clk",
+ "smmu_vfe_ahb_clk",
+ "camss_csi_vfe0_clk",
+ "vfe0_clk_src",
+ "camss_csi_vfe0_clk",
+ "camss_csi2_ahb_clk",
+ "camss_csi2_clk",
+ "camss_csi2phy_clk",
+ "csi2phytimer_clk_src",
+ "camss_csi2phytimer_clk",
+ "camss_csi2rdi_clk",
+ "camss_ispif_ahb_clk",
+ "clk_camss_vfe0_clk";
+
+ qcom,clock-rates = <19200000
+ 19200000
+ 19200000
+ 19200000
+ 19200000
+ 19200000
+ 0
+ 0
+ 0
+ 320000000
+ 0
+ 0
+ 0
+ 0
+ 19200000
+ 0
+ 0
+ 200000000
+ 200000000
+ 200000000
+ 200000000
+ 200000000
+			0
+			100000000>;
+ };
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
index 061c1d16ad24..d0855115b6d1 100644
--- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -12,7 +12,7 @@ Required properties:
"riva_ccu_base", "pronto_a2xb_base", "pronto_ccpu_base",
"pronto_saw2_base", "wlan_tx_phy_aborts","wlan_brdg_err_source",
"wlan_tx_status", "alarms_txctl", "alarms_tactl",
- "pronto_mcu_base".
+ "pronto_mcu_base", "pronto_qfuse".
- interupts: Pronto to Apps interrupts for tx done and rx pending.
- qcom,pronto-vddmx-supply: regulator to supply pronto pll.
- qcom,pronto-vddcx-supply: voltage corner regulator to supply WLAN/BT/FM
@@ -29,7 +29,7 @@ Required properties:
- qcom,wcnss-vadc: VADC handle for battery voltage notification APIs.
- pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
- pinctrl-names : Names corresponding to the numbered pinctrl states
-- clocks: from common clock binding: handle to xo and rf_clk clocks.
+- clocks: from common clock binding: handle to xo, rf_clk and wcnss snoc clocks.
- clock-names: Names of all the clocks that are accessed by the subsystem
- qcom,vdd-voltage-level: This property represents (nominal, min, max) voltage
for iris and pronto regulators in milli-volts.
@@ -39,11 +39,16 @@ iris and pronto regulators in micro-amps.
Optional properties:
- qcom,has-autodetect-xo: boolean flag to determine whether Iris XO auto detect
should be performed during boot up.
+- qcom,snoc-wcnss-clock-freq: indicates the wcnss snoc clock frequency in Hz.
+If wcnss_snoc clock is specified in the list of clocks, this property needs
+to be set to make it functional.
- qcom,wlan-rx-buff-count: WLAN RX buffer count is a configurable value,
using a smaller count for this buffer will reduce the memory usage.
- qcom,is-pronto-v3: boolean flag to determine the pronto hardware version
in use. subsequently correct workqueue will be used by DXE engine to push frames
in TX data path.
+- qcom,is-dual-band-disable: boolean flag to determine the WLAN dual band
+ capability.
- qcom,is-pronto-vadc: boolean flag to determine Battery voltage feature
support for pronto hardware.
- qcom,wcnss-pm : <Core rail LDO#, PA rail LDO#, XO settling time,
@@ -94,7 +99,12 @@ Example:
clocks = <&clock_rpm clk_xo_wlan_clk>,
<&clock_rpm clk_rf_clk2>,
<&clock_debug clk_gcc_debug_mux>,
- <&clock_gcc clk_wcnss_m_clk>;
- clock-names = "xo", "rf_clk", "measure", "wcnss_debug";
+ <&clock_gcc clk_wcnss_m_clk>,
+ <&clock_gcc clk_snoc_wcnss_a_clk>;
+
+ clock-names = "xo", "rf_clk", "measure", "wcnss_debug",
+ "snoc_wcnss";
+
+ qcom,snoc-wcnss-clock-freq = <200000000>;
qcom,wcnss-pm = <11 21 1200 1 1 6>;
};
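
The updated example above already covers the new snoc_wcnss clock; the dual-band flag is a bare boolean and would simply be listed with the other optional flags, roughly as below (node name illustrative):

	qcom,wcnss-wlan {
		/* bare boolean: report that WLAN dual-band operation is disabled */
		qcom,is-dual-band-disable;
	};
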
diff --git a/Makefile b/Makefile
index 64e9303560f7..dbb22cf9f76a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 76
+SUBLEVEL = 80
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -623,6 +623,9 @@ include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index 96edb3c84425..d1e3b0891a4e 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -17,7 +17,6 @@ CONFIG_AUDIT=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_DEFAULT_SECURITY_SELINUX=y
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 5f5e0f3d5b64..27cd4abfc74d 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -697,6 +697,8 @@
vmmc_aux-supply = <&vsim>;
bus-width = <8>;
non-removable;
+ no-sdio;
+ no-sd;
};
&mmc3 {
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 297d6535382e..1c3de8ccb400 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -168,6 +168,7 @@ dtb-$(CONFIG_ARCH_MSM8998) += msm8998-sim.dtb \
apq8098-v2.1-cdp.dtb \
apq8098-v2.1-qrd.dtb \
apq8098-v2.1-mediabox.dtb \
+ apq8098-v2.1-svr20.dtb \
msm8998-v2.1-interposer-sdm660-cdp.dtb \
msm8998-v2.1-interposer-sdm660-mtp.dtb \
msm8998-v2.1-interposer-sdm660-qrd.dtb \
diff --git a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
index 4081a21b3134..db33594d3827 100644
--- a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
@@ -713,6 +713,10 @@
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+ <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+ <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -731,6 +735,10 @@
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+ "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+ "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
diff --git a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
index 9f8d432a3112..082b04791dbd 100644
--- a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
+++ b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,6 +42,10 @@
i2c@75b6000 { /* BLSP8 */
/* ADV7533 HDMI Bridge Chip removed on ADP Lite */
+ adv7533@3d {
+ status = "disabled";
+ };
+
adv7533@39 {
status = "disabled";
};
@@ -60,6 +64,10 @@
/delete-property/ qcom,dsi-display-active;
};
+&sde_kms {
+ connectors = <&sde_hdmi_tx &sde_hdmi &dsi_adv_7533_1>;
+};
+
&pil_modem {
pinctrl-names = "default";
pinctrl-0 = <&modem_mux>;
diff --git a/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts b/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts
index bd29c0307576..1fa49d8a060d 100644
--- a/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts
+++ b/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts
@@ -101,6 +101,10 @@
status = "disabled";
};
+&pcie0 {
+ qcom,boot-option = <0x0>;
+};
+
&soc {
qcom,msm-dai-mi2s {
dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
diff --git a/arch/arm/boot/dts/qcom/apq8098-v2.1-svr20.dts b/arch/arm/boot/dts/qcom/apq8098-v2.1-svr20.dts
new file mode 100644
index 000000000000..4359a3f38ade
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/apq8098-v2.1-svr20.dts
@@ -0,0 +1,22 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-svr20.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ 8098 V2.1 SVR V2.0 Board";
+ compatible = "qcom,apq8098-svr", "qcom,apq8098", "qcom,svr";
+ qcom,board-id = <0x03020008 3>;
+};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
index 5971a3d1025e..aa627b3e7c63 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
@@ -68,7 +68,6 @@
qcom,mdss-dsi-lane-1-state;
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
- qcom,cmd-sync-wait-broadcast;
qcom,mdss-dsi-panel-timings = [e2 36 24 00 66 6a 28 38 2a 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
index 1a572f97c840..339d87f66d2f 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -193,7 +193,6 @@
qcom,mdss-dsi-lane-1-state;
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
- qcom,cmd-sync-wait-broadcast;
qcom,mdss-dsi-panel-timings = [e2 36 24 00 66 6a 28 38 2a 03
04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 51a225b82f47..ff3b7b80c449 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,6 +54,8 @@
qcom,ulps-enabled;
qcom,dcs-cmd-by-left;
qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-pan-physical-width-dimension = <68>;
+ qcom,mdss-pan-physical-height-dimension = <121>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index 02c87067f212..933746b8abe7 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,8 @@
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-pan-physical-width-dimension = <68>;
+ qcom,mdss-pan-physical-height-dimension = <121>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-svr-v2-3200mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-svr-v2-3200mah.dtsi
new file mode 100644
index 000000000000..048897b084ec
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-svr-v2-3200mah.dtsi
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,svr835v2_3200mah {
+ /*3003021_TC_MLP603170_3200mAh_averaged_MasterSlave_Jun292017*/
+ qcom, = <24>;
+ qcom,max-voltage-uv = <4200000>;
+ qcom,fg-cc-cv-threshold-mv = <4190>;
+ qcom,fastchg-current-ma = <3200>;
+ qcom,nom-batt-capacity-mah = <3200>;
+ qcom,batt-id-kohm = <0>;
+ qcom,battery-beta = <3435>;
+ qcom,battery-type = "svr835v2_3200mah";
+ qcom,checksum = <0xB7B0>;
+ qcom,gui-version = "PMI8998GUI - 2.0.0.58";
+ qcom,fg-profile-data = [
+ 87 16 AB 0B
+ BE 15 3A 0A
+ 8B 1C 6D 02
+ 76 0D 1F 0A
+ 50 18 ED 22
+ 98 45 CA 52
+ 83 00 00 00
+ 0D 00 00 00
+ 00 00 37 B4
+ 78 C5 9D BA
+ 29 00 08 00
+ 3E CA 11 E5
+ D4 06 B7 EA
+ 51 07 0F 02
+ 82 DD 22 3B
+ 1C 06 09 20
+ 27 00 14 00
+ 1C 19 82 0A
+ E9 0C 49 03
+ 84 1C 5C 03
+ D0 15 0D 12
+ 91 19 0C 22
+ F0 3C 35 4B
+ 7D 00 00 00
+ 12 00 00 00
+ 00 00 F3 D4
+ 9F B4 AF D3
+ 22 00 00 00
+ CC EA 11 E5
+ 2D F4 35 E3
+ A5 F3 49 0B
+ 8F EA 5A 1A
+ 9B 33 CC FF
+ 07 10 00 00
+ 21 0D 33 43
+ 22 00 40 00
+ 07 01 0A FA
+ FF 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pm660.dtsi b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
index 93aeef07cfe0..460e7e76ac4d 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
@@ -24,6 +24,7 @@
compatible = "qcom,qpnp-revid";
reg = <0x100 0x100>;
qcom,fab-id-valid;
+ qcom,tp-rev-valid;
};
pm660_misc: qcom,misc@900 {
diff --git a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
index 9cd117ce4e0c..075eaef21254 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
@@ -269,6 +269,7 @@
qcom,led-strings-list = [00 01 02];
qcom,loop-auto-gm-en;
qcom,pmic-revid = <&pm660l_revid>;
+ qcom,auto-calibration-enable;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
index 684f6cf9b389..147b537eba33 100644
--- a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
@@ -634,6 +634,7 @@
qcom,en-ext-pfet-sc-pro;
qcom,pmic-revid = <&pmi8998_revid>;
qcom,loop-auto-gm-en;
+ qcom,auto-calibration-enable;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 9dbec1e87fda..1283cdddc2db 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -858,6 +858,90 @@
};
&soc {
+ qcom,early-cam {
+ cell-index = <0>;
+ compatible = "qcom,early-cam";
+ status = "ok";
+ mmagic-supply = <&gdsc_mmagic_camss>;
+ gdscr-supply = <&gdsc_camss_top>;
+ vfe0-vdd-supply = <&gdsc_vfe0>;
+ qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd";
+ clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>,
+ <&clock_mmss clk_camss_top_ahb_clk>,
+ <&clock_mmss clk_cci_clk_src>,
+ <&clock_mmss clk_camss_cci_ahb_clk>,
+ <&clock_mmss clk_camss_cci_clk>,
+ <&clock_mmss clk_camss_ahb_clk>,
+ <&clock_mmss clk_mmagic_camss_axi_clk>,
+ <&clock_mmss clk_camss_vfe_ahb_clk>,
+ <&clock_mmss clk_camss_vfe0_ahb_clk>,
+ <&clock_mmss clk_camss_vfe_axi_clk>,
+ <&clock_mmss clk_camss_vfe0_stream_clk>,
+ <&clock_mmss clk_smmu_vfe_axi_clk>,
+ <&clock_mmss clk_smmu_vfe_ahb_clk>,
+ <&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_vfe0_clk_src>,
+ <&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_camss_csi2_ahb_clk>,
+ <&clock_mmss clk_camss_csi2_clk>,
+ <&clock_mmss clk_camss_csi2phy_clk>,
+ <&clock_mmss clk_csi2phytimer_clk_src>,
+ <&clock_mmss clk_camss_csi2phytimer_clk>,
+ <&clock_mmss clk_camss_csi2rdi_clk>,
+ <&clock_mmss clk_camss_ispif_ahb_clk>,
+ <&clock_mmss clk_camss_vfe0_clk>;
+ clock-names =
+ "mmss_mmagic_ahb_clk",
+ "camss_top_ahb_clk",
+ "cci_clk_src",
+ "camss_cci_ahb_clk",
+ "camss_cci_clk",
+ "camss_ahb_clk",
+ "mmagic_camss_axi_clk",
+ "camss_vfe_ahb_clk",
+ "camss_vfe0_ahb_clk",
+ "camss_vfe_axi_clk",
+ "camss_vfe0_stream_clk",
+ "smmu_vfe_axi_clk",
+ "smmu_vfe_ahb_clk",
+ "camss_csi_vfe0_clk",
+ "vfe0_clk_src",
+ "camss_csi_vfe0_clk",
+ "camss_csi2_ahb_clk",
+ "camss_csi2_clk",
+ "camss_csi2phy_clk",
+ "csi2phytimer_clk_src",
+ "camss_csi2phytimer_clk",
+ "camss_csi2rdi_clk",
+ "camss_ispif_ahb_clk",
+ "clk_camss_vfe0_clk";
+
+ qcom,clock-rates = <19200000
+ 19200000
+ 19200000
+ 19200000
+ 19200000
+ 19200000
+ 0
+ 0
+ 0
+ 320000000
+ 0
+ 0
+ 0
+ 0
+ 19200000
+ 0
+ 0
+ 200000000
+ 200000000
+ 200000000
+ 200000000
+ 200000000
+ 0
+ 100000000>;
+ };
+
qcom,ntn_avb {
compatible = "qcom,ntn_avb";
@@ -1016,6 +1100,10 @@
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+ <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+ <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -1034,6 +1122,10 @@
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+ "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+ "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
@@ -1449,11 +1541,21 @@
};
};
+&vfe_smmu {
+ qcom,no-smr-check;
+};
+
/ {
reserved-memory {
- lk_mem: lk_pool@91600000 {
- reg = <0x0 0x91600000 0x0 0x600000>;
+ lk_mem: lk_pool@0x91600000 {
+ no-map;
+ reg = <0 0x91600000 0 0x00600000>;
label = "lk_pool";
};
+
+ early_camera_mem: early_camera_mem@b3fff000 {
+ reg = <0 0xb3fff000 0 0x800000>;
+ label = "early_camera_mem";
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index ad14bfd00cd1..c3b986786034 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -623,6 +623,90 @@
};
&soc {
+ qcom,early-cam {
+ cell-index = <0>;
+ compatible = "qcom,early-cam";
+ status = "ok";
+ mmagic-supply = <&gdsc_mmagic_camss>;
+ gdscr-supply = <&gdsc_camss_top>;
+ vfe0-vdd-supply = <&gdsc_vfe0>;
+ qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd";
+ clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>,
+ <&clock_mmss clk_camss_top_ahb_clk>,
+ <&clock_mmss clk_cci_clk_src>,
+ <&clock_mmss clk_camss_cci_ahb_clk>,
+ <&clock_mmss clk_camss_cci_clk>,
+ <&clock_mmss clk_camss_ahb_clk>,
+ <&clock_mmss clk_mmagic_camss_axi_clk>,
+ <&clock_mmss clk_camss_vfe_ahb_clk>,
+ <&clock_mmss clk_camss_vfe0_ahb_clk>,
+ <&clock_mmss clk_camss_vfe_axi_clk>,
+ <&clock_mmss clk_camss_vfe0_stream_clk>,
+ <&clock_mmss clk_smmu_vfe_axi_clk>,
+ <&clock_mmss clk_smmu_vfe_ahb_clk>,
+ <&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_vfe0_clk_src>,
+ <&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_camss_csi2_ahb_clk>,
+ <&clock_mmss clk_camss_csi2_clk>,
+ <&clock_mmss clk_camss_csi2phy_clk>,
+ <&clock_mmss clk_csi2phytimer_clk_src>,
+ <&clock_mmss clk_camss_csi2phytimer_clk>,
+ <&clock_mmss clk_camss_csi2rdi_clk>,
+ <&clock_mmss clk_camss_ispif_ahb_clk>,
+ <&clock_mmss clk_camss_vfe0_clk>;
+ clock-names =
+ "mmss_mmagic_ahb_clk",
+ "camss_top_ahb_clk",
+ "cci_clk_src",
+ "camss_cci_ahb_clk",
+ "camss_cci_clk",
+ "camss_ahb_clk",
+ "mmagic_camss_axi_clk",
+ "camss_vfe_ahb_clk",
+ "camss_vfe0_ahb_clk",
+ "camss_vfe_axi_clk",
+ "camss_vfe0_stream_clk",
+ "smmu_vfe_axi_clk",
+ "smmu_vfe_ahb_clk",
+ "camss_csi_vfe0_clk",
+ "vfe0_clk_src",
+ "camss_csi_vfe0_clk",
+ "camss_csi2_ahb_clk",
+ "camss_csi2_clk",
+ "camss_csi2phy_clk",
+ "csi2phytimer_clk_src",
+ "camss_csi2phytimer_clk",
+ "camss_csi2rdi_clk",
+ "camss_ispif_ahb_clk",
+ "clk_camss_vfe0_clk";
+
+ qcom,clock-rates = <19200000
+ 19200000
+ 19200000
+ 19200000
+ 19200000
+ 19200000
+ 0
+ 0
+ 0
+ 320000000
+ 0
+ 0
+ 0
+ 0
+ 19200000
+ 0
+ 0
+ 200000000
+ 200000000
+ 200000000
+ 200000000
+ 200000000
+ 0
+ 100000000>;
+ };
+
ntn1: ntn_avb@1 { /* Neutrno device on RC1*/
compatible = "qcom,ntn_avb";
@@ -838,6 +922,10 @@
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+ <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+ <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -856,6 +944,10 @@
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+ "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+ "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
@@ -1223,12 +1315,22 @@
/delete-property/ qcom,spkr-sd-n-gpio;
};
+&vfe_smmu {
+ qcom,no-smr-check;
+};
+
/ {
reserved-memory {
- lk_mem: lk_pool@91600000 {
- reg = <0x0 0x91600000 0x0 0x600000>;
+ lk_mem: lk_pool@91600000 {
+ no-map;
+ reg = <0 0x91600000 0 0x00600000>;
label = "lk_pool";
};
+
+ early_camera_mem: early_camera_mem@b3fff000 {
+ reg = <0 0xb3fff000 0 0x800000>;
+ label = "early_camera_mem";
+ };
};
};
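
A note on the qcom,early-cam clock properties added above: clocks, clock-names and qcom,clock-rates are parallel, index-aligned lists with one entry per clock. A trimmed, hypothetical sketch of the same pattern (placeholder node with only three clocks; a rate of 0 appears to be used where the driver leaves the clock at its default rate):

	qcom,early-cam-example {
		compatible = "qcom,early-cam";
		clocks = <&clock_mmss clk_camss_ahb_clk>,
			 <&clock_mmss clk_vfe0_clk_src>,
			 <&clock_mmss clk_camss_vfe0_clk>;
		clock-names = "camss_ahb_clk",
			      "vfe0_clk_src",
			      "camss_vfe0_clk";
		/* index-aligned with clocks/clock-names above */
		qcom,clock-rates = <0 19200000 100000000>;
	};
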
diff --git a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
index d3ea51268590..c8898ec01992 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
@@ -538,6 +538,10 @@
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+ <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+ <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -556,6 +560,10 @@
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+ "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+ "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
diff --git a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
index ff128acb376a..316859a65801 100644
--- a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
@@ -1449,7 +1449,7 @@
};
cnss_pins {
- cnss_default: cnss_default {
+ cnss_bootstrap_active: cnss_bootstrap_active {
mux {
pins = "gpio46";
function = "gpio";
@@ -1458,6 +1458,20 @@
config {
pins = "gpio46";
drive-strength = <16>;
+ output-high;
+ bias-pull-up;
+ };
+ };
+ cnss_bootstrap_sleep: cnss_bootstrap_sleep {
+ mux {
+ pins = "gpio46";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio46";
+ drive-strength = <2>;
+ output-low;
bias-pull-down;
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index dcd884b4a745..f5e059484c95 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -2335,15 +2335,17 @@
qcom,cnss {
compatible = "qcom,cnss";
wlan-bootstrap-gpio = <&tlmm 46 0>;
- wlan-en-gpio = <&pm8994_gpios 8 0>;
+ vdd-wlan-en-supply = <&wlan_en_vreg>;
vdd-wlan-supply = <&rome_vreg>;
vdd-wlan-io-supply = <&pm8994_s4>;
vdd-wlan-xtal-supply = <&pm8994_l30>;
vdd-wlan-core-supply = <&pm8994_s3>;
wlan-ant-switch-supply = <&pm8994_l18_pin_ctrl>;
+ qcom,wlan-en-vreg-support;
qcom,notify-modem-status;
- pinctrl-names = "default";
- pinctrl-0 = <&cnss_default>;
+ pinctrl-names = "bootstrap_active", "bootstrap_sleep";
+ pinctrl-0 = <&cnss_bootstrap_active>;
+ pinctrl-1 = <&cnss_bootstrap_sleep>;
qcom,wlan-rc-num = <0>;
qcom,wlan-ramdump-dynamic = <0x200000>;
@@ -3355,6 +3357,82 @@
};
};
+ qcom,msm-dai-tdm-pri-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37120>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36864 36866 36868 36870>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+ dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36864>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_rx_1: qcom,msm-dai-q6-tdm-pri-rx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36866>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_rx_2: qcom,msm-dai-q6-tdm-pri-rx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36868>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_rx_3: qcom,msm-dai-q6-tdm-pri-rx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36870>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-dai-tdm-pri-tx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37121>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36865 36867 36869 36871>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+ dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36865>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_tx_1: qcom,msm-dai-q6-tdm-pri-tx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36867>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_tx_2: qcom,msm-dai-q6-tdm-pri-tx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36869>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_tx_3: qcom,msm-dai-q6-tdm-pri-tx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36871>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-sec-tx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37137>;
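
For orientation, the primary-TDM groups added above follow a fixed shape: the parent group node lists its ports in qcom,msm-cpudai-tdm-group-port-id, and each child DAI node carries one of those port ids in qcom,msm-cpudai-tdm-dev-id (36864/36866/36868/36870 for primary RX, 36865/36867/36869/36871 for primary TX); the sound-card changes earlier in this patch reference the same ids as "msm-dai-q6-tdm.<dev-id>". A minimal, hypothetical two-port sketch of that shape:

	qcom,msm-dai-tdm-example-rx {
		compatible = "qcom,msm-dai-tdm";
		qcom,msm-cpudai-tdm-group-id = <37120>;
		qcom,msm-cpudai-tdm-group-num-ports = <2>;
		qcom,msm-cpudai-tdm-group-port-id = <36864 36866>;

		/* one child DAI per port id listed above */
		dai_example_rx_0: qcom,msm-dai-q6-tdm-rx-0 {
			compatible = "qcom,msm-dai-q6-tdm";
			qcom,msm-cpudai-tdm-dev-id = <36864>;
		};

		dai_example_rx_1: qcom,msm-dai-q6-tdm-rx-1 {
			compatible = "qcom,msm-dai-q6-tdm";
			qcom,msm-cpudai-tdm-dev-id = <36866>;
		};
	};
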
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
index 48d5cb78611b..f5c33063643d 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
@@ -42,6 +42,9 @@
i2c@75b6000 { /* BLSP8 */
/* ADV7533 HDMI Bridge Chip removed on ADP Lite */
+ adv7533@3d {
+ status = "disabled";
+ };
adv7533@39 {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-svr20.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-svr20.dtsi
new file mode 100644
index 000000000000..9d408ee5f3a7
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-svr20.dtsi
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash0: qcom,camera-flash@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ qcom,switch-source = <&pmi8998_switch0>;
+ status = "ok";
+ };
+
+ led_flash1: qcom,camera-flash@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8998_flash2>;
+ qcom,torch-source = <&pmi8998_torch2>;
+ qcom,switch-source = <&pmi8998_switch1>;
+ status = "ok";
+ };
+};
+
+&cci {
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 27 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ };
+
+ actuator1: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ gpios = <&tlmm 27 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ };
+
+ ois0: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 27 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ status = "disabled";
+ };
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&pm8998_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active
+ &cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend
+ &cam_actuator_vaf_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>,
+ <&pm8998_gpios 20 0>,
+ <&tlmm 29 0>,
+ <&tlmm 27 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-vaf = <4>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA",
+ "CAM_VAF";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom1: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&pm8998_lvs1>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
+ qcom,cam-vreg-op-mode = <0 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom2: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ /*cam_vana-supply = <&pm8998_l22>;*/
+ cam_vdig-supply = <&pm8998_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-max-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&pm8998_gpios 9 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk1_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk1_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <270>;
+ cam_vio-supply = <&pm8998_l8>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&pm8998_l9>;
+ cam_v_custom1-supply = <&pm8998_lvs1>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vana",
+ "cam_vio", "cam_v_custom1";
+ qcom,cam-vreg-min-voltage = <1808000 3312000 1200000 0>;
+ qcom,cam-vreg-max-voltage = <2960000 3600000 1200000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_6dofl_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_6dofl_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 148 0>,
+ <&tlmm 93 0>,
+ <&tlmm 52 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-vdig = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA",
+ "CAM_VDIG";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <1>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <1>;
+ qcom,mount-angle = <90>;
+ qcom,eeprom-src = <&eeprom1>;
+ qcom,actuator-src = <&actuator1>;
+ cam_vdig-supply = <&pm8998_lvs1>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
+ qcom,cam-vreg-op-mode = <0 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+ qcom,camera@2 {
+ cell-index = <2>;
+ compatible = "qcom,camera";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <90>;
+ qcom,eeprom-src = <&eeprom2>;
+ qcom,led-flash-src = <&led_flash1>;
+ qcom,actuator-src = <&actuator1>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&pm8998_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage =
+ <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage =
+ <0 3600000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&pm8998_gpios 9 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "disabled";
+ clocks = <&clock_mmss clk_mclk1_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk1_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+ qcom,camera@3 {
+ cell-index = <3>;
+ compatible = "qcom,camera";
+ reg = <0x3>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <270>;
+ cam_vio-supply = <&pm8998_l8>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&pm8998_l9>;
+ cam_v_custom1-supply = <&pm8998_lvs1>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vana",
+ "cam_vio", "cam_v_custom1";
+ qcom,cam-vreg-min-voltage = <1808000 3312000 1200000 0>;
+ qcom,cam-vreg-max-voltage = <2960000 3600000 1200000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_6dofr_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_6dofr_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 149 0>,
+ <&tlmm 93 0>,
+ <&tlmm 52 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-vdig = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET1",
+ "CAM_VANA1",
+ "CAM_VDIG1";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <1>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+};
+
+&pm8998_gpios {
+ gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+
+ gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+};
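
One recurring pattern in the sensor nodes above is worth spelling out: the gpios list and the qcom,gpio-req-tbl-num/-flags/-label arrays are index-aligned. Each gpio-req-tbl-num entry is an index into gpios, the label at the same position names that pin, and in these nodes a flag of 1 is used for the MCLK pin while 0 is used for plain control GPIOs; the qcom,gpio-reset/-vana/-vdig/-vaf properties are likewise indices into gpios. A trimmed, hypothetical sketch of that correspondence:

	qcom,camera-example {
		compatible = "qcom,camera";
		gpios = <&tlmm 13 0>,	/* index 0: MCLK */
			<&tlmm 30 0>,	/* index 1: reset */
			<&tlmm 29 0>;	/* index 2: analog supply enable */
		qcom,gpio-reset = <1>;	/* index into gpios */
		qcom,gpio-vana = <2>;	/* index into gpios */
		qcom,gpio-req-tbl-num = <0 1 2>;
		qcom,gpio-req-tbl-flags = <1 0 0>;
		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
					  "CAM_RESET0",
					  "CAM_VANA";
	};
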
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
index 93b6a7664ed8..897ab12fe0a7 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
@@ -87,7 +87,6 @@
qcom,mdss-dsi-t-clk-post = <0x07>;
qcom,mdss-dsi-t-clk-pre = <0x25>;
qcom,mdss-dsi-tx-eot-append;
- qcom,cmd-sync-wait-broadcast;
qcom,esd-check-enabled;
qcom,mdss-dsi-min-refresh-rate = <55>;
qcom,mdss-dsi-max-refresh-rate = <60>;
@@ -107,7 +106,6 @@
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-tx-eot-append;
- qcom,cmd-sync-wait-broadcast;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "reg_read";
qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm/boot/dts/qcom/msm8998-svr20-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-svr20-pinctrl.dtsi
new file mode 100644
index 000000000000..1347fcef4251
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-svr20-pinctrl.dtsi
@@ -0,0 +1,3386 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ tlmm: pinctrl@03400000 {
+ compatible = "qcom,msm8998-pinctrl";
+ reg = <0x3400000 0xc00000>;
+ interrupts = <0 208 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ uart_console_active: uart_console_active {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart8_a";
+ };
+
+ config {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ wcd9xxx_intr {
+ wcd_intr_default: wcd_intr_default {
+ mux {
+ pins = "gpio54";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio54";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ input-enable;
+ };
+ };
+ };
+
+ /* I2C CONFIGURATION */
+ i2c_1 {
+ i2c_1_active: i2c_1_active {
+ mux {
+ pins = "gpio2", "gpio3";
+ function = "blsp_i2c1";
+ };
+
+ config {
+ pins = "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_1_sleep: i2c_1_sleep {
+ mux {
+ pins = "gpio2", "gpio3";
+ function = "blsp_i2c1";
+ };
+
+ config {
+ pins = "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_2 {
+ i2c_2_active: i2c_2_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "blsp_i2c2";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
+ i2c_2_sleep: i2c_2_sleep {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "blsp_i2c2";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_3 {
+ i2c_3_active: i2c_3_active {
+ mux {
+ pins = "gpio47", "gpio48";
+ function = "blsp_i2c3";
+ };
+
+ config {
+ pins = "gpio47", "gpio48";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_3_sleep: i2c_3_sleep {
+ mux {
+ pins = "gpio47", "gpio48";
+ function = "blsp_i2c3";
+ };
+
+ config {
+ pins = "gpio47", "gpio48";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_4 {
+ i2c_4_active: i2c_4_active {
+ mux {
+ pins = "gpio10", "gpio11";
+ function = "blsp_i2c4";
+ };
+
+ config {
+ pins = "gpio10", "gpio11";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_4_sleep: i2c_4_sleep {
+ mux {
+ pins = "gpio10", "gpio11";
+ function = "blsp_i2c4";
+ };
+
+ config {
+ pins = "gpio10", "gpio11";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_5 {
+ i2c_5_active: i2c_5_active {
+ mux {
+ pins = "gpio87", "gpio88";
+ function = "blsp_i2c5";
+ };
+
+ config {
+ pins = "gpio87", "gpio88";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_5_sleep: i2c_5_sleep {
+ mux {
+ pins = "gpio87", "gpio88";
+ function = "blsp_i2c5";
+ };
+
+ config {
+ pins = "gpio87", "gpio88";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ i2c_6 {
+ i2c_6_active: i2c_6_active {
+ mux {
+ pins = "gpio43", "gpio44";
+ function = "blsp_i2c6";
+ };
+
+ config {
+ pins = "gpio43", "gpio44";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_6_sleep: i2c_6_sleep {
+ mux {
+ pins = "gpio43", "gpio44";
+ function = "blsp_i2c6";
+ };
+
+ config {
+ pins = "gpio43", "gpio44";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ nfc {
+ nfc_int_active: nfc_int_active {
+ /* active state */
+ mux {
+ /* GPIO 92 NFC Read Interrupt */
+ pins = "gpio92";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio92";
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_int_suspend: nfc_int_suspend {
+ /* sleep state */
+ mux {
+ /* GPIO 92 NFC Read Interrupt */
+ pins = "gpio92";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio92";
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_active: nfc_enable_active {
+ /* active state */
+ mux {
+ /* 12: NFC ENABLE 116:ESE Enable */
+ pins = "gpio12", "gpio116";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12", "gpio116";
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_suspend: nfc_enable_suspend {
+ /* sleep state */
+ mux {
+ /* 12: NFC ENABLE 116:ESE Enable */
+ pins = "gpio12", "gpio116";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12", "gpio116";
+ drive-strength = <6>; /* 6 MA */
+ bias-disable;
+ };
+ };
+ };
+
+ i2c_7 {
+ i2c_7_active: i2c_7_active {
+ mux {
+ pins = "gpio55", "gpio56";
+ function = "blsp_i2c7";
+ };
+
+ config {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_7_sleep: i2c_7_sleep {
+ mux {
+ pins = "gpio55", "gpio56";
+ function = "blsp_i2c7";
+ };
+
+ config {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_8 {
+ i2c_8_active: i2c_8_active {
+ mux {
+ pins = "gpio6", "gpio7";
+ function = "blsp_i2c8";
+ };
+
+ config {
+ pins = "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_8_sleep: i2c_8_sleep {
+ mux {
+ pins = "gpio6", "gpio7";
+ function = "blsp_i2c8";
+ };
+
+ config {
+ pins = "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_9 {
+ i2c_9_active: i2c_9_active {
+ mux {
+ pins = "gpio51", "gpio52";
+ function = "blsp_i2c9";
+ };
+
+ config {
+ pins = "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_9_sleep: i2c_9_sleep {
+ mux {
+ pins = "gpio51", "gpio52";
+ function = "blsp_i2c9";
+ };
+
+ config {
+ pins = "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_10 {
+ i2c_10_active: i2c_10_active {
+ mux {
+ pins = "gpio67", "gpio68";
+ function = "blsp_i2c10";
+ };
+
+ config {
+ pins = "gpio67", "gpio68";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_10_sleep: i2c_10_sleep {
+ mux {
+ pins = "gpio67", "gpio68";
+ function = "blsp_i2c10";
+ };
+
+ config {
+ pins = "gpio67", "gpio68";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_11 {
+ i2c_11_active: i2c_11_active {
+ mux {
+ pins = "gpio60", "gpio61";
+ function = "blsp_i2c11";
+ };
+
+ config {
+ pins = "gpio60", "gpio61";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_11_sleep: i2c_11_sleep {
+ mux {
+ pins = "gpio60", "gpio61";
+ function = "blsp_i2c11";
+ };
+
+ config {
+ pins = "gpio60", "gpio61";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c_12 {
+ i2c_12_active: i2c_12_active {
+ mux {
+ pins = "gpio83", "gpio84";
+ function = "blsp_i2c12";
+ };
+
+ config {
+ pins = "gpio83", "gpio84";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c_12_sleep: i2c_12_sleep {
+ mux {
+ pins = "gpio83", "gpio84";
+ function = "blsp_i2c12";
+ };
+
+ config {
+ pins = "gpio83", "gpio84";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ /* SPI CONFIGURATION */
+
+ spi_1 {
+ spi_1_active: spi_1_active {
+ mux {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ function = "blsp_spi1";
+ };
+
+ config {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_1_sleep: spi_1_sleep {
+ mux {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ function = "blsp_spi1";
+ };
+
+ config {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_2 {
+ spi_2_active: spi_2_active {
+ mux {
+ pins = "gpio31", "gpio34",
+ "gpio32", "gpio33";
+ function = "blsp_spi2";
+ };
+
+ config {
+ pins = "gpio31", "gpio34",
+ "gpio32", "gpio33";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_2_sleep: spi_2_sleep {
+ mux {
+ pins = "gpio31", "gpio34",
+ "gpio32", "gpio33";
+ function = "blsp_spi2";
+ };
+
+ config {
+ pins = "gpio31", "gpio34",
+ "gpio32", "gpio33";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_3 {
+ spi_3_active: spi_3_active {
+ mux {
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
+ function = "blsp_spi3";
+ };
+
+ config {
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_3_sleep: spi_3_sleep {
+ mux {
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
+ function = "blsp_spi3";
+ };
+
+ config {
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ pcie0 {
+ pcie0_clkreq_default: pcie0_clkreq_default {
+ mux {
+ pins = "gpio36";
+ function = "pci_e0";
+ };
+
+ config {
+ pins = "gpio36";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie0_perst_default: pcie0_perst_default {
+ mux {
+ pins = "gpio35";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio35";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ pcie0_wake_default: pcie0_wake_default {
+ mux {
+ pins = "gpio37";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio37";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ hph_en0_ctrl {
+ hph_en0_idle: hph_en0_idle {
+ mux {
+ pins = "gpio67";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio67";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+ hph_en0_active: hph_en0_active {
+ mux {
+ pins = "gpio67";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio67";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ hph_en1_ctrl {
+ hph_en1_idle: hph_en1_idle {
+ mux {
+ pins = "gpio68";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio68";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+ hph_en1_active: hph_en1_active {
+ mux {
+ pins = "gpio68";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio68";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd_gnd_mic_swap {
+ wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
+ mux {
+ pins = "gpio75";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio75";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+ wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active {
+ mux {
+ pins = "gpio75";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio75";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ /* USB C analog configuration */
+ wcd_usbc_analog_en1 {
+ wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+ mux {
+ pins = "gpio59";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio59";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+ mux {
+ pins = "gpio59";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio59";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd_usbc_analog_en2n {
+ wcd_usbc_analog_en2n_idle: wcd_usbc_ana_en2n_idle {
+ mux {
+ pins = "gpio60";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio60";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ wcd_usbc_analog_en2n_active: wcd_usbc_ana_en2n_active {
+ mux {
+ pins = "gpio60";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio60";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+ };
+
+ cdc_reset_ctrl {
+ cdc_reset_sleep: cdc_reset_sleep {
+ mux {
+ pins = "gpio116";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio116";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+ };
+ cdc_reset_active: cdc_reset_active {
+ mux {
+ pins = "gpio116";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio116";
+ drive-strength = <16>;
+ bias-pull-down;
+ output-high;
+ };
+ };
+ };
+
+ spi_4 {
+ spi_4_active: spi_4_active {
+ mux {
+ pins = "gpio8", "gpio9",
+ "gpio10", "gpio11";
+ function = "blsp_spi4";
+ };
+
+ config {
+ pins = "gpio8", "gpio9",
+ "gpio10", "gpio11";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_4_sleep: spi_4_sleep {
+ mux {
+ pins = "gpio8", "gpio9",
+ "gpio10", "gpio11";
+ function = "blsp_spi4";
+ };
+
+ config {
+ pins = "gpio8", "gpio9",
+ "gpio10", "gpio11";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spkr_1_sd_n {
+ spkr_1_sd_n_sleep: spkr_1_sd_n_sleep {
+ mux {
+ pins = "gpio111";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio111";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+ spkr_1_sd_n_active: spkr_1_sd_n_active {
+ mux {
+ pins = "gpio111";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio111";
+ drive-strength = <16>; /* 16 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ spi_5 {
+ spi_5_active: spi_5_active {
+ mux {
+ pins = "gpio85", "gpio86",
+ "gpio87", "gpio88";
+ function = "blsp_spi5";
+ };
+
+ config {
+ pins = "gpio85", "gpio86",
+ "gpio87", "gpio88";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_5_sleep: spi_5_sleep {
+ mux {
+ pins = "gpio85", "gpio86",
+ "gpio87", "gpio88";
+ function = "blsp_spi5";
+ };
+
+ config {
+ pins = "gpio85", "gpio86",
+ "gpio87", "gpio88";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spkr_2_sd_n {
+ spkr_2_sd_n_sleep: spkr_2_sd_n_sleep {
+ mux {
+ pins = "gpio112";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio112";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+ spkr_2_sd_n_active: spkr_2_sd_n_active {
+ mux {
+ pins = "gpio112";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio112";
+ drive-strength = <16>; /* 16 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ cci0_active: cci0_active {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio17","gpio18"; // Only 2
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio17","gpio18";
+ bias-pull-up; /* PULL UP*/
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci0_suspend: cci0_suspend {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio17","gpio18";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio17","gpio18";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci1_active: cci1_active {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio19","gpio20";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio19","gpio20";
+ bias-pull-up; /* PULL UP*/
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci1_suspend: cci1_suspend {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio19","gpio20";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio19","gpio20";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_actuator_vaf_active: cam_actuator_vaf_active {
+ /* ACTUATOR POWER */
+ mux {
+ pins = "gpio27";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_actuator_vaf_suspend: cam_actuator_vaf_suspend {
+ /* ACTUATOR POWER */
+ mux {
+ pins = "gpio27";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk0_active: cam_sensor_mclk0_active {
+ /* MCLK0 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio13";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio13";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ cam_sensor_mclk0_suspend: cam_sensor_mclk0_suspend {
+ /* MCLK0 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio13";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio13";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ cam_sensor_6dofl_active: cam_sensor_6dofl_active {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio148","gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio148","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_6dofr_active: cam_sensor_6dofr_active {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio149","gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio149","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_6dofl_suspend: cam_sensor_6dofl_suspend {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio148","gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio148","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_6dofr_suspend: cam_sensor_6dofr_suspend {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio149","gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio149","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_active: cam_sensor_rear_active {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio30","gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+ max_volt_active: max_volt_active {
+ /* RESET */
+ mux {
+ pins = "gpio128", "gpio129",
+ "gpio130", "gpio133";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio128", "gpio129",
+ "gpio130", "gpio133";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+ max_volt_suspend: max_volt_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio128", "gpio129",
+ "gpio130", "gpio133";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio128", "gpio129",
+ "gpio130", "gpio133";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+ max_rst_active: max_rst_active {
+ /* RESET */
+ mux {
+ pins = "gpio30";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+ max_rst_suspend: max_rst_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio30";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ spi_6 {
+ spi_6_active: spi_6_active {
+ mux {
+ pins = "gpio41", "gpio42",
+ "gpio43", "gpio44";
+ function = "blsp_spi6";
+ };
+
+ config {
+ pins = "gpio41", "gpio42",
+ "gpio43", "gpio44";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_6_sleep: spi_6_sleep {
+ mux {
+ pins = "gpio41", "gpio42",
+ "gpio43", "gpio44";
+ function = "blsp_spi6";
+ };
+
+ config {
+ pins = "gpio41", "gpio42",
+ "gpio43", "gpio44";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_7 {
+ spi_7_active: spi_7_active {
+ mux {
+ pins = "gpio53", "gpio54",
+ "gpio55", "gpio56";
+ function = "blsp_spi7";
+ };
+
+ config {
+ pins = "gpio53", "gpio54",
+ "gpio55", "gpio56";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_7_sleep: spi_7_sleep {
+ mux {
+ pins = "gpio53", "gpio54",
+ "gpio55", "gpio56";
+ function = "blsp_spi7";
+ };
+
+ config {
+ pins = "gpio53", "gpio54",
+ "gpio55", "gpio56";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_8 {
+ spi_8_active: spi_8_active {
+ mux {
+ pins = "gpio4", "gpio5",
+ "gpio6", "gpio7";
+ function = "blsp_spi8";
+ };
+
+ config {
+ pins = "gpio4", "gpio5",
+ "gpio6", "gpio7";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_8_sleep: spi_8_sleep {
+ mux {
+ pins = "gpio4", "gpio5",
+ "gpio6", "gpio7";
+ function = "blsp_spi8";
+ };
+
+ config {
+ pins = "gpio4", "gpio5",
+ "gpio6", "gpio7";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_9 {
+ spi_9_active: spi_9_active {
+ mux {
+ pins = "gpio49", "gpio50",
+ "gpio51", "gpio52";
+ function = "blsp_spi9";
+ };
+
+ config {
+ pins = "gpio49", "gpio50",
+ "gpio51", "gpio52";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_9_sleep: spi_9_sleep {
+ mux {
+ pins = "gpio49", "gpio50",
+ "gpio51", "gpio52";
+ function = "blsp_spi9";
+ };
+
+ config {
+ pins = "gpio49", "gpio50",
+ "gpio51", "gpio52";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_10 {
+ spi_10_active: spi_10_active {
+ mux {
+ pins = "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ function = "blsp_spi10";
+ };
+
+ config {
+ pins = "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_10_sleep: spi_10_sleep {
+ mux {
+ pins = "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ function = "blsp_spi10";
+ };
+
+ config {
+ pins = "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_11 {
+ spi_11_active: spi_11_active {
+ mux {
+ pins = "gpio58", "gpio59",
+ "gpio60", "gpio61";
+ function = "blsp_spi11";
+ };
+
+ config {
+ pins = "gpio58", "gpio59",
+ "gpio60", "gpio61";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_11_sleep: spi_11_sleep {
+ mux {
+ pins = "gpio58", "gpio59",
+ "gpio60", "gpio61";
+ function = "blsp_spi11";
+ };
+
+ config {
+ pins = "gpio58", "gpio59",
+ "gpio60", "gpio61";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ spi_12 {
+ spi_12_active: spi_12_active {
+ mux {
+ pins = "gpio81", "gpio82",
+ "gpio83", "gpio84";
+ function = "blsp_spi12";
+ };
+
+ config {
+ pins = "gpio81", "gpio82",
+ "gpio83", "gpio84";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_12_sleep: spi_12_sleep {
+ mux {
+ pins = "gpio81", "gpio82",
+ "gpio83", "gpio84";
+ function = "blsp_spi12";
+ };
+
+ config {
+ pins = "gpio81", "gpio82",
+ "gpio83", "gpio84";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* HS UART CONFIGURATION */
+ blsp1_uart1_active: blsp1_uart1_active {
+ mux {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "blsp_uart1_a";
+ };
+
+ config {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart1_sleep: blsp1_uart1_sleep {
+ mux {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio30","gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30","gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk1_active: cam_sensor_mclk1_active {
+ /* MCLK1 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk1_suspend: cam_sensor_mclk1_suspend {
+ /* MCLK1 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_active: cam_sensor_rear2_active {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio9","gpio8";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9","gpio8";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ blsp1_uart2_active: blsp1_uart2_active {
+ mux {
+ pins = "gpio31", "gpio34", "gpio33", "gpio32";
+ function = "blsp_uart2_a";
+ };
+
+ config {
+ pins = "gpio31", "gpio34", "gpio33", "gpio32";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart2_sleep: blsp1_uart2_sleep {
+ mux {
+ pins = "gpio31", "gpio34", "gpio33", "gpio32";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio31", "gpio34", "gpio33", "gpio32";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart3: blsp1_uart3 {
+ blsp1_uart3_tx_active: blsp1_uart3_tx_active {
+ mux {
+ pins = "gpio45";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio45";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart3_tx_sleep: blsp1_uart3_tx_sleep {
+ mux {
+ pins = "gpio45";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio45";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ blsp1_uart3_rxcts_active: blsp1_uart3_rxcts_active {
+ mux {
+ pins = "gpio46", "gpio47";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio46", "gpio47";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart3_rxcts_sleep: blsp1_uart3_rxcts_sleep {
+ mux {
+ pins = "gpio46", "gpio47";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio46", "gpio47";
+ drive-strength = <2>;
+ bias-no-pull;
+ };
+ };
+
+ blsp1_uart3_rfr_active: blsp1_uart3_rfr_active {
+ mux {
+ pins = "gpio48";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio48";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart3_rfr_sleep: blsp1_uart3_rfr_sleep {
+ mux {
+ pins = "gpio48";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio48";
+ drive-strength = <2>;
+ bias-no-pull;
+ };
+ };
+ };
+
+ cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio9","gpio8";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio9","gpio8";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk2_active: cam_sensor_mclk2_active {
+ /* MCLK2 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ cam_sensor_mclk2_suspend: cam_sensor_mclk2_suspend {
+ /* MCLK2 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ cam_sensor_mclk3_active: cam_sensor_mclk3_active {
+ /* MCLK3 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio16";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio16";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ cam_sensor_mclk3_suspend: cam_sensor_mclk3_suspend {
+ /* MCLK3 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio16";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio16";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+
+ cam_sensor_front_active: cam_sensor_front_active {
+ /* RESET VANA*/
+ mux {
+ pins = "gpio28", "gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28", "gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ blsp2_uart1_active: blsp2_uart1_active {
+ mux {
+ pins = "gpio53", "gpio54", "gpio55", "gpio56";
+ function = "blsp_uart7_a";
+ };
+
+ config {
+ pins = "gpio53", "gpio54", "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart1_sleep: blsp2_uart1_sleep {
+ mux {
+ pins = "gpio53", "gpio54", "gpio55", "gpio56";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio53", "gpio54", "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart2_active: blsp2_uart2_active {
+ mux {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ function = "blsp_uart8_a";
+ };
+
+ config {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart2_sleep: blsp2_uart2_sleep {
+ mux {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cam_sensor_front_suspend: cam_sensor_front_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ pmx_mdss: pmx_mdss {
+ mdss_dsi_active: mdss_dsi_active {
+ mux {
+ pins = "gpio94", "gpio97", "gpio51";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio94", "gpio97", "gpio51";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable = <0>; /* no pull */
+ };
+ };
+ mdss_dsi_suspend: mdss_dsi_suspend {
+ mux {
+ pins = "gpio94", "gpio97", "gpio51";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio94", "gpio97", "gpio51";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+ };
+
+ pmx_mdss_te {
+ mdss_te_active: mdss_te_active {
+ mux {
+ pins = "gpio97";
+ function = "mdp_vsync_b";
+ };
+ config {
+ pins = "gpio97";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down*/
+ };
+ };
+
+ mdss_te_suspend: mdss_te_suspend {
+ mux {
+ pins = "gpio97";
+ function = "mdp_vsync_b";
+ };
+ config {
+ pins = "gpio97";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+ };
+
+ mdss_dp_aux_active: mdss_dp_aux_active {
+ mux {
+ pins = "gpio77", "gpio78";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77", "gpio78";
+ bias-disable = <0>; /* no pull */
+ drive-strength = <8>;
+ };
+ };
+
+ mdss_dp_aux_suspend: mdss_dp_aux_suspend {
+ mux {
+ pins = "gpio77", "gpio78";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77", "gpio78";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ mdss_dp_usbplug_cc_active: mdss_dp_usbplug_cc_active {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-disable;
+ drive-strength = <16>;
+ };
+ };
+
+ mdss_dp_usbplug_cc_suspend: mdss_dp_usbplug_cc_suspend {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ mdss_dp_hpd_active: mdss_dp_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "edp_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ mdss_dp_hpd_suspend: mdss_dp_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "edp_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ blsp2_uart3_active: blsp2_uart3_active {
+ mux {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ function = "blsp_uart9_a";
+ };
+
+ config {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart3_sleep: blsp2_uart3_sleep {
+ mux {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ /* add pingrp for touchscreen */
+ pmx_ts_int_active {
+ ts_int_active: ts_int_active {
+ mux {
+ pins = "gpio125";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio125";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ pmx_ts_int_suspend {
+ ts_int_suspend1: ts_int_suspend1 {
+ mux {
+ pins = "gpio125";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio125";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ pmx_ts_reset_active {
+ ts_reset_active: ts_reset_active {
+ mux {
+ pins = "gpio89";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio89";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ pmx_ts_reset_suspend {
+ ts_reset_suspend1: ts_reset_suspend1 {
+ mux {
+ pins = "gpio89";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio89";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ pmx_ts_release {
+ ts_release: ts_release {
+ mux {
+ pins = "gpio125", "gpio89";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio125", "gpio89";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ ts_mux {
+ ts_active: ts_active {
+ mux {
+ pins = "gpio89", "gpio125";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio89", "gpio125";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ ts_reset_suspend: ts_reset_suspend {
+ mux {
+ pins = "gpio89";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio89";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ ts_int_suspend: ts_int_suspend {
+ mux {
+ pins = "gpio125";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio125";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ config {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET driver strengths are having
+ * different values/steps compared to typical
+ * GPIO drive strengths.
+ *
+ * Following table clarifies:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * POR value for UFS_RESET HDRV is 3 which means
+ * 3.1mA and we want to use that. Hence just
+ * specify 8mA to "drive-strength" binding and
+ * that should result into writing 3 to HDRV
+ * field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ config {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+ };
+
+ sdc2_clk_on: sdc2_clk_on {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_off: sdc2_clk_off {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cmd_on: sdc2_cmd_on {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_off: sdc2_cmd_off {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_data_on: sdc2_data_on {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_off: sdc2_data_off {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio95";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio95";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio95";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio95";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+
+ };
+
+ led_enable: led_enable {
+ mux {
+ pins = "gpio21";
+ drive_strength = <16>;
+ output-high;
+ };
+ };
+
+ led_disable: led_disable {
+ mux {
+ pins = "gpio21";
+ drive_strength = <2>;
+ output-low;
+ };
+ };
+
+ trigout_a: trigout_a {
+ mux {
+ pins = "gpio58";
+ function = "qdss_cti1_a";
+ };
+
+ config {
+ pins = "gpio58";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ mdss_hdmi_5v_active: mdss_hdmi_5v_active {
+ mux {
+ pins = "gpio133";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio133";
+ bias-pull-up;
+ drive-strength = <16>;
+ };
+ };
+
+ mdss_hdmi_5v_suspend: mdss_hdmi_5v_suspend {
+ mux {
+ pins = "gpio133";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio133";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+
+ mdss_hdmi_hpd_active: mdss_hdmi_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ mdss_hdmi_hpd_suspend: mdss_hdmi_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ mdss_hdmi_ddc_active: mdss_hdmi_ddc_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ mdss_hdmi_ddc_suspend: mdss_hdmi_ddc_suspend {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ mdss_hdmi_cec_active: mdss_hdmi_cec_active {
+ mux {
+ pins = "gpio31";
+ function = "hdmi_cec";
+ };
+
+ config {
+ pins = "gpio31";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ mdss_hdmi_cec_suspend: mdss_hdmi_cec_suspend {
+ mux {
+ pins = "gpio31";
+ function = "hdmi_cec";
+ };
+
+ config {
+ pins = "gpio31";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ tsif0_signals_active: tsif0_signals_active {
+ tsif1_clk {
+ pins = "gpio89"; /* TSIF0 CLK */
+ function = "tsif1_clk";
+ };
+ tsif1_en {
+ pins = "gpio90"; /* TSIF0 Enable */
+ function = "tsif1_en";
+ };
+ tsif1_data {
+ pins = "gpio91"; /* TSIF0 DATA */
+ function = "tsif1_data";
+ };
+ signals_cfg {
+ pins = "gpio89", "gpio90", "gpio91";
+ drive_strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ /* sync signal is only used if configured to mode-2 */
+ tsif0_sync_active: tsif0_sync_active {
+ tsif1_sync {
+ pins = "gpio9"; /* TSIF0 SYNC */
+ function = "tsif1_sync";
+ drive_strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ tsif1_signals_active: tsif1_signals_active {
+ tsif2_clk {
+ pins = "gpio93"; /* TSIF1 CLK */
+ function = "tsif2_clk";
+ };
+ tsif2_en {
+ pins = "gpio94"; /* TSIF1 Enable */
+ function = "tsif2_en";
+ };
+ tsif2_data {
+ pins = "gpio95"; /* TSIF1 DATA */
+ function = "tsif2_data";
+ };
+ signals_cfg {
+ pins = "gpio93", "gpio94", "gpio95";
+ drive_strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ /* sync signal is only used if configured to mode-2 */
+ tsif1_sync_active: tsif1_sync_active {
+ tsif2_sync {
+ pins = "gpio96"; /* TSIF1 SYNC */
+ function = "tsif2_sync";
+ drive_strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ pri_aux_pcm_clk {
+ pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep {
+ mux {
+ pins = "gpio65";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_aux_pcm_clk_active: pri_aux_pcm_clk_active {
+ mux {
+ pins = "gpio65";
+ function = "pri_mi2s";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pri_aux_pcm_sync {
+ pri_aux_pcm_sync_sleep: pri_aux_pcm_sync_sleep {
+ mux {
+ pins = "gpio66";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_aux_pcm_sync_active: pri_aux_pcm_sync_active {
+ mux {
+ pins = "gpio66";
+ function = "pri_mi2s_ws";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pri_aux_pcm_din {
+ pri_aux_pcm_din_sleep: pri_aux_pcm_din_sleep {
+ mux {
+ pins = "gpio67";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio67";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_aux_pcm_din_active: pri_aux_pcm_din_active {
+ mux {
+ pins = "gpio67";
+ function = "pri_mi2s";
+ };
+
+ config {
+ pins = "gpio67";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ pri_aux_pcm_dout {
+ pri_aux_pcm_dout_sleep: pri_aux_pcm_dout_sleep {
+ mux {
+ pins = "gpio68";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio68";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_aux_pcm_dout_active: pri_aux_pcm_dout_active {
+ mux {
+ pins = "gpio68";
+ function = "pri_mi2s";
+ };
+
+ config {
+ pins = "gpio68";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ sec_aux_pcm {
+ sec_aux_pcm_sleep: sec_aux_pcm_sleep {
+ mux {
+ pins = "gpio80", "gpio81";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80", "gpio81";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_aux_pcm_active: sec_aux_pcm_active {
+ mux {
+ pins = "gpio80", "gpio81";
+ function = "sec_mi2s";
+ };
+
+ config {
+ pins = "gpio80", "gpio81";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ sec_aux_pcm_din {
+ sec_aux_pcm_din_sleep: sec_aux_pcm_din_sleep {
+ mux {
+ pins = "gpio82";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio82";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_aux_pcm_din_active: sec_aux_pcm_din_active {
+ mux {
+ pins = "gpio82";
+ function = "sec_mi2s";
+ };
+
+ config {
+ pins = "gpio82";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ sec_aux_pcm_dout {
+ sec_aux_pcm_dout_sleep: sec_aux_pcm_dout_sleep {
+ mux {
+ pins = "gpio83";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio83";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_aux_pcm_dout_active: sec_aux_pcm_dout_active {
+ mux {
+ pins = "gpio83";
+ function = "sec_mi2s";
+ };
+
+ config {
+ pins = "gpio83";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ tert_aux_pcm {
+ tert_aux_pcm_sleep: tert_aux_pcm_sleep {
+ mux {
+ pins = "gpio75", "gpio76";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio75", "gpio76";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ tert_aux_pcm_active: tert_aux_pcm_active {
+ mux {
+ pins = "gpio75", "gpio76";
+ function = "ter_mi2s";
+ };
+
+ config {
+ pins = "gpio75", "gpio76";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ tert_aux_pcm_din {
+ tert_aux_pcm_din_sleep: tert_aux_pcm_din_sleep {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ tert_aux_pcm_din_active: tert_aux_pcm_din_active {
+ mux {
+ pins = "gpio77";
+ function = "ter_mi2s";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ tert_aux_pcm_dout {
+ tert_aux_pcm_dout_sleep: tert_aux_pcm_dout_sleep {
+ mux {
+ pins = "gpio78";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio78";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ tert_aux_pcm_dout_active: tert_aux_pcm_dout_active {
+ mux {
+ pins = "gpio78";
+ function = "ter_mi2s";
+ };
+
+ config {
+ pins = "gpio78";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_aux_pcm {
+ quat_aux_pcm_sleep: quat_aux_pcm_sleep {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_aux_pcm_active: quat_aux_pcm_active {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ quat_aux_pcm_din {
+ quat_aux_pcm_din_sleep: quat_aux_pcm_din_sleep {
+ mux {
+ pins = "gpio60";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_aux_pcm_din_active: quat_aux_pcm_din_active {
+ mux {
+ pins = "gpio60";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_aux_pcm_dout {
+ quat_aux_pcm_dout_sleep: quat_aux_pcm_dout_sleep {
+ mux {
+ pins = "gpio61";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_aux_pcm_dout_active: quat_aux_pcm_dout_active {
+ mux {
+ pins = "gpio61";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ pri_mi2s_mclk {
+ pri_mi2s_mclk_sleep: pri_mi2s_mclk_sleep {
+ mux {
+ pins = "gpio64";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio64";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_mi2s_mclk_active: pri_mi2s_mclk_active {
+ mux {
+ pins = "gpio64";
+ function = "pri_mi2s";
+ };
+
+ config {
+ pins = "gpio64";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pri_mi2s_sck {
+ pri_mi2s_sck_sleep: pri_mi2s_sck_sleep {
+ mux {
+ pins = "gpio65";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_mi2s_sck_active: pri_mi2s_sck_active {
+ mux {
+ pins = "gpio65";
+ function = "pri_mi2s";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pri_mi2s_ws {
+ pri_mi2s_ws_sleep: pri_mi2s_ws_sleep {
+ mux {
+ pins = "gpio66";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_mi2s_ws_active: pri_mi2s_ws_active {
+ mux {
+ pins = "gpio66";
+ function = "pri_mi2s_ws";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pri_mi2s_sd0 {
+ pri_mi2s_sd0_sleep: pri_mi2s_sd0_sleep {
+ mux {
+ pins = "gpio67";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio67";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_mi2s_sd0_active: pri_mi2s_sd0_active {
+ mux {
+ pins = "gpio67";
+ function = "pri_mi2s";
+ };
+
+ config {
+ pins = "gpio67";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ pri_mi2s_sd1 {
+ pri_mi2s_sd1_sleep: pri_mi2s_sd1_sleep {
+ mux {
+ pins = "gpio68";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio68";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_mi2s_sd1_active: pri_mi2s_sd1_active {
+ mux {
+ pins = "gpio68";
+ function = "pri_mi2s";
+ };
+
+ config {
+ pins = "gpio68";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ sec_mi2s_mclk {
+ sec_mi2s_mclk_sleep: sec_mi2s_mclk_sleep {
+ mux {
+ pins = "gpio79";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio79";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_mi2s_mclk_active: sec_mi2s_mclk_active {
+ mux {
+ pins = "gpio79";
+ function = "sec_mi2s";
+ };
+
+ config {
+ pins = "gpio79";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ sec_mi2s {
+ sec_mi2s_sleep: sec_mi2s_sleep {
+ mux {
+ pins = "gpio80", "gpio81";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80", "gpio81";
+ drive-strength = <2>; /* 2 mA */
+ bias-disable; /* NO PULL */
+ input-enable;
+ };
+ };
+
+ sec_mi2s_active: sec_mi2s_active {
+ mux {
+ pins = "gpio80", "gpio81";
+ function = "sec_mi2s";
+ };
+
+ config {
+ pins = "gpio80", "gpio81";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ sec_mi2s_sd0 {
+ sec_mi2s_sd0_sleep: sec_mi2s_sd0_sleep {
+ mux {
+ pins = "gpio82";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio82";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_mi2s_sd0_active: sec_mi2s_sd0_active {
+ mux {
+ pins = "gpio82";
+ function = "sec_mi2s";
+ };
+
+ config {
+ pins = "gpio82";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ sec_mi2s_sd1 {
+ sec_mi2s_sd1_sleep: sec_mi2s_sd1_sleep {
+ mux {
+ pins = "gpio83";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio83";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_mi2s_sd1_active: sec_mi2s_sd1_active {
+ mux {
+ pins = "gpio83";
+ function = "sec_mi2s";
+ };
+
+ config {
+ pins = "gpio83";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ tert_mi2s_mclk {
+ tert_mi2s_mclk_sleep: tert_mi2s_mclk_sleep {
+ mux {
+ pins = "gpio74";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio74";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ tert_mi2s_mclk_active: tert_mi2s_mclk_active {
+ mux {
+ pins = "gpio74";
+ function = "ter_mi2s";
+ };
+
+ config {
+ pins = "gpio74";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ tert_mi2s {
+ tert_mi2s_sleep: tert_mi2s_sleep {
+ mux {
+ pins = "gpio75", "gpio76";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio75", "gpio76";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ tert_mi2s_active: tert_mi2s_active {
+ mux {
+ pins = "gpio75", "gpio76";
+ function = "ter_mi2s";
+ };
+
+ config {
+ pins = "gpio75", "gpio76";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ tert_mi2s_sd0 {
+ tert_mi2s_sd0_sleep: tert_mi2s_sd0_sleep {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ tert_mi2s_sd0_active: tert_mi2s_sd0_active {
+ mux {
+ pins = "gpio77";
+ function = "ter_mi2s";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ tert_mi2s_sd1 {
+ tert_mi2s_sd1_sleep: tert_mi2s_sd1_sleep {
+ mux {
+ pins = "gpio78";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio78";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ tert_mi2s_sd1_active: tert_mi2s_sd1_active {
+ mux {
+ pins = "gpio78";
+ function = "ter_mi2s";
+ };
+
+ config {
+ pins = "gpio78";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_mi2s_mclk {
+ quat_mi2s_mclk_sleep: quat_mi2s_mclk_sleep {
+ mux {
+ pins = "gpio57";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio57";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_mi2s_mclk_active: quat_mi2s_mclk_active {
+ mux {
+ pins = "gpio57";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio57";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_mi2s {
+ quat_mi2s_sleep: quat_mi2s_sleep {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_mi2s_active: quat_mi2s_active {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ quat_mi2s_sd0 {
+ quat_mi2s_sd0_sleep: quat_mi2s_sd0_sleep {
+ mux {
+ pins = "gpio60";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd0_active: quat_mi2s_sd0_active {
+ mux {
+ pins = "gpio60";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_mi2s_sd1 {
+ quat_mi2s_sd1_sleep: quat_mi2s_sd1_sleep {
+ mux {
+ pins = "gpio61";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd1_active: quat_mi2s_sd1_active {
+ mux {
+ pins = "gpio61";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_mi2s_sd2 {
+ quat_mi2s_sd2_sleep: quat_mi2s_sd2_sleep {
+ mux {
+ pins = "gpio62";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd2_active: quat_mi2s_sd2_active {
+ mux {
+ pins = "gpio62";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_mi2s_sd3 {
+ quat_mi2s_sd3_sleep: quat_mi2s_sd3_sleep {
+ mux {
+ pins = "gpio63";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio63";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd3_active: quat_mi2s_sd3_active {
+ mux {
+ pins = "gpio63";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio63";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ spkr_i2s_clk_pin {
+ spkr_i2s_clk_sleep: spkr_i2s_clk_sleep {
+ mux {
+ pins = "gpio69";
+ function = "spkr_i2s";
+ };
+
+ config {
+ pins = "gpio69";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ spkr_i2s_clk_active: spkr_i2s_clk_active {
+ mux {
+ pins = "gpio69";
+ function = "spkr_i2s";
+ };
+
+ config {
+ pins = "gpio69";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ spkr_1_sd_mediabox {
+ spkr_1_sd_sleep_mediabox: spkr_1_sd_sleep_mediabox {
+ mux {
+ pins = "gpio85";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio85";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+ spkr_1_sd_active_mediabox: spkr_1_sd_active_mediabox {
+ mux {
+ pins = "gpio85";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio85";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ spkr_2_sd_mediabox {
+ spkr_2_sd_sleep_mediabox: spkr_2_sd_sleep_mediabox {
+ mux {
+ pins = "gpio112";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio112";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+ spkr_2_sd_active_mediabox: spkr_2_sd_active_mediabox {
+ mux {
+ pins = "gpio112";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio112";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ sdc2_cd_on_mediabox: sdc2_cd_on_mediabox {
+ mux {
+ pins = "gpio86";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio86";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 mA */
+ };
+ };
+
+ sdc2_cd_off_mediabox: sdc2_cd_off_mediabox {
+ mux {
+ pins = "gpio86";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio86";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 mA */
+ };
+ };
+
+ ir_active: ir_active {
+ mux {
+ pins = "gpio90", "gpio91";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio90", "gpio91";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ mtch6102_int {
+ mtch6102_int_active: mtch6102_int_active {
+ mux {
+ pins = "gpio125";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio125";
+ drive-strength = <16>; /* 16 mA */
+ bias-pull-up;
+ };
+ };
+
+ mtch6102_int_suspend: mtch6102_int_suspend {
+ mux {
+ pins = "gpio125";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio125";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ };
+ };
+ };
+
+ mtch6102_rst {
+ mtch6102_rst_active: mtch6102_rst_active {
+ mux {
+ pins = "gpio85";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio85";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ mtch6102_rst_suspend: mtch6102_rst_suspend {
+ mux {
+ pins = "gpio85";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio85";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-svr20.dtsi b/arch/arm/boot/dts/qcom/msm8998-svr20.dtsi
new file mode 100644
index 000000000000..a1fce88c8e54
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-svr20.dtsi
@@ -0,0 +1,409 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "msm8998-svr20-pinctrl.dtsi"
+#include "msm8998-camera-sensor-svr20.dtsi"
+&vendor {
+ bluetooth: bt_wcn3990 {
+ compatible = "qca,wcn3990";
+ qca,bt-vdd-io-supply = <&pm8998_s3>;
+ qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+ qca,bt-vdd-core-supply = <&pm8998_l7>;
+ qca,bt-vdd-pa-supply = <&pm8998_l17>;
+ qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+ qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2_pin>;
+ clock-names = "rf_clk2";
+
+ qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+ qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+ qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+ qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+ qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+ qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+ qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+ };
+ svr20_batterydata: qcom,battery-data {
+ qcom,batt-id-range-pct = <25>;
+ #include "fg-gen3-batterydata-svr-v2-3200mah.dtsi"
+ };
+};
+
+&blue_led {
+ qcom,default-state = "on";
+ linux,default-trigger = "system-running";
+};
+
+&pmi8998_charger {
+ qcom,fcc-max-ua = <5000000>;
+ qcom,usb-icl-ua = <3000000>;
+};
+
+&blsp1_uart3_hs {
+ status = "ok";
+};
+
+&ufsphy1 {
+ vdda-phy-supply = <&pm8998_l1>;
+ vdda-pll-supply = <&pm8998_l2>;
+ vddp-ref-clk-supply = <&pm8998_l26>;
+ vdda-phy-max-microamp = <51400>;
+ vdda-pll-max-microamp = <14600>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ status = "ok";
+};
+
+&ufs1 {
+ vdd-hba-supply = <&gdsc_ufs>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm8998_l20>;
+ vccq-supply = <&pm8998_l26>;
+ vccq2-supply = <&pm8998_s4>;
+ vcc-max-microamp = <750000>;
+ vccq-max-microamp = <560000>;
+ vccq2-max-microamp = <750000>;
+ status = "ok";
+};
+
+&ufs_ice {
+ status = "ok";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8998_l21>;
+ qcom,vdd-voltage-level = <2950000 2960000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pm8998_l13>;
+ qcom,vdd-io-voltage-level = <1808000 2960000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+ cd-gpios = <&tlmm 95 0x1>;
+
+ status = "ok";
+};
+
+&uartblsp2dm1 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
+};
+
+&pm8998_gpios {
+ /* GPIO 5 for Home Key */
+ gpio@c400 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
+ /* GPIO 6 for Vol+ Key */
+ gpio@c500 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
+ /* GPIO 7 for Snapshot Key */
+ gpio@c600 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
+ /* GPIO 8 for Focus Key */
+ gpio@c700 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
+ gpio@cc00 { /* GPIO 13 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <0>;
+ qcom,out-strength = <1>;
+ qcom,src-sel = <3>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+
+ /* GPIO 21 (NFC_CLK_REQ) */
+ gpio@d400 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+
+ /* GPIO 18 SMB138X */
+ gpio@d100 {
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+};
+
+&i2c_5 {
+ status = "okay";
+};
+
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 92 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 93 0x00>;
+ qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <92 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_gcc clk_ln_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
+&mdss_hdmi_tx {
+ status = "disabled";
+ pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active",
+ "hdmi_active", "hdmi_sleep";
+ pinctrl-0 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
+ pinctrl-1 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active &mdss_hdmi_cec_suspend>;
+ pinctrl-2 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_cec_active &mdss_hdmi_ddc_suspend>;
+ pinctrl-3 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active &mdss_hdmi_cec_active>;
+ pinctrl-4 = <&mdss_hdmi_5v_suspend &mdss_hdmi_hpd_suspend
+ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
+};
+
+&mdss_dp_ctrl {
+ status = "disabled";
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
+ pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 77 0>;
+ qcom,aux-sel-gpio = <&tlmm 78 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 38 0>;
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "split_dsi";
+};
+
+&mem_client_3_size {
+ qcom,peripheral-size = <0x500000>;
+};
+
+&pmi8998_haptics {
+ status = "okay";
+};
+
+&pm8998_vadc {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@85 {
+ label = "vcoin";
+ reg = <0x85>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+};
+
+&pm8998_adc_tm {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,btm-channel-number = <0x60>;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x68>;
+ qcom,thermal-node;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x70>;
+ qcom,thermal-node;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x78>;
+ qcom,thermal-node;
+ };
+};
+
+&wil6210 {
+ status = "ok";
+};
+
+&soc {
+ gpio_keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+ status = "okay";
+
+ home {
+ label = "home";
+ gpios = <&pm8998_gpios 5 0x1>;
+ linux,input-type = <1>;
+ linux,code = <158>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pm8998_gpios 6 0x1>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+
+ vol_down {
+ label = "volume_down";
+ gpios = <&pm8998_gpios 7 0x1>;
+ linux,input-type = <1>;
+ linux,code = <114>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+
+ confirm {
+ label = "confirm_key";
+ gpios = <&pm8998_gpios 8 0x1>;
+ linux,input-type = <1>;
+ linux,code = <28>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+&pmi8998_fg {
+ qcom,battery-data = <&svr20_batterydata>;
+ qcom,fg-force-load-profile;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
index b2f30de94bbc..acdd4bdcd95b 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
@@ -436,7 +436,7 @@
0x9ac 0x00 0x00
0x8a0 0x01 0x00
0x9e0 0x00 0x00
- 0x9dc 0x01 0x00
+ 0x9dc 0x20 0x00
0x9a8 0x00 0x00
0x8a4 0x01 0x00
0x8a8 0x73 0x00
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 3f8962de22be..eafa6b841c17 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -2675,7 +2675,7 @@
0x9ac 0x00 0x00
0x8a0 0x01 0x00
0x9e0 0x00 0x00
- 0x9dc 0x01 0x00
+ 0x9dc 0x20 0x00
0x9a8 0x00 0x00
0x8a4 0x01 0x00
0x8a8 0x73 0x00
diff --git a/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
index 0275016c9662..32a251f00550 100644
--- a/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
@@ -114,6 +114,26 @@
status = "disabled";
};
+ tof0: qcom,tof@29 {
+ cell-index = <0>;
+ reg = <0x29>;
+ compatible = "st,stmvl53l0";
+ qcom,cci-master = <0>;
+ cam_laser-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_laser";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <3400000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_tof_active>;
+ pinctrl-1 = <&cam_tof_suspend>;
+ stm,irq-gpio = <&tlmm 45 0x2008>;
+ gpios = <&tlmm 42 0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "RNG_EN";
+ };
+
eeprom0: qcom,eeprom@0 {
cell-index = <0>;
reg = <0>;
diff --git a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi
index f10477bab8cf..f39f8b880690 100644
--- a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi
@@ -24,7 +24,7 @@
qcom,vctl-timeout-us = <500>;
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
- qcom,saw2-avs-ctl = <0x101c031>;
+ qcom,saw2-avs-ctl = <0x1010031>;
qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -40,7 +40,7 @@
qcom,vctl-timeout-us = <500>;
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
- qcom,saw2-avs-ctl = <0x101c031>;
+ qcom,saw2-avs-ctl = <0x1010031>;
qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 6d73c0eff6c8..5618f02e34f2 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -1306,18 +1306,9 @@
compatible = "qcom,clk-cpu-osm-sdm630";
reg = <0x179c0000 0x4000>, <0x17916000 0x1000>,
<0x17816000 0x1000>, <0x179d1000 0x1000>,
- <0x00784130 0x8>, <0x17914800 0x800>,
- <0x17814800 0x800>;
+ <0x00784130 0x8>;
reg-names = "osm", "pwrcl_pll", "perfcl_pll",
- "apcs_common", "perfcl_efuse",
- "pwrcl_acd", "perfcl_acd";
-
- qcom,acdtd-val = <0x0000a111 0x0000a111>;
- qcom,acdcr-val = <0x002c5ffd 0x002c5ffd>;
- qcom,acdsscr-val = <0x00000901 0x00000901>;
- qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>;
- qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe>;
- qcom,acdautoxfer-val = <0x00000015 0x00000015>;
+ "apcs_common", "perfcl_efuse";
vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
vdd-perfcl-supply = <&apc1_perfcl_vreg>;
@@ -2041,6 +2032,7 @@
qcom,qsee-ce-hw-instance = <0>;
qcom,disk-encrypt-pipe-pair = <2>;
qcom,support-fde;
+ qcom,fde-key-size;
qcom,no-clock-support;
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
index 46f77e9a3253..476fedec35a4 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
@@ -29,6 +29,16 @@
qcom,switch-source = <&pm660l_switch1>;
status = "ok";
};
+
+ cam_actuator_regulator: cam_actuator_fixed_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "cam_actuator_regulator";
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+ enable-active-high;
+ gpio = <&tlmm 50 0>;
+ vin-supply = <&pm660l_bob>;
+ };
};
&cci {
@@ -37,14 +47,11 @@
reg = <0x0>;
compatible = "qcom,actuator";
qcom,cci-master = <0>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
- qcom,gpio-req-tbl-num = <0>;
- qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
};
actuator1: qcom,actuator@1 {
@@ -52,14 +59,11 @@
reg = <0x1>;
compatible = "qcom,actuator";
qcom,cci-master = <1>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
- qcom,gpio-req-tbl-num = <0>;
- qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
};
actuator2: qcom,actuator@2 {
@@ -67,14 +71,11 @@
reg = <0x2>;
compatible = "qcom,actuator";
qcom,cci-master = <1>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
- qcom,gpio-req-tbl-num = <0>;
- qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
};
ois0: qcom,ois@0 {
@@ -82,15 +83,31 @@
reg = <0x0>;
compatible = "qcom,ois";
qcom,cci-master = <0>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
+ status = "disabled";
+ };
+
+ tof0: qcom,tof@29 {
+ cell-index = <0>;
+ reg = <0x29>;
+ compatible = "st,stmvl53l0";
+ qcom,cci-master = <0>;
+ cam_laser-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_laser";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_tof_active>;
+ pinctrl-1 = <&cam_tof_suspend>;
+ stm,irq-gpio = <&tlmm 45 0x2008>;
+ gpios = <&tlmm 42 0>;
qcom,gpio-req-tbl-num = <0>;
qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
- status = "disabled";
+ qcom,gpio-req-tbl-label = "RNG_EN";
};
eeprom0: qcom,eeprom@0 {
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
index 94166bf8dd3e..11eb288804ff 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
@@ -40,6 +40,16 @@
vin-supply = <&pm660l_bob>;
};
+ cam_actuator_regulator: cam_actuator_fixed_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "cam_actuator_regulator";
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+ enable-active-high;
+ gpio = <&tlmm 50 0>;
+ vin-supply = <&pm660l_bob>;
+ };
+
cam_dvdd_gpio_regulator: cam_dvdd_fixed_regulator {
compatible = "regulator-fixed";
regulator-name = "cam_dvdd_gpio_regulator";
@@ -67,14 +77,11 @@
reg = <0x0>;
compatible = "qcom,actuator";
qcom,cci-master = <0>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
- qcom,gpio-req-tbl-num = <0>;
- qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
};
actuator1: qcom,actuator@1 {
@@ -82,14 +89,11 @@
reg = <0x1>;
compatible = "qcom,actuator";
qcom,cci-master = <1>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
- qcom,gpio-req-tbl-num = <0>;
- qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
};
actuator2: qcom,actuator@2 {
@@ -97,14 +101,11 @@
reg = <0x2>;
compatible = "qcom,actuator";
qcom,cci-master = <1>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
- qcom,gpio-req-tbl-num = <0>;
- qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
};
ois0: qcom,ois@0 {
@@ -112,15 +113,31 @@
reg = <0x0>;
compatible = "qcom,ois";
qcom,cci-master = <0>;
- gpios = <&tlmm 50 0>;
- qcom,gpio-vaf = <0>;
+ cam_vaf-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ qcom,cam-vreg-op-mode = <0>;
+ status = "disabled";
+ };
+
+ tof0: qcom,tof@29 {
+ cell-index = <0>;
+ reg = <0x29>;
+ compatible = "st,stmvl53l0";
+ qcom,cci-master = <0>;
+ cam_laser-supply = <&cam_actuator_regulator>;
+ qcom,cam-vreg-name = "cam_laser";
+ qcom,cam-vreg-min-voltage = <3600000>;
+ qcom,cam-vreg-max-voltage = <3600000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_tof_active>;
+ pinctrl-1 = <&cam_tof_suspend>;
+ stm,irq-gpio = <&tlmm 45 0x2008>;
+ gpios = <&tlmm 42 0>;
qcom,gpio-req-tbl-num = <0>;
qcom,gpio-req-tbl-flags = <0>;
- qcom,gpio-req-tbl-label = "CAM_VAF";
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_actuator_vaf_active>;
- pinctrl-1 = <&cam_actuator_vaf_suspend>;
- status = "disabled";
+ qcom,gpio-req-tbl-label = "RNG_EN";
};
eeprom0: qcom,eeprom@0 {
diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
index 45b7201fbf71..50f5d83346c6 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
@@ -56,6 +56,16 @@
qcom,master-en = <1>;
status = "okay";
};
+
+ /* GPIO 11 for Home Key */
+ gpio@ca00 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
};
&i2c_6 { /* BLSP1 QUP6 (NFC) */
diff --git a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
index d902078b1048..e55a67e68b36 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
@@ -1101,6 +1101,34 @@
};
};
+ cam_tof_active: cam_tof_active {
+ /* LASER */
+ mux {
+ pins = "gpio50", "gpio42", "gpio45";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio50", "gpio42", "gpio45";
+ bias-pull-up;
+ drive-strength = <2>; /* 2 mA */
+ };
+ };
+
+ cam_tof_suspend: cam_tof_suspend {
+ /* LASER */
+ mux {
+ pins = "gpio50", "gpio42", "gpio45";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio50", "gpio42", "gpio45";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 mA */
+ };
+ };
+
cam_sensor_mclk0_active: cam_sensor_mclk0_active {
/* MCLK0 */
mux {
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index f14c9a32c2f9..e00753f8b3e7 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -672,6 +672,16 @@
clock-names = "core", "iface";
};
+ qcom,qbt1000 {
+ compatible = "qcom,qbt1000";
+ clock-names = "core", "iface";
+ clocks = <&clock_gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
+ <&clock_gcc GCC_BLSP1_AHB_CLK>;
+ clock-frequency = <15000000>;
+ qcom,ipc-gpio = <&tlmm 72 0>;
+ qcom,finger-detect-gpio = <&pm660_gpios 11 0>;
+ };
+
qcom,sensor-information {
compatible = "qcom,sensor-information";
sensor_information0: qcom,sensor-information-0 {
@@ -1567,6 +1577,7 @@
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-adsp";
qcom,fastrpc-glink;
+ qcom,fastrpc-vmid-heap-shared;
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
index b108f7d56e57..5c0bbbccafea 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
@@ -85,6 +85,10 @@
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+ <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+ <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -103,6 +107,10 @@
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+ "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+ "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
@@ -292,6 +300,82 @@
};
};
+ qcom,msm-dai-tdm-pri-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37120>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36864 36866 36868 36870>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+ dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36864>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_rx_1: qcom,msm-dai-q6-tdm-pri-rx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36866>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_rx_2: qcom,msm-dai-q6-tdm-pri-rx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36868>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_rx_3: qcom,msm-dai-q6-tdm-pri-rx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36870>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-dai-tdm-pri-tx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37121>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36865 36867 36869 36871>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+ dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36865>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_tx_1: qcom,msm-dai-q6-tdm-pri-tx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36867>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_tx_2: qcom,msm-dai-q6-tdm-pri-tx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36869>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_pri_tdm_tx_3: qcom,msm-dai-q6-tdm-pri-tx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36871>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-sec-tx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37137>;
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index f3142369f594..01116ee1284b 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -87,9 +87,9 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig
index 43b6432118f0..878e720a927b 100644
--- a/arch/arm/configs/sdm660-perf_defconfig
+++ b/arch/arm/configs/sdm660-perf_defconfig
@@ -656,6 +656,7 @@ CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig
index e3aa35da81ce..524abcf83e77 100644
--- a/arch/arm/configs/sdm660_defconfig
+++ b/arch/arm/configs/sdm660_defconfig
@@ -694,6 +694,7 @@ CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ffd8f12..f13ae153fb24 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE 0x400000UL
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index ce5d848251fa..7b34822d61e9 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -26,7 +26,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 857eda5c7217..172402cc1a0f 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -71,7 +71,7 @@
<1 10 0xf01>;
};
- amba_apu {
+ amba_apu: amba_apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
@@ -191,7 +191,7 @@
};
i2c0: i2c@ff020000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
@@ -202,7 +202,7 @@
};
i2c1: i2c@ff030000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index acde18d2fe31..61418724b897 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -34,7 +34,7 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index f510f43427ce..ee2b9fa628ff 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -31,7 +31,7 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 894cb466b075..e16fc58ce913 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -40,7 +40,7 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -295,6 +295,7 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_KEYBOARD_GPIO=y
@@ -320,6 +321,7 @@ CONFIG_SERIAL_MSM_SMD=y
CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 993e350ef000..77157bb85ee1 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -38,7 +38,7 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -296,6 +296,7 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_KEYBOARD_GPIO=y
@@ -324,6 +325,7 @@ CONFIG_SERIAL_MSM_SMD=y
CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
index b6cb1a4b8574..c92551c69bf8 100644
--- a/arch/arm64/configs/sdm660-perf_defconfig
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -38,7 +38,7 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -69,6 +69,7 @@ CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_RANDOMIZE_BASE=y
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
@@ -299,6 +300,7 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_STMVL53L0=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
@@ -574,6 +576,7 @@ CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_AVTIMER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
@@ -640,6 +643,7 @@ CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig
index ccef87ff6a04..6b45087e2603 100644
--- a/arch/arm64/configs/sdm660_defconfig
+++ b/arch/arm64/configs/sdm660_defconfig
@@ -40,7 +40,7 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -73,6 +73,7 @@ CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -308,6 +309,7 @@ CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_KEYCHORD=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_STMVL53L0=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
@@ -599,6 +601,7 @@ CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_AVTIMER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
@@ -713,6 +716,7 @@ CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a383c288ef49..b98332269462 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -114,12 +114,11 @@
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE 0x100000000UL
#ifndef __ASSEMBLY__
@@ -170,7 +169,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index eea245bf546a..3112c2a9d96f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -167,9 +167,9 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define readq_relaxed_no_log(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c)))
-#define writew_relaxed_no_log(v, c) ((void)__raw_writew_no_log((__force u16)cpu_to_le32(v), (c)))
+#define writew_relaxed_no_log(v, c) ((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
#define writel_relaxed_no_log(v, c) ((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
-#define writeq_relaxed_no_log(v, c) ((void)__raw_writeq_no_log((__force u64)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c) ((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
/*
* I/O memory access primitives. Reads are ordered relative to any
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1804aea44faa..2720d47da366 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -92,21 +92,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
pud = pud_offset(pgd, addr);
- printk(", *pud=%016llx", pud_val(*pud));
+ pr_cont(", *pud=%016llx", pud_val(*pud));
if (pud_none(*pud) || pud_bad(*pud))
break;
pmd = pmd_offset(pud, addr);
- printk(", *pmd=%016llx", pmd_val(*pmd));
+ pr_cont(", *pmd=%016llx", pmd_val(*pmd));
if (pmd_none(*pmd) || pmd_bad(*pmd))
break;
pte = pte_offset_map(pmd, addr);
- printk(", *pte=%016llx", pte_val(*pte));
+ pr_cont(", *pte=%016llx", pte_val(*pte));
pte_unmap(pte);
} while(0);
- printk("\n");
+ pr_cont("\n");
}
#ifdef CONFIG_ARM64_HW_AFDBM
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index de781cf54bc7..da80878f2c0d 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
return __microMIPS_compute_return_epc(regs);
if (cpu_has_mips16)
return __MIPS16e_compute_return_epc(regs);
- return regs->cp0_epc;
- }
-
- if (!delay_slot(regs)) {
+ } else if (!delay_slot(regs)) {
regs->cp0_epc += 4;
return 0;
}
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index e9fed8ca9b42..71e8f4c0b8da 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
*
* @regs: Pointer to pt_regs
* @insn: branch instruction to decode
- * @returns: -EFAULT on error and forces SIGBUS, and on success
+ * @returns: -EFAULT on error and forces SIGILL, and on success
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*
@@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/* Fall through */
case jr_op:
if (NO_R6EMU && insn.r_format.func == jr_op)
- goto sigill_r6;
+ goto sigill_r2r6;
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
@@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
switch (insn.i_format.rt) {
case bltzl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgezl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bltzal_op:
case bltzall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bltzall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bltzall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a NAL
@@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgezal_op:
case bgezall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bgezall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bgezall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a BAL
@@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/*
* These are unconditional and in j_format.
*/
+ case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
case j_op:
@@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
*/
case beql_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
@@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bnel_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
@@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case blezl_op: /* not really i_format */
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case blez_op:
/*
* Compact branches for R6 for the
@@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgtz_op:
/*
* Compact branches for R6 for the
@@ -843,11 +840,12 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
return ret;
sigill_dsp:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
- force_sig(SIGBUS, current);
+ pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
return -EFAULT;
-sigill_r6:
- pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+sigill_r2r6:
+ pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
current->comm);
force_sig(SIGILL, current);
return -EFAULT;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 298b2b773d12..f1fab6ff53e6 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
seq_printf(m, "isa\t\t\t:");
- if (cpu_has_mips_r1)
+ if (cpu_has_mips_1)
seq_printf(m, " mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c95bf18260f8..24c115a0721a 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -927,7 +927,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->regs[2]);
+ trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2d23c834ba96..29b0c5f978e4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -372,7 +372,7 @@ EXPORT(sys_call_table)
PTR sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index deac63315d0e..a6323a969919 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -312,7 +312,7 @@ EXPORT(sys_call_table)
PTR sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_io_setup /* 5200 */
PTR sys_io_destroy
PTR sys_io_getevents
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index ee93d5fe61d7..e0fdca8d3abe 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -298,7 +298,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
PTR compat_sys_io_getevents
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b77052ec6fb2..87c697181d25 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -367,7 +367,7 @@ EXPORT(sys32_call_table)
PTR compat_sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 53a7ef9a8f32..4234b2d726c5 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -28,6 +28,7 @@
#include <linux/elf.h>
#include <asm/asm.h>
+#include <asm/asm-eva.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -138,10 +139,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
__asm__ __volatile__ (
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
- "1: ll %[old], (%[addr]) \n"
+ "1: \n"
+ user_ll("%[old]", "(%[addr])")
" move %[tmp], %[new] \n"
- "2: sc %[tmp], (%[addr]) \n"
- " bnez %[tmp], 4f \n"
+ "2: \n"
+ user_sc("%[tmp]", "(%[addr])")
+ " beqz %[tmp], 4f \n"
"3: \n"
" .insn \n"
" .subsection 2 \n"
@@ -199,6 +202,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
unreachable();
}
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
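For readers unfamiliar with LL/SC: sc writes 1 into its source register on success and 0 on failure, which is why the retry branch above becomes beqz. A hedged C model of the retry loop, using the GCC/Clang atomic builtins rather than the kernel's EVA-aware user_ll()/user_sc():

    /* Model only, not the kernel code path. */
    static unsigned long atomic_set_model(unsigned long *addr, unsigned long new_val)
    {
            unsigned long old;

            do {
                    old = __atomic_load_n(addr, __ATOMIC_RELAXED);       /* "ll" */
            } while (!__atomic_compare_exchange_n(addr, &old, new_val,   /* "sc" */
                                                  1, __ATOMIC_RELAXED,
                                                  __ATOMIC_RELAXED));
            return old;  /* value observed before the successful store */
    }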
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 734a2c7665ec..6da2e4a6ba39 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2496,6 +2496,35 @@ dcopuop:
return 0;
}
+/*
+ * Emulate FPU instructions.
+ *
+ * If we use FPU hardware, then we have been typically called to handle
+ * an unimplemented operation, such as where an operand is a NaN or
+ * denormalized. In that case exit the emulation loop after a single
+ * iteration so as to let hardware execute any subsequent instructions.
+ *
+ * If we have no FPU hardware or it has been disabled, then continue
+ * emulating floating-point instructions until one of these conditions
+ * has occurred:
+ *
+ * - a non-FPU instruction has been encountered,
+ *
+ * - an attempt to emulate has ended with a signal,
+ *
+ * - the ISA mode has been switched.
+ *
+ * We need to terminate the emulation loop if we got switched to the
+ * MIPS16 mode, whether supported or not, so that we do not attempt
+ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
+ * Similarly if we got switched to the microMIPS mode and only the
+ * regular MIPS mode is supported, so that we do not attempt to emulate
+ * a microMIPS instruction as a regular MIPS FPU instruction. Or if
+ * we got switched to the regular MIPS mode and only the microMIPS mode
+ * is supported, so that we do not attempt to emulate a regular MIPS
+ * instruction that should cause an Address Error exception instead.
+ * For simplicity we always terminate upon an ISA mode switch.
+ */
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
@@ -2581,6 +2610,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
if (sig)
break;
+ /*
+ * We have to check for the ISA bit explicitly here,
+ * because `get_isa16_mode' may return 0 if support
+ * for code compression has been globally disabled,
+ * or otherwise we may produce the wrong signal or
+ * even proceed successfully where we must not.
+ */
+ if ((xcp->cp0_epc ^ prevepc) & 0x1)
+ break;
cond_resched();
} while (xcp->cp0_epc > prevepc);
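A hedged model of the exit conditions the comment above spells out (plain C, not the kernel function): emulation stops on a signal, on any change of the ISA bit (bit 0 of the PC), or when the PC stops advancing.

    #include <stdbool.h>
    #include <stdint.h>

    static bool keep_emulating(uint64_t prev_pc, uint64_t new_pc, int sig)
    {
            if (sig)                        /* emulation raised a signal */
                    return false;
            if ((new_pc ^ prev_pc) & 0x1)   /* ISA mode switch (MIPS16/microMIPS) */
                    return false;
            return new_pc > prev_pc;        /* forward progress, same ISA */
    }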
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index 2d69a853b742..3a08b55609b6 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -38,6 +38,8 @@ SECTIONS
/* Read-only sections, merged into text segment: */
. = LOAD_BASE ;
+ _text = .;
+
/* _s_kernel_ro must be page aligned */
. = ALIGN(PAGE_SIZE);
_s_kernel_ro = .;
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d8d60a57183f..f53725202955 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -39,6 +39,8 @@ struct hppa_dma_ops {
** flush/purge and allocate "regular" cacheable pages for everything.
*/
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
+
#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;
extern struct hppa_dma_ops pcx_dma_ops;
@@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev)
break;
}
}
- BUG_ON(!dev->platform_data);
return dev->platform_data;
}
-
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-
+
+#define GET_IOC(dev) ({ \
+ void *__pdata = parisc_walk_tree(dev); \
+ __pdata ? HBA_DATA(__pdata)->iommu : NULL; \
+})
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be25764433..a81226257878 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
mtctl(__space_to_prot(context), 8);
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
{
-
if (prev != next) {
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index d4ffcfbc9885..041e1f9ec129 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
- ENTRY_SAME(keyctl)
+ ENTRY_COMP(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81c97c9..2f33a67bc531 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -298,7 +298,7 @@ bad_area:
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
if (!vma ||
- address < vma->vm_start || address > vma->vm_end) {
+ address < vma->vm_start || address >= vma->vm_end) {
si.si_signo = SIGSEGV;
si.si_code = SEGV_MAPERR;
break;
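The comparison change reflects that vm_end is exclusive: a VMA covers [vm_start, vm_end), so an address equal to vm_end lies outside it. A one-function sketch of that convention:

    #include <stdbool.h>

    struct vma_model { unsigned long vm_start, vm_end; };

    static bool addr_in_vma(const struct vma_model *v, unsigned long addr)
    {
            /* vm_end is the first byte past the mapping */
            return addr >= v->vm_start && addr < v->vm_end;
    }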
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 55f106ed12bf..039c4b910615 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -460,7 +460,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
long t1, t2;
@@ -479,7 +479,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer", "memory");
- return t1;
+ return t1 != 0;
}
#endif /* __powerpc64__ */
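A hedged user-space illustration (LP64 assumed) of the hazard the int return type avoids: a counter whose only set bits lie above bit 31 would narrow to 0 if the long result were converted to int at a caller, whereas (t1 != 0) cannot.

    #include <stdio.h>

    int main(void)
    {
            long t1 = 0x100000000L;     /* non-zero, low 32 bits all clear */
            int narrowed = (int)t1;     /* typically truncates to 0 */
            int fixed = (t1 != 0);      /* what the patched helper returns */

            printf("narrowed=%d fixed=%d\n", narrowed, fixed);
            return 0;
    }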
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ee46ffef608e..743ad7a400d6 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE 0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
+ 0x100000000UL)
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 627d129d7fcb..ca372bbc0ffe 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1236,7 +1236,7 @@ static inline unsigned long mfvtb (void)
" .llong 0\n" \
".previous" \
: "=r" (rval) \
- : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
rval;})
#else
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 329771559cbb..8b3b46b7b0f2 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,22 +44,8 @@ extern void __init dump_numa_cpu_topology(void);
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
-static inline int early_cpu_to_node(int cpu)
-{
- int nid;
-
- nid = numa_cpu_lookup_table[cpu];
-
- /*
- * Fall back to node 0 if nid is unset (it should be, except bugs).
- * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
- */
- return (nid < 0) ? 0 : nid;
-}
#else
-static inline int early_cpu_to_node(int cpu) { return 0; }
-
static inline void dump_numa_cpu_topology(void) {}
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index fe6e800c1357..a20823210ac0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -751,7 +751,7 @@ void __init setup_arch(char **cmdline_p)
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
- return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
+ return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
__pa(MAX_DMA_ADDRESS));
}
@@ -762,7 +762,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- if (early_cpu_to_node(from) == early_cpu_to_node(to))
+ if (cpu_to_node(from) == cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 396dc44e783b..428563b195c3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2687,6 +2687,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
+ unsigned long ebb_regs[3] = {}; /* shut up GCC */
+ unsigned long user_tar = 0;
+ unsigned long proc_fscr = 0;
+ unsigned int user_vrsave;
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -2707,10 +2711,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
run->fail_entry.hardware_entry_failure_reason = 0;
return -EINVAL;
}
+ /* Enable TM so we can read the TM SPRs */
+ mtmsr(mfmsr() | MSR_TM);
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
- current->thread.regs->msr &= ~MSR_TM;
}
#endif
@@ -2736,6 +2741,17 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
flush_vsx_to_thread(current);
+
+ /* Save userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ ebb_regs[0] = mfspr(SPRN_EBBHR);
+ ebb_regs[1] = mfspr(SPRN_EBBRR);
+ ebb_regs[2] = mfspr(SPRN_BESCR);
+ user_tar = mfspr(SPRN_TAR);
+ proc_fscr = mfspr(SPRN_FSCR);
+ }
+ user_vrsave = mfspr(SPRN_VRSAVE);
+
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2757,6 +2773,29 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
} while (is_kvmppc_resume_guest(r));
+ /* Restore userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_EBBHR, ebb_regs[0]);
+ mtspr(SPRN_EBBRR, ebb_regs[1]);
+ mtspr(SPRN_BESCR, ebb_regs[2]);
+ mtspr(SPRN_TAR, user_tar);
+ mtspr(SPRN_FSCR, proc_fscr);
+ }
+ mtspr(SPRN_VRSAVE, user_vrsave);
+
+ /*
+ * Since we don't do lazy TM reload, we need to reload
+ * the TM registers here.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ mtspr(SPRN_TFHAR, current->thread.tm_tfhar);
+ mtspr(SPRN_TFIAR, current->thread.tm_tfiar);
+ mtspr(SPRN_TEXASR, current->thread.tm_texasr);
+ }
+#endif
+
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 1a743f87b37d..ffab9269bfe4 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -36,6 +36,13 @@
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS 112
+#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_CIABR (SFS-16)
+#define STACK_SLOT_DAWR (SFS-24)
+#define STACK_SLOT_DAWRX (SFS-32)
+
/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
@@ -274,10 +281,10 @@ kvm_novcpu_exit:
bl kvmhv_accumulate_time
#endif
13: mr r3, r12
- stw r12, 112-4(r1)
+ stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
- lwz r12, 112-4(r1)
+ lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host
/*
@@ -489,7 +496,7 @@ kvmppc_hv_entry:
*/
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -112(r1)
+ stdu r1, -SFS(r1)
/* Save R1 in the PACA */
std r1, HSTATE_HOST_R1(r13)
@@ -643,6 +650,16 @@ kvmppc_got_guest:
mtspr SPRN_PURR,r7
mtspr SPRN_SPURR,r8
+ /* Save host values of some registers */
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_CIABR
+ mfspr r6, SPRN_DAWR
+ mfspr r7, SPRN_DAWRX
+ std r5, STACK_SLOT_CIABR(r1)
+ std r6, STACK_SLOT_DAWR(r1)
+ std r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
BEGIN_FTR_SECTION
/* Set partition DABR */
/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -1266,8 +1283,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
*/
li r0, 0
mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX, r0
+ mtspr SPRN_PSPB, r0
mtspr SPRN_TCSCR, r0
mtspr SPRN_WORT, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
@@ -1283,6 +1299,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
+ mtspr SPRN_UAMOR, r6
/* Switch DSCR back to host value */
mfspr r8, SPRN_DSCR
@@ -1424,6 +1441,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
slbia
ptesync
+ /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_CIABR(r1)
+ ld r6, STACK_SLOT_DAWR(r1)
+ ld r7, STACK_SLOT_DAWRX(r1)
+ mtspr SPRN_CIABR, r5
+ mtspr SPRN_DAWR, r6
+ mtspr SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
@@ -1533,8 +1560,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
- ld r0, 112+PPC_LR_STKOFF(r1)
- addi r1, r1, 112
+ ld r0, SFS+PPC_LR_STKOFF(r1)
+ addi r1, r1, SFS
mtlr r0
blr
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 4014881e9843..e37162d356d8 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -687,8 +687,10 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
case 19:
switch ((instr >> 1) & 0x3ff) {
case 0: /* mcrf */
- rd = (instr >> 21) & 0x1c;
- ra = (instr >> 16) & 0x1c;
+ rd = 7 - ((instr >> 23) & 0x7);
+ ra = 7 - ((instr >> 18) & 0x7);
+ rd *= 4;
+ ra *= 4;
val = (regs->ccr >> ra) & 0xf;
regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
goto instr_done;
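A quick arithmetic check of the new decode (user-space sketch): condition-register field n, with CR0 in the most significant nibble of the 32-bit CR image, sits at bit shift 4 * (7 - n). Copying CR2 into CR1, say, should move the nibble from shift 20 to shift 24:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ccr = 0x00a00000;        /* CR2 holds 0xa, all else clear */
            int src = 2, dst = 1;
            int ra = 4 * (7 - src);               /* 20: shift of CR2 */
            int rd = 4 * (7 - dst);               /* 24: shift of CR1 */
            unsigned int val = (ccr >> ra) & 0xf; /* 0xa */

            ccr = (ccr & ~(0xfu << rd)) | (val << rd);
            printf("ccr=%#010x\n", ccr);          /* expect 0x0aa00000 */
            return 0;
    }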
@@ -967,6 +969,19 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
#endif
case 19: /* mfcr */
+ if ((instr >> 20) & 1) {
+ imm = 0xf0000000UL;
+ for (sh = 0; sh < 8; ++sh) {
+ if (instr & (0x80000 >> sh)) {
+ regs->gpr[rd] = regs->ccr & imm;
+ break;
+ }
+ imm >>= 4;
+ }
+
+ goto instr_done;
+ }
+
regs->gpr[rd] = regs->ccr;
regs->gpr[rd] &= 0xffffffffUL;
goto instr_done;
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 7c7fcc042549..fb695f142563 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
of_detach_node(np);
of_node_put(parent);
- of_node_put(np); /* Must decrement the refcount */
return 0;
}
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index bab6739a1154..b9eb7b1a49d2 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -154,14 +154,13 @@ extern unsigned int vdso_enabled;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. 64-bit
- tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
- (STACK_TOP / 3 * 2) : \
- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6ba0bf928909..6bc941be6921 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
unsigned long mask = -1UL;
+ /*
+ * No arguments for this syscall, there's nothing to do.
+ */
+ if (!n)
+ return;
+
BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index dd14616b7739..7de207a11014 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
static bool avx2_usable(void)
{
- if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index d262f985bbc8..07cf288b692e 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@ extern int force_personality32;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 690b4027e17c..37db36fddc88 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90
+#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+
#define MSR_IA32_XSS 0x00000da0
#define FEATURE_CONTROL_LOCKED (1<<0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 0b1ff4c1c14e..fffb2794dd89 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -7,6 +7,7 @@
bool pat_enabled(void);
void pat_disable(const char *reason);
extern void pat_init(void);
+extern void init_cache_modes(void);
extern int reserve_memtype(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 4c20dd333412..85133b2b8e99 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -43,6 +43,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/smap.h>
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
@@ -213,10 +214,12 @@ privcmd_call(unsigned call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ stac();
asm volatile("call *%[call]"
: __HYPERCALL_5PARAM
: [call] "a" (&hypercall_page[call])
: __HYPERCALL_CLOBBER5);
+ clac();
return (long)__res;
}
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e75907601a41..1e5eb9f2ff5f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -329,6 +329,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
struct mpc_intsrc mp_irq;
/*
+ * Check bus_irq boundary.
+ */
+ if (bus_irq >= NR_IRQS_LEGACY) {
+ pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+ return;
+ }
+
+ /*
* Convert 'gsi' to 'ioapic.pin'.
*/
ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1e5d2f07416b..fc91c98bee01 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2115,7 +2115,7 @@ static inline void __init check_timer(void)
int idx;
idx = find_irq_entry(apic1, pin1, mp_INT);
if (idx != -1 && irq_trigger(idx))
- unmask_ioapic_irq(irq_get_chip_data(0));
+ unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 62aca448726a..2116176c1721 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -682,6 +682,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
const char *name = th_names[bank];
int err = 0;
+ if (!dev)
+ return -ENODEV;
+
if (is_shared_bank(bank)) {
nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d2bbe343fda7..e67b834279b2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1048,6 +1048,13 @@ void __init setup_arch(char **cmdline_p)
if (mtrr_trim_uncached_memory(max_pfn))
max_pfn = e820_end_of_ram_pfn();
+ /*
+ * This call is required when the CPU does not support PAT. If
+ * mtrr_bp_init() invoked it already via pat_init() the call has no
+ * effect.
+ */
+ init_cache_modes();
+
#ifdef CONFIG_X86_32
/* max_low_pfn get updated here */
find_low_pfn_range();
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9357b29de9bc..83d6369c45f5 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
return ret;
}
+bool kvm_mpx_supported(void)
+{
+ return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+ && kvm_x86_ops->mpx_supported());
+}
+EXPORT_SYMBOL_GPL(kvm_mpx_supported);
+
u64 kvm_supported_xcr0(void)
{
u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
- if (!kvm_x86_ops->mpx_supported())
+ if (!kvm_mpx_supported())
xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
return xcr0;
@@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+ vcpu->arch.eager_fpu = use_eager_fpu();
if (vcpu->arch.eager_fpu)
kvm_x86_ops->fpu_activate(vcpu);
@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
#endif
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
- unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+ unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
/* cpuid 1.edx */
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 3f5c48ddba45..d1534feefcfe 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -4,6 +4,7 @@
#include "x86.h"
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -134,20 +135,20 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_MPX));
+ return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}
-static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
+ return best && (best->ebx & bit(X86_FEATURE_MPX));
}
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bbaa11f4e74b..b12391119ce8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -863,7 +863,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
-static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
@@ -2541,7 +2540,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
@@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
VM_ENTRY_LOAD_IA32_PAT;
vmx->nested.nested_vmx_entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
@@ -2813,7 +2812,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@@ -2890,7 +2890,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+ return 1;
+ if (is_noncanonical_address(data & PAGE_MASK) ||
+ (data & MSR_IA32_BNDCFGS_RSVD))
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
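A hedged sketch of the validity rule the new check enforces: bits 2..11 of BNDCFGS (MSR_IA32_BNDCFGS_RSVD, 0xffc) are reserved and must be clear, and the page-aligned base must be canonical; a 48-bit virtual-address width is assumed here for the canonical test.

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_canonical_48(uint64_t addr)
    {
            uint64_t top = addr >> 47;            /* bits 63..47 */
            return top == 0 || top == 0x1ffff;    /* all clear or all set */
    }

    static bool bndcfgs_valid(uint64_t data)
    {
            const uint64_t rsvd = 0xffcULL;       /* bits 2..11 reserved */

            if (data & rsvd)
                    return false;
            return is_canonical_48(data & ~0xfffULL);  /* base, low 12 bits masked */
    }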
@@ -3363,7 +3367,7 @@ static void init_vmcs_shadow_fields(void)
for (i = j = 0; i < max_shadow_read_write_fields; i++) {
switch (shadow_read_write_fields[i]) {
case GUEST_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported())
continue;
break;
default:
@@ -6253,7 +6257,6 @@ static __init int hardware_setup(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -10265,7 +10268,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
if (nested_cpu_has_xsaves(vmcs12))
vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 27f89c79a44b..423644c230e7 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -80,7 +80,7 @@ ENTRY(copy_user_generic_unrolled)
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_copy_short_string
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -101,7 +101,8 @@ ENTRY(copy_user_generic_unrolled)
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
-17: movl %edx,%ecx
+.L_copy_short_string:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
@@ -215,6 +216,8 @@ ENDPROC(copy_user_generic_string)
*/
ENTRY(copy_user_enhanced_fast_string)
ASM_STAC
+ cmpl $64,%edx
+ jb .L_copy_short_string /* less than 64 bytes, avoid the costly 'rep' */

movl %edx,%ecx
1: rep
movsb
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6ad687d104ca..3f1bb4f93a5a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -36,14 +36,14 @@
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
void pat_disable(const char *reason)
{
- if (!__pat_enabled)
+ if (pat_disabled)
return;
if (boot_cpu_done) {
@@ -51,10 +51,8 @@ void pat_disable(const char *reason)
return;
}
- __pat_enabled = 0;
+ pat_disabled = true;
pr_info("x86/PAT: %s\n", reason);
-
- init_cache_modes();
}
static int __init nopat(char *str)
@@ -66,7 +64,7 @@ early_param("nopat", nopat);
bool pat_enabled(void)
{
- return !!__pat_enabled;
+ return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);
@@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat)
update_cache_mode_entry(i, cache);
}
pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+ init_cm_done = true;
}
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat)
}
wrmsrl(MSR_IA32_CR_PAT, pat);
+ pat_initialized = true;
__init_cache_modes(pat);
}
@@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-static void init_cache_modes(void)
+void init_cache_modes(void)
{
u64 pat = 0;
- static int init_cm_done;
if (init_cm_done)
return;
@@ -286,8 +286,6 @@ static void init_cache_modes(void)
}
__init_cache_modes(pat);
-
- init_cm_done = 1;
}
/**
@@ -305,10 +303,8 @@ void pat_init(void)
u64 pat;
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (!pat_enabled()) {
- init_cache_modes();
+ if (pat_disabled)
return;
- }
if ((c->x86_vendor == X86_VENDOR_INTEL) &&
(((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 0c2fae8d929d..73eb7fd4aec4 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
- sort_relocs(&relocs16);
sort_relocs(&relocs32);
#if ELF_BITS == 64
sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
+#else
+ sort_relocs(&relocs16);
#endif
/* Print the relocations */
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0c0468869e25..52154ef21b5e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -245,6 +245,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
+ if (!authsize)
+ goto decrypt;
+
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
@@ -253,6 +256,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
+decrypt:
+
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 5ea5dc219f56..73c9c7fa9001 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
if (check_children && list_empty(&adev->children))
return -ENODEV;
- return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ /*
+ * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+ * device ID, it is better to make it look less attractive here, so that
+ * the other device with the same _ADR value (that may not have a valid
+ * device ID) can be matched going forward. [This means a second spec
+ * violation in a row, so whatever we do here is best effort anyway.]
+ */
+ return sta_present && list_empty(&adev->pnp.ids) ?
+ FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b072c58..4b7c726bb560 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I$(src) # needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 0c3cf182e351..71ebe36577c6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,17 +15,49 @@
*
*/
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ * binder_proc_lock() and binder_proc_unlock() are
+ * used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ * binder_node_lock() and binder_node_unlock() are
+ * used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ * (proc->threads, proc->waiting_threads, proc->nodes)
+ * and all todo lists associated with the binder_proc
+ * (proc->todo, thread->todo, proc->delivered_death and
+ * node->async_todo), as well as thread->transaction_stack
+ * binder_inner_proc_lock() and binder_inner_proc_unlock()
+ * are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
-#include <linux/atomic.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
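Illustrative fragment only (not part of the patch): taking the three locks in the order the overview above documents, using the acq/rel helpers it names; the structures involved are the ones introduced later in this diff.

    static void example_locked_update(struct binder_proc *proc,
                                      struct binder_node *node)
    {
            binder_proc_lock(proc);          /* 1) proc->outer_lock */
            binder_node_lock(node);          /* 2) node->lock */
            binder_inner_proc_lock(proc);    /* 3) proc->inner_lock */

            /* ... touch todo lists / transaction_stack here ... */

            binder_inner_proc_unlock(proc);  /* release in reverse order */
            binder_node_unlock(node);
            binder_proc_unlock(proc);
    }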
@@ -35,23 +67,32 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"
+static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_deferred_lock);
+
static HLIST_HEAD(binder_devices);
+static HLIST_HEAD(binder_procs);
+static DEFINE_MUTEX(binder_procs_lock);
+
+static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-atomic_t binder_last_id;
+static atomic_t binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -97,17 +138,13 @@ enum {
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
- BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
+ BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
@@ -164,30 +201,27 @@ enum binder_stat_types {
};
struct binder_stats {
- int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_REPLY_SG) + 1];
-};
-
-/* These are still global, since it's not always easy to get the context */
-struct binder_obj_stats {
+ atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
};
-static struct binder_obj_stats binder_obj_stats;
+static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
- atomic_inc(&binder_obj_stats.obj_deleted[type]);
+ atomic_inc(&binder_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
- atomic_inc(&binder_obj_stats.obj_created[type]);
+ atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
int debug_id;
+ int debug_id_done;
int call_type;
int from_proc;
int from_thread;
@@ -197,48 +231,45 @@ struct binder_transaction_log_entry {
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
const char *context_name;
};
struct binder_transaction_log {
- int next;
- int full;
+ atomic_t cur;
+ bool full;
struct binder_transaction_log_entry entry[32];
};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+ unsigned int cur = atomic_inc_return(&log->cur);
- e = &log->entry[log->next];
- memset(e, 0, sizeof(*e));
- log->next++;
- if (log->next == ARRAY_SIZE(log->entry)) {
- log->next = 0;
+ if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
- }
+ e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+ WRITE_ONCE(e->debug_id_done, 0);
+ /*
+ * write-barrier to synchronize access to e->debug_id_done.
+ * We make sure the initialized 0 value is seen before
+ * the other fields are zeroed by memset().
+ */
+ smp_wmb();
+ memset(e, 0, sizeof(*e));
return e;
}
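For context, a hedged sketch of the reader side this smp_wmb() pairs with (modelled on how the transaction log is later dumped, not code from this hunk): sample debug_id_done around the copy with smp_rmb() in between, and treat the entry as incomplete if it changed underneath the reader.

    static bool log_entry_complete(struct binder_transaction_log_entry *e,
                                   struct binder_transaction_log_entry *copy)
    {
            int debug_id = READ_ONCE(e->debug_id_done);

            smp_rmb();      /* pairs with the writer's smp_wmb() above */
            *copy = *e;
            smp_rmb();
            return debug_id && debug_id == READ_ONCE(e->debug_id_done);
    }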
struct binder_context {
struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+
kuid_t binder_context_mgr_uid;
const char *name;
-
- struct mutex binder_main_lock;
- struct mutex binder_deferred_lock;
- struct mutex binder_mmap_lock;
-
- struct hlist_head binder_procs;
- struct hlist_head binder_dead_nodes;
- struct hlist_head binder_deferred_list;
-
- struct work_struct deferred_work;
- struct workqueue_struct *binder_deferred_workqueue;
- struct binder_transaction_log transaction_log;
- struct binder_transaction_log transaction_log_failed;
-
- struct binder_stats binder_stats;
};
struct binder_device {
@@ -247,11 +278,20 @@ struct binder_device {
struct binder_context context;
};
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
struct list_head entry;
+
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -259,8 +299,76 @@ struct binder_work {
} type;
};
+struct binder_error {
+ struct binder_work work;
+ uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ * (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ * (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ * (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ * (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ * (invariant after initialized)
+ * @refs: list of references on this node
+ * (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ * initiating a transaction
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @tmp_refs: temporary kernel refs
+ * (protected by @proc->inner_lock while @proc
+ * is valid, and by binder_dead_nodes_lock
+ * if @proc is NULL. During inc/dec and node release
+ * it is also protected by @lock to provide safety
+ * as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ * (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ * (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
+ * @sched_policy: minimum scheduling policy for node
+ * (invariant after initialized)
+ * @accept_fds: file descriptor operations supported for node
+ * (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ * (invariant after initialized)
+ * @inherit_rt: inherit RT scheduling policy from caller
+ * (invariant after initialized)
+ * @async_todo: list of async work items
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
int debug_id;
+ spinlock_t lock;
struct binder_work work;
union {
struct rb_node rb_node;
@@ -271,88 +379,185 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
+ int tmp_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
- unsigned has_strong_ref:1;
- unsigned pending_strong_ref:1;
- unsigned has_weak_ref:1;
- unsigned pending_weak_ref:1;
- unsigned has_async_transaction:1;
- unsigned accept_fds:1;
- unsigned min_priority:8;
+ struct {
+ /*
+ * bitfield elements protected by
+ * proc inner_lock
+ */
+ u8 has_strong_ref:1;
+ u8 pending_strong_ref:1;
+ u8 has_weak_ref:1;
+ u8 pending_weak_ref:1;
+ };
+ struct {
+ /*
+ * invariant after initialization
+ */
+ u8 sched_policy:2;
+ u8 inherit_rt:1;
+ u8 accept_fds:1;
+ u8 min_priority;
+ };
+ bool has_async_transaction;
struct list_head async_todo;
};
struct binder_ref_death {
+ /**
+ * @work: worklist element for death notifications
+ * (protected by inner_lock of the proc that
+ * this ref belongs to)
+ */
struct binder_work work;
binder_uintptr_t cookie;
};
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+ int debug_id;
+ uint32_t desc;
+ int strong;
+ int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ * ref for deletion in binder_cleanup_ref, a non-NULL
+ * @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ * (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
- int debug_id;
+ struct binder_ref_data data;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
- uint32_t desc;
- int strong;
- int weak;
struct binder_ref_death *death;
};
-struct binder_buffer {
- struct list_head entry; /* free and allocated entries by address */
- struct rb_node rb_node; /* free entry by size or allocated entry */
- /* by address */
- unsigned free:1;
- unsigned allow_user_free:1;
- unsigned async_transaction:1;
- unsigned debug_id:29;
-
- struct binder_transaction *transaction;
-
- struct binder_node *target_node;
- size_t data_size;
- size_t offsets_size;
- size_t extra_buffers_size;
- uint8_t data[0];
-};
-
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy scheduler policy
+ * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+ unsigned int sched_policy;
+ int prio;
+};
+
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ * (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ * this proc ordered by node->ptr
+ * (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ * (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ * (protected by @outer_lock)
+ * @waiting_threads: threads currently waiting for proc work
+ * (protected by @inner_lock)
+ * @pid PID of group_leader of process
+ * (invariant after initialized)
+ * @tsk task_struct for group_leader of process
+ * (invariant after initialized)
+ * @files files_struct for process
+ * (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ * (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ * (protected by binder_deferred_lock)
+ * @is_dead: process is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @inner_lock)
+ * @todo: list of work for this process
+ * (protected by @inner_lock)
+ * @wait: wait queue head to wait for proc work
+ * (invariant after initialized)
+ * @stats: per-process binder statistics
+ * (atomics, no lock needed)
+ * @delivered_death: list of delivered death notification
+ * (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ * (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ * yet started. In current implementation, can
+ * only be 0 or 1.
+ * (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ * (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ * (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ * (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ * (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock: no nesting under inner or node lock
+ * Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
+ struct list_head waiting_threads;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
-
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
+ bool is_dead;
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -360,10 +565,13 @@ struct binder_proc {
int max_threads;
int requested_threads;
int requested_threads_started;
- int ready_threads;
- long default_priority;
+ int tmp_ref;
+ struct binder_priority default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
+ spinlock_t inner_lock;
+ spinlock_t outer_lock;
};
enum {
@@ -372,22 +580,60 @@ enum {
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+ BINDER_LOOPER_STATE_POLL = 0x20,
};
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ * (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ * (protected by @proc->inner_lock)
+ * @waiting_thread_node: element for @proc->waiting_threads list
+ * (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ * (invariant after initialization)
+ * @looper: bitmap of looping state
+ * (only accessed by this thread)
+ * @looper_need_return: looping thread needs to exit driver
+ * (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ * (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ * (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ * (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ * (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ * (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @proc->inner_lock)
+ * @task: struct task_struct for this thread
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
+ struct list_head waiting_thread_node;
int pid;
- int looper;
+ int looper; /* only modified by this thread */
+ bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- uint32_t return_error; /* Write failed, return error code in read buf */
- uint32_t return_error2; /* Write failed, return error code in read */
- /* buffer. Used when sending a reply to a dead process that */
- /* we are also waiting on */
+ struct binder_error return_error;
+ struct binder_error reply_error;
wait_queue_head_t wait;
struct binder_stats stats;
+ atomic_t tmp_ref;
+ bool is_dead;
+ struct task_struct *task;
};
struct binder_transaction {
@@ -404,20 +650,263 @@ struct binder_transaction {
struct binder_buffer *buffer;
unsigned int code;
unsigned int flags;
- long priority;
- long saved_priority;
+ struct binder_priority priority;
+ struct binder_priority saved_priority;
+ bool set_priority_called;
kuid_t sender_euid;
+ /**
+ * @lock: protects @from, @to_proc, and @to_thread
+ *
+ * @from, @to_proc, and @to_thread can be set to NULL
+ * during thread teardown
+ */
+ spinlock_t lock;
};
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc is set, also acquires
+ * proc->inner_lock. Used to protect binder_node fields
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Release locks acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
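The helpers above encode the lock order documented in struct binder_proc: 1) outer, 2) node, 3) inner. A minimal caller sketch follows (not part of the patch; example_walk_node_locked() is hypothetical and only illustrates the nesting, assuming the binder.c context above):

/* Illustrative only -- not part of the patch */
static void example_walk_node_locked(struct binder_proc *proc,
				     struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) outer lock */
	binder_node_lock(node);		/* 2) node lock */
	binder_inner_proc_lock(proc);	/* 3) inner lock */

	/* touch fields protected by the node/inner locks here */

	binder_inner_proc_unlock(proc);	/* release in reverse order */
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}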
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+ return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc: binder_proc associated with list
+ * @list: list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+ struct list_head *list)
+{
+ bool ret;
+
+ binder_inner_proc_lock(proc);
+ ret = binder_worklist_empty_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+ struct list_head *target_list)
+{
+ BUG_ON(target_list == NULL);
+ BUG_ON(work->entry.next && !list_empty(&work->entry));
+ list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
+{
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+ list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(work);
+ binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ w = list_first_entry_or_null(list, struct binder_work, entry);
+ if (w)
+ list_del_init(&w->entry);
+ return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc: binder_proc associated with list
+ * @list: list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer to the dequeued binder_work, or NULL if the list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+ struct binder_proc *proc,
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return w;
+}
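The worklist helpers wrap plain list operations in proc->inner_lock so callers never touch todo lists unlocked. A usage sketch (not part of the patch; example_post_work() and example_drain_work() are hypothetical):

/* Illustrative only -- not part of the patch */
static void example_post_work(struct binder_proc *proc, struct binder_work *w)
{
	/* takes and releases proc->inner_lock internally */
	binder_enqueue_work(proc, w, &proc->todo);
}

static void example_drain_work(struct binder_proc *proc)
{
	struct binder_work *w;

	/* returns NULL once the list is empty */
	while ((w = binder_dequeue_work_head(proc, &proc->todo)) != NULL)
		; /* dispatch on w->type here */
}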
+
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
- int ret;
if (files == NULL)
return -ESRCH;
@@ -428,11 +917,7 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- preempt_enable_no_resched();
- ret = __alloc_fd(files, 0, rlim_cur, flags);
- preempt_disable();
-
- return ret;
+ return __alloc_fd(files, 0, rlim_cur, flags);
}
/*
@@ -441,11 +926,8 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- if (proc->files) {
- preempt_enable_no_resched();
+ if (proc->files)
__fd_install(proc->files, fd, file);
- preempt_disable();
- }
}
/*
@@ -469,526 +951,281 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
return retval;
}
-static inline void binder_lock(struct binder_context *context, const char *tag)
-{
- trace_binder_lock(tag);
- mutex_lock(&context->binder_main_lock);
- preempt_disable();
- trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(struct binder_context *context,
- const char *tag)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+ bool do_proc_work)
{
- trace_binder_unlock(tag);
- mutex_unlock(&context->binder_main_lock);
- preempt_enable();
+ return !binder_worklist_empty_ilocked(&thread->todo) ||
+ thread->looper_need_return ||
+ (do_proc_work &&
+ !binder_worklist_empty_ilocked(&thread->proc->todo));
}
-static inline void *kzalloc_preempt_disabled(size_t size)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
- void *ptr;
+ bool has_work;
- ptr = kzalloc(size, GFP_NOWAIT);
- if (ptr)
- return ptr;
+ binder_inner_proc_lock(thread->proc);
+ has_work = binder_has_work_ilocked(thread, do_proc_work);
+ binder_inner_proc_unlock(thread->proc);
- preempt_enable_no_resched();
- ptr = kzalloc(size, GFP_KERNEL);
- preempt_disable();
-
- return ptr;
-}
-
-static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
-{
- long ret;
-
- preempt_enable_no_resched();
- ret = copy_to_user(to, from, n);
- preempt_disable();
- return ret;
+ return has_work;
}
-static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
- long ret;
-
- preempt_enable_no_resched();
- ret = copy_from_user(to, from, n);
- preempt_disable();
- return ret;
+ return !thread->transaction_stack &&
+ binder_worklist_empty_ilocked(&thread->todo) &&
+ (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+ BINDER_LOOPER_STATE_REGISTERED));
}
-#define get_user_preempt_disabled(x, ptr) \
-({ \
- int __ret; \
- preempt_enable_no_resched(); \
- __ret = get_user(x, ptr); \
- preempt_disable(); \
- __ret; \
-})
-
-#define put_user_preempt_disabled(x, ptr) \
-({ \
- int __ret; \
- preempt_enable_no_resched(); \
- __ret = put_user(x, ptr); \
- preempt_disable(); \
- __ret; \
-})
-
-static void binder_set_nice(long nice)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+ bool sync)
{
- long min_nice;
+ struct rb_node *n;
+ struct binder_thread *thread;
- if (can_nice(current, nice)) {
- set_user_nice(current, nice);
- return;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ binder_available_for_proc_work_ilocked(thread)) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
+ else
+ wake_up_interruptible(&thread->wait);
+ }
}
- min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
- binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
- current->pid, nice, min_nice);
- set_user_nice(current, min_nice);
- if (min_nice <= MAX_NICE)
- return;
- binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
-static size_t binder_buffer_size(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
-}
-
-static void binder_insert_free_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc: process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return: If there's a thread currently waiting for process work,
+ * returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
-
- BUG_ON(!new_buffer->free);
-
- new_buffer_size = binder_buffer_size(proc, new_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
- proc->pid, new_buffer_size, new_buffer);
+ struct binder_thread *thread;
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
+ assert_spin_locked(&proc->inner_lock);
+ thread = list_first_entry_or_null(&proc->waiting_threads,
+ struct binder_thread,
+ waiting_thread_node);
- buffer_size = binder_buffer_size(proc, buffer);
+ if (thread)
+ list_del_init(&thread->waiting_thread_node);
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ return thread;
}
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc: process to wake up a thread in
+ * @thread: specific thread to wake-up (may be NULL)
+ * @sync: whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake-up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+ struct binder_thread *thread,
+ bool sync)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
-
- BUG_ON(new_buffer->free);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
+ assert_spin_locked(&proc->inner_lock);
- if (new_buffer < buffer)
- p = &parent->rb_left;
- else if (new_buffer > buffer)
- p = &parent->rb_right;
+ if (thread) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
else
- BUG();
+ wake_up_interruptible(&thread->wait);
+ return;
}
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+
+ /* Didn't find a thread waiting for proc work; this can happen
+ * in two scenarios:
+ * 1. All threads are busy handling transactions
+ * In that case, one of those threads should call back into
+ * the kernel driver soon and pick up this work.
+ * 2. Threads are using the (e)poll interface, in which case
+ * they may be blocked on the waitqueue without having been
+ * added to waiting_threads. For this case, we just iterate
+ * over all threads not handling transaction work, and
+ * wake them all up. We wake all because we don't know whether
+ * a thread that called into (e)poll is handling non-binder
+ * work currently.
+ */
+ binder_wakeup_poll_threads_ilocked(proc, sync);
}
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- uintptr_t user_ptr)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
-
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ struct binder_thread *thread = binder_select_thread_ilocked(proc);
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (kern_ptr < buffer)
- n = n->rb_left;
- else if (kern_ptr > buffer)
- n = n->rb_right;
- else
- return buffer;
- }
- return NULL;
+ binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
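binder_wakeup_proc_ilocked() shows the asynchronous case; a synchronous wake-up follows the same select-then-wake pattern described in the comments above. A sketch (not part of the patch; example_wakeup_sync() is hypothetical):

/* Illustrative only -- not part of the patch */
static void example_wakeup_sync(struct binder_proc *proc)
{
	struct binder_thread *thread;

	binder_inner_proc_lock(proc);
	/* removes the thread from waiting_threads, so it must be woken */
	thread = binder_select_thread_ilocked(proc);
	binder_wakeup_thread_ilocked(proc, thread, /* sync = */true);
	binder_inner_proc_unlock(proc);
}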
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+static bool is_rt_policy(int policy)
{
- void *page_addr;
- unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
- allocate ? "allocate" : "free", start, end);
+ return policy == SCHED_FIFO || policy == SCHED_RR;
+}
- if (end <= start)
- return 0;
+static bool is_fair_policy(int policy)
+{
+ return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+}
- trace_binder_update_page_range(proc, allocate, start, end);
+static bool binder_supported_policy(int policy)
+{
+ return is_fair_policy(policy) || is_rt_policy(policy);
+}
- if (vma)
- mm = NULL;
+static int to_userspace_prio(int policy, int kernel_priority)
+{
+ if (is_fair_policy(policy))
+ return PRIO_TO_NICE(kernel_priority);
else
- mm = get_task_mm(proc->tsk);
+ return MAX_USER_RT_PRIO - 1 - kernel_priority;
+}
- preempt_enable_no_resched();
+static int to_kernel_prio(int policy, int user_priority)
+{
+ if (is_fair_policy(policy))
+ return NICE_TO_PRIO(user_priority);
+ else
+ return MAX_USER_RT_PRIO - 1 - user_priority;
+}
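to_userspace_prio() and to_kernel_prio() convert between the user-visible nice/rtprio values and the kernel priority scale stored in struct binder_priority. A round-trip sketch (not part of the patch; example_prio_mapping() is hypothetical):

/* Illustrative only -- not part of the patch */
static void example_prio_mapping(void)
{
	int k_normal = to_kernel_prio(SCHED_NORMAL, 0);	/* nice 0 -> 120 */
	int k_fifo = to_kernel_prio(SCHED_FIFO, 50);	/* rtprio 50 -> 49 */

	/* converting back recovers the original userspace values */
	WARN_ON(to_userspace_prio(SCHED_NORMAL, k_normal) != 0);
	WARN_ON(to_userspace_prio(SCHED_FIFO, k_fifo) != 50);
}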
- if (mm) {
- down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- proc->pid);
- vma = NULL;
- }
- }
+static void binder_do_set_priority(struct task_struct *task,
+ struct binder_priority desired,
+ bool verify)
+{
+ int priority; /* user-space prio value */
+ bool has_cap_nice;
+ unsigned int policy = desired.sched_policy;
- if (allocate == 0)
- goto free_range;
+ if (task->policy == policy && task->normal_prio == desired.prio)
+ return;
- if (vma == NULL) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- proc->pid);
- goto err_no_vma;
- }
+ has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
+ priority = to_userspace_prio(policy, desired.prio);
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ if (verify && is_rt_policy(policy) && !has_cap_nice) {
+ long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
- proc->pid, page_addr);
- goto err_alloc_page_failed;
+ if (max_rtprio == 0) {
+ policy = SCHED_NORMAL;
+ priority = MIN_NICE;
+ } else if (priority > max_rtprio) {
+ priority = max_rtprio;
}
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
- proc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- proc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- /* vm_insert_page does not seem to increment the refcount */
- }
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
}
- preempt_disable();
+ if (verify && is_fair_policy(policy) && !has_cap_nice) {
+ long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
- return 0;
-
-free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE, NULL);
-err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
-err_alloc_page_failed:
- ;
- }
-err_no_vma:
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
-
- preempt_disable();
-
- return -ENOMEM;
-}
-
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
-{
- struct rb_node *n = proc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
- size_t size, data_offsets_size;
-
- if (proc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- proc->pid);
- return NULL;
- }
-
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
-
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
- proc->pid, data_size, offsets_size);
- return NULL;
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
- proc->pid, extra_buffers_size);
- return NULL;
- }
- if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- proc->pid, size);
- return NULL;
- }
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
+ if (min_nice > MAX_NICE) {
+ binder_user_error("%d RLIMIT_NICE not set\n",
+ task->pid);
+ return;
+ } else if (priority < min_nice) {
+ priority = min_nice;
}
}
- if (best_fit == NULL) {
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- proc->pid, size);
- return NULL;
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_buffer_size(proc, buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
- proc->pid, size, buffer, buffer_size);
+ if (policy != desired.sched_policy ||
+ to_kernel_prio(policy, priority) != desired.prio)
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: priority %d not allowed, using %d instead\n",
+ task->pid, desired.prio,
+ to_kernel_prio(policy, priority));
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
- end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- if (binder_update_page_range(proc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
- return NULL;
+ /* Set the actual priority */
+ if (task->policy != policy || is_rt_policy(policy)) {
+ struct sched_param params;
- rb_erase(best_fit, &proc->free_buffers);
- buffer->free = 0;
- binder_insert_allocated_buffer(proc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ params.sched_priority = is_rt_policy(policy) ? priority : 0;
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(proc, new_buffer);
+ sched_setscheduler_nocheck(task,
+ policy | SCHED_RESET_ON_FORK,
+ &params);
}
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
- proc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->async_transaction = is_async;
- if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
-
- return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ if (is_fair_policy(policy))
+ set_user_nice(task, priority);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void binder_set_priority(struct task_struct *task,
+ struct binder_priority desired)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ binder_do_set_priority(task, desired, /* verify = */ true);
}
-static void binder_delete_free_buffer(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_restore_priority(struct task_struct *task,
+ struct binder_priority desired)
{
- struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
- BUG_ON(proc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
- BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
-
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
- }
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
- proc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(proc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
- }
+ binder_do_set_priority(task, desired, /* verify = */ false);
}
-static void binder_free_buf(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_transaction_priority(struct task_struct *task,
+ struct binder_transaction *t,
+ struct binder_priority node_prio,
+ bool inherit_rt)
{
- size_t size, buffer_size;
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
+	struct binder_priority desired_prio = t->priority;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
- proc->pid, buffer, size, buffer_size);
-
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+ if (t->set_priority_called)
+ return;
- if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
+ t->set_priority_called = true;
+ t->saved_priority.sched_policy = task->policy;
+ t->saved_priority.prio = task->normal_prio;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
+ desired_prio.prio = NICE_TO_PRIO(0);
+ desired_prio.sched_policy = SCHED_NORMAL;
+ } else {
+ desired_prio.prio = t->priority.prio;
+ desired_prio.sched_policy = t->priority.sched_policy;
}
- binder_update_page_range(proc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
-
- if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
- binder_delete_free_buffer(proc, next);
- }
- }
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
-
- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
+ if (node_prio.prio < t->priority.prio ||
+ (node_prio.prio == t->priority.prio &&
+ node_prio.sched_policy == SCHED_FIFO)) {
+ /*
+ * In case the minimum priority on the node is
+ * higher (lower value), use that priority. If
+ * the priority is the same, but the node uses
+ * SCHED_FIFO, prefer SCHED_FIFO, since it can
+ * run unbounded, unlike SCHED_RR.
+ */
+ desired_prio = node_prio;
}
- binder_insert_free_buffer(proc, buffer);
+
+ binder_set_priority(task, desired_prio);
}
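binder_transaction_priority() records the target's current policy/priority in t->saved_priority before applying the (possibly node-capped) transaction priority, so the caller is expected to restore it after the work is done. A pairing sketch (not part of the patch; example_run_transaction() is hypothetical):

/* Illustrative only -- not part of the patch */
static void example_run_transaction(struct binder_transaction *t,
				    struct binder_priority node_prio,
				    bool inherit_rt)
{
	binder_transaction_priority(current, t, node_prio, inherit_rt);

	/* ... handle the transaction at the inherited priority ... */

	binder_restore_priority(current, t->saved_priority);
}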
-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+ assert_spin_locked(&proc->inner_lock);
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -996,21 +1233,47 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure node stays alive until
+ * call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+ s8 priority;
+
+ assert_spin_locked(&proc->inner_lock);
while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -1018,14 +1281,19 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc_preempt_disabled(sizeof(*node));
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
node->debug_id = atomic_inc_return(&binder_last_id);
@@ -1033,18 +1301,58 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
+		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+ node->min_priority = to_kernel_prio(node->sched_policy, priority);
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
+ spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
+
return node;
}
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
- struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ struct flat_binder_object *fp)
+{
+ struct binder_node *node;
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (!new_node)
+ return NULL;
+ binder_inner_proc_lock(proc);
+ node = binder_init_node_ilocked(proc, new_node, fp);
+ binder_inner_proc_unlock(proc);
+ if (node != new_node)
+ /*
+ * The node was already added by another thread
+ */
+ kfree(new_node);
+
+ return node;
+}
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -1061,8 +1369,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -1073,58 +1381,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
- wake_up_interruptible(&node->proc->wait);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
+ return false;
+}
- return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
}
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take reference on node to prevent the node from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (e.g. traversing the dead node list to
+ * print nodes)
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node() to check if all refcounts are 0
+ * and cleanup is needed. Calling with strong=0 and internal=1
+ * causes no actual reference to be released in binder_dec_node().
+ * If that changes, a change is needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
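binder_get_node() now returns the node with a temporary reference held (see binder_get_node_ilocked()), so every successful lookup must be balanced with binder_put_node(). A sketch (not part of the patch; example_use_node() is hypothetical):

/* Illustrative only -- not part of the patch */
static int example_use_node(struct binder_proc *proc, binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return -ENOENT;

	/* node cannot be freed while the tmp_ref is held */

	binder_put_node(node);
	return 0;
}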
+
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1132,11 +1551,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1146,14 +1565,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
return NULL;
}
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * into the given proc rb_trees and node refs list.
+ *
+ * Return: the ref for node. It is possible that another thread
+ * allocated/initialized the ref first in which case the
+ * returned ref would be different than the passed-in
+ * new_ref. new_ref must be kfree'd by the caller in
+ * this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
- struct binder_context *context = proc->context;
+ struct binder_ref *ref;
+ struct rb_node *n;
while (*p) {
parent = *p;
@@ -1166,22 +1605,22 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
- new_ref = kzalloc_preempt_disabled(sizeof(*ref));
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = atomic_inc_return(&binder_last_id);
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}
p = &proc->refs_by_desc.rb_node;
@@ -1189,121 +1628,423 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0, if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
- }
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}
-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node, or NULL if not found or if the ref is not strong when a strong ref is required
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to "increment" arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
+ }
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
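A reference created through binder_inc_ref_for_node() is identified to userspace by rdata.desc and is later dropped through that handle. A round-trip sketch (not part of the patch; example_ref_round_trip() is hypothetical, and target_list would typically be the calling thread's todo list):

/* Illustrative only -- not part of the patch */
static int example_ref_round_trip(struct binder_proc *proc,
				  struct binder_node *node,
				  struct list_head *target_list)
{
	struct binder_ref_data rdata;
	int ret;

	ret = binder_inc_ref_for_node(proc, node, true, target_list, &rdata);
	if (ret)
		return ret;

	/* rdata.desc is the handle userspace would use for this node */

	return binder_dec_ref_for_handle(proc, rdata.desc, true, &rdata);
}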
+
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ assert_spin_locked(&target_thread->proc->inner_lock);
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+	 * tmp_ref is atomic since it can be incremented without holding
+	 * proc->inner_lock (see binder_get_txn_from()); the thread is
+	 * freed only once it is dead and the count drops to zero
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
}
- t->need_reply = 0;
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in-use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (proc has been released, all threads have
+ * been released and not currently in use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
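
Both tmp_ref counters follow the same pattern: pin the object under the inner lock, drop the lock while working on it, then call the matching *_dec_tmpref() helper, which frees the object only once it is dead and unused. A minimal sketch for the proc side, assuming the fields used above; the function name is illustrative:

/* Illustrative sketch only: keep @proc alive across a section where no
 * locks are held.  proc->tmp_ref is protected by the inner lock, as in
 * binder_proc_dec_tmpref() above.
 */
static void binder_pin_proc_example(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref++;			/* pin the proc */
	binder_inner_proc_unlock(proc);

	/* ... use proc with no locks held ... */

	binder_proc_dec_tmpref(proc);		/* may free the proc */
}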
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
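
Every successful binder_get_txn_from() must be paired with binder_thread_dec_tmpref() once the sender thread is no longer needed. A minimal sketch, assuming the structures above; the helper name is illustrative:

/* Illustrative sketch only: read the sender's pid without racing
 * against the sender thread being freed.
 */
static int binder_txn_sender_pid_example(struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from(t);
	int pid = 0;

	if (from) {
		pid = from->pid;
		binder_thread_dec_tmpref(from);	/* drop the temp ref */
	}
	return pid;
}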
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_thread_dec_tmpref() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
+ }
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
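
When the locked variant returns a non-NULL thread, the caller owns both the temporary reference and from->proc's inner lock, and must release them in that order once finished, exactly as binder_send_failed_reply() does below. A minimal sketch of that pairing; the placeholder body is illustrative:

/* Illustrative sketch only: operate on t->from while it is guaranteed
 * to stay alive and linked to @t.
 */
static void binder_with_sender_example(struct binder_transaction *t)
{
	struct binder_thread *target;

	target = binder_get_txn_from_and_acq_inner(t);
	if (!target)
		return;			/* sender already gone */

	/* target->proc's inner lock is held here */
	/* ... inspect or update sender state ... */

	binder_inner_proc_unlock(target->proc);
	binder_thread_dec_tmpref(target);	/* release the temp ref */
}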
+
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1318,30 +2059,28 @@ static void binder_send_failed_reply(struct binder_transaction *t,
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
-
- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1350,7 +2089,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
"send failed reply for transaction %d, target dead\n",
t->debug_id);
- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1559,25 +2298,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
- struct binder_ref *ref;
+ struct binder_ref_data rdata;
+ int ret;
fp = to_flat_binder_object(hdr);
- ref = binder_get_ref(proc, fp->handle,
- hdr->type == BINDER_TYPE_HANDLE);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);
- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
@@ -1616,7 +2356,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1648,102 +2389,122 @@ static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_thread *thread)
{
struct binder_node *node;
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)fp->binder,
node->debug_id, (u64)fp->cookie,
(u64)node->cookie);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
- ref = binder_get_ref_for_node(target_proc, node);
- if (!ref)
- return -EINVAL;
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
- fp->handle = ref->desc;
+ fp->handle = rdata.desc;
fp->cookie = 0;
- binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
- trace_binder_transaction_node_to_ref(t, node, ref);
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
-
- return 0;
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
- ref = binder_get_ref(proc, fp->handle,
- fp->hdr.type == BINDER_TYPE_HANDLE);
- if (!ref) {
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- if (ref->node->proc == target_proc) {
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
else
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
} else {
- struct binder_ref *new_ref;
+ struct binder_ref_data dest_rdata;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (!new_ref)
- return -EINVAL;
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
fp->binder = 0;
- fp->handle = new_ref->desc;
+ fp->handle = dest_rdata.desc;
fp->cookie = 0;
- binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
- NULL);
- trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
}
- return 0;
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_fd(int fd,
@@ -1834,7 +2595,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1902,12 +2664,87 @@ static int binder_fixup_parent(struct binder_transaction *t,
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
}
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t: transaction to send
+ * @proc: process to send the transaction to
+ * @thread: thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * waitqueue.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the todo list of that specific thread.
+ *
+ * Return: true if the transaction was successfully queued
+ * false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+ struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct list_head *target_list = NULL;
+ struct binder_node *node = t->buffer->target_node;
+ struct binder_priority node_prio;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ bool wakeup = true;
+
+ BUG_ON(!node);
+ binder_node_lock(node);
+ node_prio.prio = node->min_priority;
+ node_prio.sched_policy = node->sched_policy;
+
+ if (oneway) {
+ BUG_ON(thread);
+ if (node->has_async_transaction) {
+ target_list = &node->async_todo;
+ wakeup = false;
+ } else {
+ node->has_async_transaction = 1;
+ }
+ }
+
+ binder_inner_proc_lock(proc);
+
+ if (proc->is_dead || (thread && thread->is_dead)) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ return false;
+ }
+
+ if (!thread && !target_list)
+ thread = binder_select_thread_ilocked(proc);
+
+ if (thread) {
+ target_list = &thread->todo;
+ binder_transaction_priority(thread->task, t, node_prio,
+ node->inherit_rt);
+ } else if (!target_list) {
+ target_list = &proc->todo;
+ } else {
+ BUG_ON(target_list != &node->async_todo);
+ }
+
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
+ binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+
+ return true;
+}
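
binder_proc_transaction() is how the reworked binder_transaction() below hands completed work to the target: one-way work may be parked on node->async_todo, while synchronous work always lands on a thread's or the proc's todo list with a wakeup. A minimal sketch of the synchronous caller side, assuming t->buffer->target_node is already set up as the code below does; the wrapper and its -ESRCH mapping are illustrative:

/* Illustrative sketch only: queue @t the way the synchronous path in
 * binder_transaction() below does, reporting a dead target as an error.
 */
static int binder_queue_txn_example(struct binder_transaction *t,
				    struct binder_proc *target_proc,
				    struct binder_thread *target_thread)
{
	t->work.type = BINDER_WORK_TRANSACTION;

	/* @target_thread may be NULL; a waiting thread is then picked,
	 * or the work lands on target_proc->todo.
	 */
	if (!binder_proc_transaction(t, target_proc, target_thread))
		return -ESRCH;		/* process or thread is dead */

	return 0;
}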
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -1919,19 +2756,21 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
- struct binder_proc *target_proc;
+ struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
- struct list_head *target_list;
- wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error = 0;
+ uint32_t return_error_param = 0;
+ uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
+ int t_debug_id = atomic_inc_return(&binder_last_id);
- e = binder_transaction_log_add(&context->transaction_log);
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -1941,29 +2780,39 @@ static void binder_transaction(struct binder_proc *proc,
e->context_name = proc->context->name;
if (reply) {
+ binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
+ binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
- binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
+ spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
+ spin_unlock(&in_reply_to->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
- target_thread = in_reply_to->from;
+ binder_inner_proc_unlock(proc);
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
@@ -1972,89 +2821,137 @@ static void binder_transaction(struct binder_proc *proc,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ binder_inc_node(ref->node, 1, 0, NULL);
+ target_node = ref->node;
+ }
+ binder_proc_unlock(proc);
+ if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- target_node = ref->node;
} else {
+ mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
+ binder_inc_node(target_node, 1, 0, NULL);
+ mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
- if (target_thread) {
+ if (target_thread)
e->to_thread = target_thread->pid;
- target_list = &target_thread->todo;
- target_wait = &target_thread->wait;
- } else {
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
- }
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
- t = kzalloc_preempt_disabled(sizeof(*t));
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
- tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = atomic_inc_return(&binder_last_id);
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2084,15 +2981,30 @@ static void binder_transaction(struct binder_proc *proc,
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
- t->priority = task_nice(current);
+ if (!(t->flags & TF_ONE_WAY) &&
+ binder_supported_policy(current->policy)) {
+ /* Inherit supported policies for synchronous transactions */
+ t->priority.sched_policy = current->policy;
+ t->priority.prio = current->normal_prio;
+ } else {
+ /* Otherwise, fall back to the default priority */
+ t->priority = target_proc->default_priority;
+ }
trace_binder_transaction(reply, t, target_node);
- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -2100,31 +3012,34 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
- if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
+ if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
+ if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2132,6 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
@@ -2148,6 +3065,8 @@ static void binder_transaction(struct binder_proc *proc,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
@@ -2162,6 +3081,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2173,6 +3094,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2184,6 +3107,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
@@ -2200,6 +3125,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
@@ -2209,12 +3136,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
@@ -2230,20 +3161,24 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
- if (copy_from_user_preempt_disabled(
- sg_bufp,
- (const void __user *)(uintptr_t)
- bp->buffer, bp->length)) {
+ if (copy_from_user(sg_bufp,
+ (const void __user *)(uintptr_t)
+ bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
+ return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2252,6 +3187,8 @@ static void binder_transaction(struct binder_proc *proc,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
@@ -2261,42 +3198,61 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_inner_proc_unlock(target_proc);
+ wake_up_interruptible_sync(&target_thread->wait);
+ binder_restore_priority(current, in_reply_to->saved_priority);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
+ if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- if (target_node->has_async_transaction) {
- target_list = &target_node->async_todo;
- target_wait = NULL;
- } else
- target_node->has_async_transaction = 1;
- }
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait) {
- if (reply || !(t->flags & TF_ONE_WAY)) {
- preempt_disable();
- wake_up_interruptible_sync(target_wait);
- preempt_enable_no_resched();
- }
- else {
- wake_up_interruptible(target_wait);
- }
+ if (!binder_proc_transaction(t, target_proc, NULL))
+ goto err_dead_proc_or_thread;
}
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -2304,8 +3260,9 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2318,25 +3275,50 @@ err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node(target_node, 1, 0);
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);
{
struct binder_transaction_log_entry *fe;
- fe = binder_transaction_log_add(
- &context->transaction_log_failed);
+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
+ fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_restore_priority(current, in_reply_to->saved_priority);
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}
static int binder_thread_write(struct binder_proc *proc,
@@ -2350,15 +3332,17 @@ static int binder_thread_write(struct binder_proc *proc,
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
- if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
+ if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
- if (_IOC_NR(cmd) < ARRAY_SIZE(context->binder_stats.bc)) {
- context->binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -2366,53 +3350,61 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;
- if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
+ if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && context->binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- context->binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -2420,11 +3412,12 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
- if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
@@ -2444,13 +3437,17 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -2459,16 +3456,23 @@ static int binder_thread_write(struct binder_proc *proc,
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -2482,11 +3486,12 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- buffer = binder_buffer_lookup(proc, data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
@@ -2508,15 +3513,25 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w)
+ buf_node->has_async_transaction = 0;
else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ binder_enqueue_work_ilocked(
+ w, &thread->todo);
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}
@@ -2524,8 +3539,7 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_REPLY_SG: {
struct binder_transaction_data_sg tr;
- if (copy_from_user_preempt_disabled(&tr, ptr,
- sizeof(tr)))
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr.transaction_data,
@@ -2536,7 +3550,7 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_REPLY: {
struct binder_transaction_data tr;
- if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
@@ -2548,6 +3562,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2561,6 +3576,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2585,15 +3601,37 @@ static int binder_thread_write(struct binder_proc *proc,
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;
- if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
+ if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate memory for death notification
+ * before taking lock
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2601,6 +3639,8 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
@@ -2610,21 +3650,18 @@ static int binder_thread_write(struct binder_proc *proc,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);
+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc_preempt_disabled(sizeof(*death));
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2633,17 +3670,29 @@ static int binder_thread_write(struct binder_proc *proc,
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work(
+ proc,
+ &ref->death->work,
+ &thread->todo);
+ else {
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(
+ &ref->death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
+ binder_inner_proc_unlock(proc);
}
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2652,33 +3701,52 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2692,21 +3760,26 @@ static int binder_thread_write(struct binder_proc *proc,
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
}
- }
- break;
+ binder_inner_proc_unlock(proc);
+ } break;
default:
pr_err("%d:%d unknown command %d\n",
@@ -2722,24 +3795,80 @@ static void binder_stat_br(struct binder_proc *proc,
struct binder_thread *thread, uint32_t cmd)
{
trace_binder_return(cmd);
- if (_IOC_NR(cmd) < ARRAY_SIZE(proc->stats.br)) {
- proc->context->binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}
-static int binder_has_proc_work(struct binder_proc *proc,
- struct binder_thread *thread)
+static int binder_has_thread_work(struct binder_thread *thread)
{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(thread->proc, &thread->todo) ||
+ thread->looper_need_return;
}
-static int binder_has_thread_work(struct binder_thread *thread)
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
+}
+
+static int binder_wait_for_work(struct binder_thread *thread,
+ bool do_proc_work)
+{
+ DEFINE_WAIT(wait);
+ struct binder_proc *proc = thread->proc;
+ int ret = 0;
+
+ freezer_do_not_count();
+ binder_inner_proc_lock(proc);
+ for (;;) {
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ if (binder_has_work_ilocked(thread, do_proc_work))
+ break;
+ if (do_proc_work)
+ list_add(&thread->waiting_thread_node,
+ &proc->waiting_threads);
+ binder_inner_proc_unlock(proc);
+ schedule();
+ binder_inner_proc_lock(proc);
+ list_del_init(&thread->waiting_thread_node);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&thread->wait, &wait);
+ binder_inner_proc_unlock(proc);
+ freezer_count();
+
+ return ret;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -2755,43 +3884,21 @@ static int binder_thread_read(struct binder_proc *proc,
int wait_for_proc_work;
if (*consumed == 0) {
- if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
+ binder_inner_proc_lock(proc);
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+ binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
- if (wait_for_proc_work)
- proc->ready_threads++;
-
- binder_unlock(proc->context, __func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2800,24 +3907,16 @@ retry:
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
- binder_set_nice(proc->default_priority);
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+ binder_restore_priority(current, proc->default_priority);
}
- binder_lock(proc->context, __func__);
+ if (non_block) {
+ if (!binder_has_work(thread, wait_for_proc_work))
+ ret = -EAGAIN;
+ } else {
+ ret = binder_wait_for_work(thread, wait_for_proc_work);
+ }
- if (wait_for_proc_work)
- proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
@@ -2826,33 +3925,54 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
+
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);
- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ cmd = e->cmd;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ e->cmd = BR_OK;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -2860,112 +3980,134 @@ retry:
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-
- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;
+
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
-
- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);
+
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
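+ /*
+ * cmd and cookie were captured while the inner lock was held;
+ * it is dropped before the copies to userspace below, which
+ * may fault.
+ */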
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2977,16 +4119,14 @@ retry:
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
+ struct binder_priority node_prio;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
- t->saved_priority = task_nice(current);
- if (t->priority < target_node->min_priority &&
- !(t->flags & TF_ONE_WAY))
- binder_set_nice(t->priority);
- else if (!(t->flags & TF_ONE_WAY) ||
- t->saved_priority > target_node->min_priority)
- binder_set_nice(target_node->min_priority);
+ node_prio.sched_policy = target_node->sched_policy;
+ node_prio.prio = target_node->min_priority;
+ binder_transaction_priority(current, t, node_prio,
+ target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
@@ -2997,8 +4137,9 @@ retry:
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -3008,18 +4149,24 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);
trace_binder_transaction_received(t);
@@ -3029,21 +4176,22 @@ retry:
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -3051,29 +4199,36 @@ retry:
done:
*consumed = ptr - buffer;
- if (proc->requested_threads + proc->ready_threads == 0 &&
+ binder_inner_proc_lock(proc);
+ if (proc->requested_threads == 0 &&
+ list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
- if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;
- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -3086,11 +4241,17 @@ static void binder_release_work(struct list_head *list)
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -3117,7 +4278,8 @@ static void binder_release_work(struct list_head *list)
}
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -3132,38 +4294,102 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc_preempt_disabled(sizeof(*thread));
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ get_task_struct(current);
+ thread->task = current;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->reply_error.cmd = BR_OK;
+ INIT_LIST_HEAD(&new_thread->waiting_thread_node);
+ return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread;
+ struct binder_thread *new_thread;
+
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, NULL);
+ binder_inner_proc_unlock(proc);
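+ /*
+ * Not found: allocate a new thread outside the spinlock
+ * (kzalloc may sleep), then retry the lookup with it and
+ * free ours if another thread raced in and inserted first.
+ */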
+ if (!thread) {
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (new_thread == NULL)
return NULL;
- binder_stats_created(BINDER_STAT_THREAD);
- thread->proc = proc;
- thread->pid = current->pid;
- init_waitqueue_head(&thread->wait);
- INIT_LIST_HEAD(&thread->todo);
- rb_link_node(&thread->rb_node, parent, p);
- rb_insert_color(&thread->rb_node, &proc->threads);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
- thread->return_error = BR_OK;
- thread->return_error2 = BR_OK;
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, new_thread);
+ binder_inner_proc_unlock(proc);
+ if (thread != new_thread)
+ kfree(new_thread);
}
return thread;
}
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ put_task_struct(thread->task);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;
+ binder_inner_proc_lock(thread->proc);
+ /*
+ * take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding dec is when we actually
+ * free the thread in binder_free_thread()
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
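+ /*
+ * Walk the transaction stack taking t->lock around each entry;
+ * the lock of the transaction just examined (last_t) is dropped
+ * before the lock of the next one in the chain is taken.
+ */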
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -3184,12 +4410,16 @@ static int binder_free_thread(struct binder_proc *proc,
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3198,30 +4428,24 @@ static unsigned int binder_poll(struct file *filp,
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
- int wait_for_proc_work;
-
- binder_lock(proc->context, __func__);
+ bool wait_for_proc_work;
thread = binder_get_thread(proc);
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
- binder_unlock(proc->context, __func__);
+ binder_inner_proc_unlock(thread->proc);
+
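+ /* Poll waits on the per-thread waitqueue; the per-proc waitqueue is gone. */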
+ if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;
+
+ poll_wait(filp, &thread->wait, wait);
+
+ if (binder_has_thread_work(thread))
+ return POLLIN;
- if (wait_for_proc_work) {
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- poll_wait(filp, &proc->wait, wait);
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- } else {
- if (binder_has_thread_work(thread))
- return POLLIN;
- poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
- return POLLIN;
- }
return 0;
}
@@ -3239,7 +4463,7 @@ static int binder_ioctl_write_read(struct file *filp,
ret = -EINVAL;
goto out;
}
- if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3257,7 +4481,7 @@ static int binder_ioctl_write_read(struct file *filp,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3268,10 +4492,12 @@ static int binder_ioctl_write_read(struct file *filp,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&proc->todo))
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
if (ret < 0) {
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3281,7 +4507,7 @@ static int binder_ioctl_write_read(struct file *filp,
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3294,9 +4520,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
int ret = 0;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
-
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
@@ -3317,24 +4544,52 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (!context->binder_context_mgr_node) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- context->binder_context_mgr_node->local_weak_refs++;
- context->binder_context_mgr_node->local_strong_refs++;
- context->binder_context_mgr_node->has_strong_ref = 1;
- context->binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info)
+{
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
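+ /*
+ * info->ptr acts as a cursor: report the first node whose ptr
+ * is strictly greater, so userspace can walk all nodes by
+ * feeding the returned ptr back in.
+ */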
+ memset(info, 0, sizeof(*info));
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_inner_proc_unlock(proc);
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
- struct binder_context *context = proc->context;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
@@ -3348,7 +4603,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err_unlocked;
- binder_lock(context, __func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3361,12 +4615,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -3375,7 +4636,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -3385,8 +4646,27 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
goto err;
}
- if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
- ret = -EINVAL;
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
+ &ver->protocol_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
goto err;
}
break;
@@ -3398,8 +4678,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(context, __func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3428,8 +4707,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -3447,11 +4725,8 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
-
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;
if (proc->tsk != current->group_leader)
return -EINVAL;
@@ -3460,8 +4735,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
@@ -3471,77 +4746,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&proc->context->binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&proc->context->binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- /* binder_update_page_range assumes preemption is disabled */
- preempt_disable();
- ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
- preempt_enable_no_resched();
- if (ret) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&proc->context->binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&proc->context->binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3559,24 +4772,33 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
- proc->default_priority = task_nice(current);
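+ /*
+ * The default priority now records both the scheduling policy
+ * and the normalized priority; unsupported policies fall back
+ * to SCHED_NORMAL at nice 0.
+ */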
+ if (binder_supported_policy(current->policy)) {
+ proc->default_priority.sched_policy = current->policy;
+ proc->default_priority.prio = current->normal_prio;
+ } else {
+ proc->default_priority.sched_policy = SCHED_NORMAL;
+ proc->default_priority.prio = NICE_TO_PRIO(0);
+ }
+
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
-
- binder_lock(proc->context, __func__);
+ binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &proc->context->binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
- binder_unlock(proc->context, __func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3612,16 +4834,17 @@ static void binder_deferred_flush(struct binder_proc *proc)
struct rb_node *n;
int wake_count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
- wake_up_interruptible_all(&proc->wait);
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
@@ -3641,15 +4864,22 @@ static int binder_release(struct inode *nodp, struct file *filp)
static int binder_node_release(struct binder_node *node, int refs)
{
struct binder_ref *ref;
- struct binder_context *context = node->proc->context;
int death = 0;
+ struct binder_proc *proc = node->proc;
- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);
- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /*
+ * The caller must have taken a temporary ref on the node.
+ */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);
return refs;
}
@@ -3657,45 +4887,58 @@ static int binder_node_release(struct binder_node *node, int refs)
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
- hlist_add_head(&node->dead_node, &context->binder_dead_nodes);
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
+ hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }
death++;
- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);
return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->vma);
BUG_ON(proc->files);
+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node &&
context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3703,15 +4946,25 @@ static void binder_deferred_release(struct binder_proc *proc)
__func__, proc->pid);
context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;
+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
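+ /*
+ * binder_thread_release() takes the inner lock itself, so it
+ * is dropped around the call and re-acquired before looking
+ * at the next thread in the tree.
+ */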
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}
nodes = 0;
@@ -3721,96 +4974,55 @@ static void binder_deferred_release(struct binder_proc *proc)
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * take a temporary ref on the node before
+ * calling binder_node_release() which will either
+ * kfree() the node or call binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
- incoming_refs = binder_node_release(node,
- incoming_refs);
+ binder_inner_proc_unlock(proc);
+ incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
- }
-
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- preempt_enable_no_resched();
- vfree(proc->buffer);
- preempt_disable();
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);
- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);
- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
struct files_struct *files;
- struct binder_context *context =
- container_of(work, struct binder_context, deferred_work);
int defer;
do {
- trace_binder_lock(__func__);
- mutex_lock(&context->binder_main_lock);
- trace_binder_locked(__func__);
-
- mutex_lock(&context->binder_deferred_lock);
- preempt_disable();
- if (!hlist_empty(&context->binder_deferred_list)) {
- proc = hlist_entry(context->binder_deferred_list.first,
+ mutex_lock(&binder_deferred_lock);
+ if (!hlist_empty(&binder_deferred_list)) {
+ proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
hlist_del_init(&proc->deferred_work_node);
defer = proc->deferred_work;
@@ -3819,7 +5031,7 @@ static void binder_deferred_func(struct work_struct *work)
proc = NULL;
defer = 0;
}
- mutex_unlock(&context->binder_deferred_lock);
+ mutex_unlock(&binder_deferred_lock);
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
@@ -3834,63 +5046,71 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- trace_binder_unlock(__func__);
- mutex_unlock(&context->binder_main_lock);
- preempt_enable_no_resched();
if (files)
put_files_struct(files);
} while (proc);
}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&proc->context->binder_deferred_lock);
+ mutex_lock(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
- &proc->context->binder_deferred_list);
- queue_work(proc->context->binder_deferred_workqueue,
- &proc->context->deferred_work);
+ &binder_deferred_list);
+ queue_work(binder_deferred_workqueue, &binder_deferred_work);
}
- mutex_unlock(&proc->context->binder_deferred_lock);
+ mutex_unlock(&binder_deferred_lock);
}
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ t->code, t->flags, t->priority.sched_policy,
+ t->priority.prio, t->need_reply);
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}
-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3898,8 +5118,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3924,40 +5152,46 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
}
}
-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -3967,27 +5201,35 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ node->sched_policy, node->min_priority,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
+ binder_node_lock(ref->node);
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -3997,36 +5239,60 @@ static void print_binder_proc(struct seq_file *m,
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
- }
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -4084,54 +5350,45 @@ static const char * const binder_objstat_strings[] = {
"transaction_complete"
};
-static void add_binder_stats(struct binder_stats *from, struct binder_stats *to)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(to->bc); i++)
- to->bc[i] += from->bc[i];
-
- for (i = 0; i < ARRAY_SIZE(to->br); i++)
- to->br[i] += from->br[i];
-}
-
static void print_binder_stats(struct seq_file *m, const char *prefix,
- struct binder_stats *stats,
- struct binder_obj_stats *obj_stats)
+ struct binder_stats *stats)
{
int i;
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}
- if (!obj_stats)
- return;
-
- BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(binder_objstat_strings));
- BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
- ARRAY_SIZE(obj_stats->obj_deleted));
- for (i = 0; i < ARRAY_SIZE(obj_stats->obj_created); i++) {
- int obj_created = atomic_read(&obj_stats->obj_created[i]);
- int obj_deleted = atomic_read(&obj_stats->obj_deleted[i]);
-
- if (obj_created || obj_deleted)
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
- binder_objstat_strings[i],
- obj_created - obj_deleted, obj_created);
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(stats->obj_deleted));
+ for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
+ binder_objstat_strings[i],
+ created - deleted,
+ created);
}
}
@@ -4139,226 +5396,193 @@ static void print_binder_proc_stats(struct seq_file *m,
struct binder_proc *proc)
{
struct binder_work *w;
+ struct binder_thread *thread;
struct rb_node *n;
- int count, strong, weak;
+ int count, strong, weak, ready_threads;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ ready_threads = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
+
+ list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+ ready_threads++;
+
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
- print_binder_stats(m, " ", &proc->stats, NULL);
+ print_binder_stats(m, " ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
- struct binder_device *device;
- struct binder_context *context;
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
- bool wrote_dead_nodes_header = false;
+ struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- if (do_lock)
- binder_lock(context, __func__);
- if (!wrote_dead_nodes_header &&
- !hlist_empty(&context->binder_dead_nodes)) {
- seq_puts(m, "dead nodes:\n");
- wrote_dead_nodes_header = true;
- }
- hlist_for_each_entry(node, &context->binder_dead_nodes,
- dead_node)
- print_binder_node(m, node);
-
- if (do_lock)
- binder_unlock(context, __func__);
- }
-
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- if (do_lock)
- binder_lock(context, __func__);
+ spin_lock(&binder_dead_nodes_lock);
+ if (!hlist_empty(&binder_dead_nodes))
+ seq_puts(m, "dead nodes:\n");
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 1);
+ mutex_unlock(&binder_procs_lock);
- hlist_for_each_entry(proc, &context->binder_procs, proc_node)
- print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(context, __func__);
- }
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
- struct binder_device *device;
- struct binder_context *context;
struct binder_proc *proc;
- struct binder_stats total_binder_stats;
- int do_lock = !binder_debug_no_lock;
-
- memset(&total_binder_stats, 0, sizeof(struct binder_stats));
-
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- if (do_lock)
- binder_lock(context, __func__);
-
- add_binder_stats(&context->binder_stats, &total_binder_stats);
-
- if (do_lock)
- binder_unlock(context, __func__);
- }
seq_puts(m, "binder stats:\n");
- print_binder_stats(m, "", &total_binder_stats, &binder_obj_stats);
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- if (do_lock)
- binder_lock(context, __func__);
+ print_binder_stats(m, "", &binder_stats);
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc_stats(m, proc);
+ mutex_unlock(&binder_procs_lock);
- hlist_for_each_entry(proc, &context->binder_procs, proc_node)
- print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(context, __func__);
- }
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
- struct binder_device *device;
- struct binder_context *context;
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
seq_puts(m, "binder transactions:\n");
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- if (do_lock)
- binder_lock(context, __func__);
-
- hlist_for_each_entry(proc, &context->binder_procs, proc_node)
- print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(context, __func__);
- }
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 0);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
- struct binder_device *device;
- struct binder_context *context;
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- int do_lock = !binder_debug_no_lock;
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- if (do_lock)
- binder_lock(context, __func__);
-
- hlist_for_each_entry(itr, &context->binder_procs, proc_node) {
- if (itr->pid == pid) {
- seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, itr, 1);
- }
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(itr, &binder_procs, proc_node) {
+ if (itr->pid == pid) {
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, itr, 1);
}
- if (do_lock)
- binder_unlock(context, __func__);
}
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->context_name,
- e->to_node, e->target_handle, e->data_size, e->offsets_size);
-}
-
-static int print_binder_transaction_log(struct seq_file *m,
- struct binder_transaction_log *log)
-{
- int i;
- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
- }
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
- return 0;
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read-barrier to guarantee read of debug_id_done after
+ * done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
- struct binder_device *device;
- struct binder_context *context;
-
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- print_binder_transaction_log(m, &context->transaction_log);
- }
- return 0;
-}
+ struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
+ int i;
-static int binder_failed_transaction_log_show(struct seq_file *m, void *unused)
-{
- struct binder_device *device;
- struct binder_context *context;
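+ /*
+ * The log is a ring buffer indexed by an atomic cursor: start
+ * at the oldest entry and print at most ARRAY_SIZE(log->entry)
+ * records, oldest first.
+ */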
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
- hlist_for_each_entry(device, &binder_devices, hlist) {
- context = &device->context;
- print_binder_transaction_log(m,
- &context->transaction_log_failed);
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
return 0;
}
@@ -4378,20 +5602,11 @@ BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
-BINDER_DEBUG_ENTRY(failed_transaction_log);
-
-static void __init free_binder_device(struct binder_device *device)
-{
- if (device->context.binder_deferred_workqueue)
- destroy_workqueue(device->context.binder_deferred_workqueue);
- kfree(device);
-}
static int __init init_binder_device(const char *name)
{
int ret;
struct binder_device *binder_device;
- struct binder_context *context;
binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
if (!binder_device)
@@ -4401,65 +5616,34 @@ static int __init init_binder_device(const char *name)
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
binder_device->miscdev.name = name;
- context = &binder_device->context;
- context->binder_context_mgr_uid = INVALID_UID;
- context->name = name;
-
- mutex_init(&context->binder_main_lock);
- mutex_init(&context->binder_deferred_lock);
- mutex_init(&context->binder_mmap_lock);
-
- context->binder_deferred_workqueue =
- create_singlethread_workqueue(name);
-
- if (!context->binder_deferred_workqueue) {
- ret = -ENOMEM;
- goto err_create_singlethread_workqueue_failed;
- }
-
- INIT_HLIST_HEAD(&context->binder_procs);
- INIT_HLIST_HEAD(&context->binder_dead_nodes);
- INIT_HLIST_HEAD(&context->binder_deferred_list);
- INIT_WORK(&context->deferred_work, binder_deferred_func);
+ binder_device->context.binder_context_mgr_uid = INVALID_UID;
+ binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
- goto err_misc_register_failed;
+ kfree(binder_device);
+ return ret;
}
hlist_add_head(&binder_device->hlist, &binder_devices);
- return ret;
-
-err_create_singlethread_workqueue_failed:
-err_misc_register_failed:
- free_binder_device(binder_device);
return ret;
}
static int __init binder_init(void)
{
- int ret = 0;
+ int ret;
char *device_name, *device_names;
struct binder_device *device;
struct hlist_node *tmp;
- /*
- * Copy the module_parameter string, because we don't want to
- * tokenize it in-place.
- */
- device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
- if (!device_names)
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);
+ binder_deferred_workqueue = create_singlethread_workqueue("binder");
+ if (!binder_deferred_workqueue)
return -ENOMEM;
- strcpy(device_names, binder_devices_param);
-
- while ((device_name = strsep(&device_names, ","))) {
- ret = init_binder_device(device_name);
- if (ret)
- goto err_init_binder_device_failed;
- }
-
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4484,13 +5668,30 @@ static int __init binder_init(void)
debugfs_create_file("transaction_log",
S_IRUGO,
binder_debugfs_dir_entry_root,
- NULL,
+ &binder_transaction_log,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
S_IRUGO,
binder_debugfs_dir_entry_root,
- NULL,
- &binder_failed_transaction_log_fops);
+ &binder_transaction_log_failed,
+ &binder_transaction_log_fops);
+ }
+
+ /*
+ * Copy the module_parameter string, because we don't want to
+ * tokenize it in-place.
+ */
+ device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+ if (!device_names) {
+ ret = -ENOMEM;
+ goto err_alloc_device_names_failed;
+ }
+ strcpy(device_names, binder_devices_param);
+
+ while ((device_name = strsep(&device_names, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
}
return ret;
@@ -4499,8 +5700,12 @@ err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
hlist_del(&device->hlist);
- free_binder_device(device);
+ kfree(device);
}
+err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
+ destroy_workqueue(binder_deferred_workqueue);
return ret;
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 000000000000..aabfebac6e57
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,802 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, S_IWUSR | S_IRUGO);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return alloc->buffer +
+ alloc->buffer_size - (void *)buffer->data;
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc: binder_alloc for this proc
+ * @user_ptr: User pointer to buffer data
+ *
+ * Validate the userspace pointer to buffer data and return the buffer
+ * corresponding to that user pointer. Searches the rb tree of allocated
+ * buffers for the one matching the user data pointer and marks it as
+ * free-in-progress to guard against double frees.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
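A minimal usage sketch, assuming the enclosing binder_proc embeds a struct binder_alloc member named "alloc" and that user_ptr was taken from a BC_FREE_BUFFER command (both names are illustrative, not part of this diff):

struct binder_buffer *buffer;

buffer = binder_alloc_prepare_to_free(&proc->alloc, user_ptr);
if (buffer == NULL)
        return;         /* bad pointer or an attempted double free */
/* ... drop node/fd references held by the transaction ... */
binder_alloc_free_buf(&proc->alloc, buffer);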
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (*page == NULL) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL, page);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ alloc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: size of the array of buffer offsets supplied by the user
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to a
+ * pointer-sized boundary).
+ *
+ * Return: The allocated buffer, or an ERR_PTR()-encoded error on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
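A caller-side sketch, assuming a struct binder_transaction_data *tr, an extra_buffers_size computed from any attached objects, and a target_proc whose allocator is embedded as target_proc->alloc (all illustrative names). Failures are reported as ERR_PTR() values, never NULL, so the result must be checked with IS_ERR():

struct binder_buffer *buf;

buf = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                           tr->offsets_size, extra_buffers_size,
                           !!(tr->flags & TF_ONE_WAY));
if (IS_ERR(buf))
        return PTR_ERR(buf);    /* -EINVAL, -ENOSPC or -ESRCH */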
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, next);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
+ alloc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(alloc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < alloc->buffer);
+ BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buf()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n",
+ alloc->pid, vma->vm_start, vma->vm_end,
+ alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ if (binder_update_page_range(alloc, 1, alloc->buffer,
+ alloc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = alloc->buffer;
+ INIT_LIST_HEAD(&alloc->buffers);
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
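A sketch of the mmap() plumbing this handler expects; binder_proc and its embedded alloc member are assumed here for illustration:

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct binder_proc *proc = filp->private_data;

        /* size clamping and VM flag checks elided */
        return binder_alloc_mmap_handler(&proc->alloc, vma);
}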
+
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ struct binder_buffer *buffer;
+
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+
+ if (!alloc->pages[i])
+ continue;
+
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK not freed\n",
+ __func__, alloc->pid, i, page_addr);
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i]);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
+
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+}
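A sketch of the open() path this helper serves, again assuming binder_proc embeds a struct binder_alloc named alloc; the allocator must be initialized before any mmap or buffer allocation is attempted against it:

struct binder_proc *proc;

proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (!proc)
        return -ENOMEM;
binder_alloc_init(&proc->alloc);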
+
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 000000000000..088e4ffc6230
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry: entry in the alloc->buffers list
+ * @rb_node: node for allocated_buffers/free_buffers rb trees
+ * @free: true if buffer is free
+ * @allow_user_free: true if userspace is allowed to free this buffer
+ * @async_transaction: true if buffer is in use for an async transaction
+ * @free_in_progress: set while the buffer is being freed, to catch double frees
+ * @debug_id: unique id for debugging
+ * @transaction: pointer to the binder transaction this buffer belongs to
+ * @target_node: struct binder_node associated with this buffer
+ * @data_size: size of @transaction data
+ * @offsets_size: size of the array of buffer offsets
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @data: start of the buffer's data area
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ uint8_t data[0];
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @tsk: task_struct of the task that called init for this proc
+ * (invariant after init)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of struct page pointers, one for each
+ * page of mmap'd space
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_open() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct mutex mutex;
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ int pid;
+};
+
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ mutex_lock(&alloc->mutex);
+ free_async_space = alloc->free_async_space;
+ mutex_unlock(&alloc->mutex);
+ return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+ /*
+ * user_buffer_offset is constant if vma is set and
+ * undefined if vma is not set. It is possible to
+ * get here with !alloc->vma if the target process
+ * is dying while a transaction is being initiated.
+ * Returning the old value is ok in this case and
+ * the transaction will fail.
+ */
+ return alloc->user_buffer_offset;
+}
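A sketch of the kernel-to-user address conversion this accessor enables; buffer and alloc are assumed to belong to the same proc:

/* kernel address of the payload ... */
void *kptr = buffer->data;
/* ... and where the same bytes appear in the target process */
binder_uintptr_t uptr = (binder_uintptr_t)((uintptr_t)kptr +
                        binder_alloc_get_user_buffer_offset(alloc));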
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..7967db16ba5a 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
- struct binder_ref *ref),
- TP_ARGS(t, node, ref),
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
);
TRACE_EVENT(binder_transaction_ref_to_node,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
- TP_ARGS(t, ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
- __entry->node_debug_id = ref->node->debug_id;
- __entry->node_ptr = ref->node->ptr;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
+ __entry->node_debug_id = node->debug_id;
+ __entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
);
TRACE_EVENT(binder_transaction_ref_to_ref,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
- struct binder_ref *dest_ref),
- TP_ARGS(t, src_ref, dest_ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *src_ref,
+ struct binder_ref_data *dest_ref),
+ TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->node_debug_id = src_ref->node->debug_id;
+ __entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
- TP_PROTO(struct binder_proc *proc, bool allocate,
+ TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
- TP_ARGS(proc, allocate, start, end),
+ TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
__field(size_t, size)
),
TP_fast_assign(
- __entry->proc = proc->pid;
+ __entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3fa9096b27c2..5a56a8e9f006 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2105,7 +2105,11 @@ void device_shutdown(void)
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->bus && dev->bus->shutdown) {
+ if (dev->class && dev->class->shutdown) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
+ dev->class->shutdown(dev);
+ } else if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9920916a6220..ae7f3ce90bd2 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -827,7 +827,7 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old = pdev->driver_override, *cp;
+ char *driver_override, *old, *cp;
if (count > PATH_MAX)
return -EINVAL;
@@ -840,12 +840,15 @@ static ssize_t driver_override_store(struct device *dev,
if (cp)
*cp = '\0';
+ device_lock(dev);
+ old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
+ device_unlock(dev);
kfree(old);
@@ -856,8 +859,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
+ ssize_t len;
- return sprintf(buf, "%s\n", pdev->driver_override);
+ device_lock(dev);
+ len = sprintf(buf, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index a48824deabc5..78b0ece0c867 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1188,7 +1188,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
}
dev->power.subsys_data->domain_data = &gpd_data->base;
- dev->pm_domain = &genpd->domain;
spin_unlock_irq(&dev->power.lock);
@@ -1207,7 +1206,6 @@ static void genpd_free_dev_data(struct device *dev,
{
spin_lock_irq(&dev->power.lock);
- dev->pm_domain = NULL;
dev->power.subsys_data->domain_data = NULL;
spin_unlock_irq(&dev->power.lock);
@@ -1248,6 +1246,8 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (ret)
goto out;
+ dev->pm_domain = &genpd->domain;
+
genpd->device_count++;
genpd->max_off_time_changed = true;
@@ -1299,6 +1299,8 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
+ dev->pm_domain = NULL;
+
list_del_init(&pdd->list_node);
mutex_unlock(&genpd->lock);
@@ -1373,7 +1375,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
- struct gpd_link *link;
+ struct gpd_link *l, *link;
int ret = -EINVAL;
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
@@ -1388,7 +1390,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(link, &genpd->master_links, master_node) {
+ list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
if (link->slave != subdomain)
continue;
@@ -1642,10 +1644,10 @@ EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
*/
void of_genpd_del_provider(struct device_node *np)
{
- struct of_genpd_provider *cp;
+ struct of_genpd_provider *cp, *tmp;
mutex_lock(&of_genpd_mutex);
- list_for_each_entry(cp, &of_genpd_providers, link) {
+ list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
of_node_put(cp->node);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b46798c81d..39efa7e6c0c0 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 0e494108c20c..7af116e12e53 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -61,6 +61,8 @@ static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -199,7 +201,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
- synchronize_rcu();
+ synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -331,13 +333,14 @@ void device_wakeup_detach_irq(struct device *dev)
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_arm_wake_irq(ws->wakeirq);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@@ -348,13 +351,14 @@ void device_wakeup_arm_wake_irqs(void)
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_disarm_wake_irq(ws->wakeirq);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@@ -839,10 +843,10 @@ EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
- int active = 0;
+ int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
pr_info("active wakeup source: %s\n", ws->name);
@@ -858,7 +862,7 @@ void pm_print_active_wakeup_sources(void)
if (!active && last_activity_ws)
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
@@ -985,8 +989,9 @@ void pm_wakep_autosleep_enabled(bool set)
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
@@ -1000,7 +1005,7 @@ void pm_wakep_autosleep_enabled(bool set)
}
spin_unlock_irq(&ws->lock);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */
@@ -1061,15 +1066,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
+ int srcuidx;
seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
print_wakeup_source_stats(m, ws);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
print_wakeup_source_stats(m, &deleted_ws);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 41fb1a917b17..33e23a7a691f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -595,8 +595,6 @@ int xen_blkif_schedule(void *arg)
unsigned long timeout;
int ret;
- xen_blkif_get(blkif);
-
while (!kthread_should_stop()) {
if (try_to_freeze())
continue;
@@ -650,7 +648,6 @@ purge_gnt_list:
print_stats(blkif);
blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
return 0;
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f53cff42f8da..923308201375 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -221,7 +221,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
wake_up(&blkif->shutdown_wq);
- blkif->xenblkd = NULL;
}
/* The above kthread_stop() guarantees that at this point we
@@ -266,9 +265,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
static void xen_blkif_free(struct xen_blkif *blkif)
{
-
- xen_blkif_disconnect(blkif);
+ WARN_ON(xen_blkif_disconnect(blkif));
xen_vbd_free(&blkif->vbd);
+ kfree(blkif->be->mode);
+ kfree(blkif->be);
/* Make sure everything is drained before shutting down */
BUG_ON(blkif->persistent_gnt_c != 0);
@@ -445,8 +445,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
xen_blkif_put(be->blkif);
}
- kfree(be->mode);
- kfree(be);
return 0;
}
diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c
index 969f755f5dc4..0a61186167ba 100644
--- a/drivers/bluetooth/btfm_slim.c
+++ b/drivers/bluetooth/btfm_slim.c
@@ -155,23 +155,22 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
rxport, 1);
if (ret < 0) {
BTFMSLIM_ERR("vendor_port_en failed ret[%d]",
- ret);
+ ret);
goto error;
}
}
if (rxport) {
BTFMSLIM_INFO("slim_connect_sink(port: %d, ch: %d)",
- ch->port, ch->ch);
+ ch->port, ch->ch);
/* Connect Port with channel given by Machine driver*/
ret = slim_connect_sink(btfmslim->slim_pgd,
&ch->port_hdl, 1, ch->ch_hdl);
if (ret < 0) {
BTFMSLIM_ERR("slim_connect_sink failed ret[%d]",
- ret);
+ ret);
goto remove_channel;
}
-
} else {
BTFMSLIM_INFO("slim_connect_src(port: %d, ch: %d)",
ch->port, ch->ch);
@@ -180,7 +179,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
ch->ch_hdl);
if (ret < 0) {
BTFMSLIM_ERR("slim_connect_src failed ret[%d]",
- ret);
+ ret);
goto remove_channel;
}
}
@@ -190,6 +189,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
BTFMSLIM_INFO(
"port: %d, ch: %d, grp: %d, ch->grph: 0x%x, ch_hdl: 0x%x",
chan->port, chan->ch, grp, chan->grph, chan->ch_hdl);
+
ret = slim_control_ch(btfmslim->slim_pgd, (grp ? chan->grph :
chan->ch_hdl), SLIM_CH_ACTIVATE, true);
if (ret < 0) {
@@ -220,6 +220,7 @@ int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
BTFMSLIM_INFO("port:%d, grp: %d, ch->grph:0x%x, ch->ch_hdl:0x%x ",
ch->port, grp, ch->grph, ch->ch_hdl);
+
/* Remove the channel immediately*/
ret = slim_control_ch(btfmslim->slim_pgd, (grp ? ch->grph : ch->ch_hdl),
SLIM_CH_REMOVE, true);
@@ -233,7 +234,6 @@ int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
goto error;
}
}
-
/* Disable port through registration setting */
for (i = 0; i < nchan; i++, ch++) {
if (btfmslim->vendor_port_en) {
@@ -246,9 +246,11 @@ int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
}
}
}
+
error:
return ret;
}
+
static int btfm_slim_get_logical_addr(struct slim_device *slim)
{
int ret = 0;
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 96be0e2f9183..035e8d9fb5fd 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -118,9 +118,6 @@ static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
return;
}
- if (dai->id == BTFM_FM_SLIM_TX)
- goto out;
-
/* Search for dai->id matched port handler */
for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
(ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
@@ -134,7 +131,6 @@ static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
}
btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
-out:
btfm_slim_hw_deinit(btfmslim);
}
@@ -205,61 +201,6 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
return ret;
}
-static int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- int ret = -EINVAL, i;
- struct btfmslim *btfmslim = dai->dev->platform_data;
- struct btfmslim_ch *ch;
- uint8_t rxport, grp = false, nchan = 1;
-
- BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
- dai->id, dai->rate);
-
- switch (dai->id) {
- case BTFM_FM_SLIM_TX:
- grp = true; nchan = 2;
- ch = btfmslim->tx_chs;
- rxport = 0;
- break;
- case BTFM_BT_SCO_SLIM_TX:
- ch = btfmslim->tx_chs;
- rxport = 0;
- break;
- case BTFM_BT_SCO_A2DP_SLIM_RX:
- case BTFM_BT_SPLIT_A2DP_SLIM_RX:
- ch = btfmslim->rx_chs;
- rxport = 1;
- break;
- case BTFM_SLIM_NUM_CODEC_DAIS:
- default:
- BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
- goto out;
- }
-
- if (dai->id != BTFM_FM_SLIM_TX) {
- ret = 0;
- goto out;
- }
-
- /* Search for dai->id matched port handler */
- for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
- (ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
- (ch->id != dai->id); ch++, i++)
- ;
-
- if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
- (ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
- BTFMSLIM_ERR("ch is invalid!!");
- goto out;
- }
-
- btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
-
-out:
- return ret;
-}
-
/* This function will be called once during boot up */
static int btfm_slim_dai_set_channel_map(struct snd_soc_dai *dai,
unsigned int tx_num, unsigned int *tx_slot,
@@ -402,7 +343,6 @@ static struct snd_soc_dai_ops btfmslim_dai_ops = {
.shutdown = btfm_slim_dai_shutdown,
.hw_params = btfm_slim_dai_hw_params,
.prepare = btfm_slim_dai_prepare,
- .hw_free = btfm_slim_dai_hw_free,
.set_channel_map = btfm_slim_dai_set_channel_map,
.get_channel_map = btfm_slim_dai_get_channel_map,
};
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index 0c4e0b3d5c2e..363b4692d228 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -82,11 +82,12 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
uint8_t rxport, uint8_t enable)
{
int ret = 0;
- uint8_t reg_val = 0;
+ uint8_t reg_val = 0, en;
uint8_t port_bit = 0;
uint16_t reg;
BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
+
if (rxport) {
if (enable) {
/* For SCO Rx, A2DP Rx */
@@ -117,7 +118,8 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+ ret, reg);
goto error;
}
}
@@ -137,15 +139,15 @@ enable_disable_txport:
reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
enable_disable_rxport:
- if (enable) {
- if (is_fm_port(port_num))
- reg_val = CHRK_SB_PGD_PORT_ENABLE |
- CHRK_SB_PGD_PORT_WM_L3;
- else
- reg_val = CHRK_SB_PGD_PORT_ENABLE |
- CHRK_SB_PGD_PORT_WM_LB;
- } else
- reg_val = CHRK_SB_PGD_PORT_DISABLE;
+ if (enable)
+ en = CHRK_SB_PGD_PORT_ENABLE;
+ else
+ en = CHRK_SB_PGD_PORT_DISABLE;
+
+ if (is_fm_port(port_num))
+ reg_val = en | CHRK_SB_PGD_PORT_WM_L8;
+ else
+ reg_val = enable ? en | CHRK_SB_PGD_PORT_WM_LB : en;
ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
if (ret)
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.h b/drivers/bluetooth/btfm_slim_wcn3990.h
index f6a260096c91..b637ac581201 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.h
+++ b/drivers/bluetooth/btfm_slim_wcn3990.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,7 @@
#define CHRK_SB_PGD_PORT_WM_L1 (0x1 << 1)
#define CHRK_SB_PGD_PORT_WM_L2 (0x2 << 1)
#define CHRK_SB_PGD_PORT_WM_L3 (0x3 << 1)
+#define CHRK_SB_PGD_PORT_WM_L8 (0x8 << 1)
#define CHRK_SB_PGD_PORT_WM_LB (0xB << 1)
#define CHRK_SB_PGD_PORT_RX_NUM 16
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 781fa96726e2..aba9010c5aa2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -58,6 +58,7 @@
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
+#define AC_VM_ADSP_HEAP_SHARED 33
#define DEBUGFS_SIZE 1024
#define RPC_TIMEOUT (5 * HZ)
@@ -222,6 +223,7 @@ struct fastrpc_channel_ctx {
int prevssrcount;
int issubsystemup;
int vmid;
+ int heap_vmid;
int ramdumpenabled;
void *remoteheap_ramdump_dev;
struct fastrpc_glink_info link;
@@ -1594,7 +1596,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
struct fastrpc_mmap *file = 0, *mem = 0;
char *proc_name = NULL;
int srcVM[1] = {VMID_HLOS};
- int destVM[1] = {VMID_ADSP_Q6};
+ int destVM[1] = {gcinfo[0].heap_vmid};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
@@ -1853,7 +1855,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
int srcVM[1] = {VMID_HLOS};
- int destVM[1] = {VMID_ADSP_Q6};
+ int destVM[1] = {gcinfo[0].heap_vmid};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
@@ -1869,7 +1871,7 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
struct fastrpc_mmap *map)
{
int err = 0;
- int srcVM[1] = {VMID_ADSP_Q6};
+ int srcVM[1] = {gcinfo[0].heap_vmid};
int destVM[1] = {VMID_HLOS};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
@@ -2400,16 +2402,16 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
spin_lock(&fl->hlock);
hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
- "%s %p %s %p %s %llx\n", "buf:",
- buf, "buf->virt:", buf->virt,
- "buf->phys:", buf->phys);
+ "%s %pK %s %pK %s %llx\n", "buf:",
+ buf, "buf->virt:", buf->virt,
+ "buf->phys:", buf->phys);
}
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
"\n%s\n",
"LIST OF MAPS:");
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
- "%s %p %s %lx %s %llx\n",
+ "%s %pK %s %lx %s %llx\n",
"map:", map,
"map->va:", map->va,
"map->phys:", map->phys);
@@ -2419,7 +2421,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
"LIST OF PENDING SMQCONTEXTS:");
hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
- "%s %p %s %u %s %u %s %u\n",
+ "%s %pK %s %u %s %u %s %u\n",
"smqcontext:", ictx,
"sc:", ictx->sc,
"tid:", ictx->pid,
@@ -2430,7 +2432,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
"LIST OF INTERRUPTED SMQCONTEXTS:");
hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
- "%s %p %s %u %s %u %s %u\n",
+ "%s %pK %s %u %s %u %s %u\n",
"smqcontext:", ictx,
"sc:", ictx->sc,
"tid:", ictx->pid,
@@ -2500,6 +2502,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
kref_init(&me->channel[cid].kref);
pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
+ err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
+ if (err)
+ pr_info("adsprpc: initial intent failed for %d\n", cid);
if (cid == 0 && me->channel[cid].ssrcount !=
me->channel[cid].prevssrcount) {
if (fastrpc_mmap_remove_ssr(fl))
@@ -2695,6 +2700,10 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
VERIFY(err, 0 == copy_from_user(&p.init, param, size));
if (err)
goto bail;
+ VERIFY(err, p.init.init.filelen >= 0 &&
+ p.init.init.memlen >= 0);
+ if (err)
+ goto bail;
VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
if (err)
goto bail;
@@ -2840,6 +2849,7 @@ static int fastrpc_cb_probe(struct device *dev)
chan->sesscount++;
debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
NULL, &debugfs_fops);
+
bail:
return err;
}
@@ -2953,6 +2963,12 @@ static int fastrpc_probe(struct platform_device *pdev)
}
return 0;
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,fastrpc-vmid-heap-shared"))
+ gcinfo[0].heap_vmid = AC_VM_ADSP_HEAP_SHARED;
+ else
+ gcinfo[0].heap_vmid = VMID_ADSP_Q6;
+ pr_info("ADSPRPC: gcinfo[0].heap_vmid %d\n", gcinfo[0].heap_vmid);
me->glink = of_property_read_bool(dev->of_node, "qcom,fastrpc-glink");
VERIFY(err, !of_platform_populate(pdev->dev.of_node,
fastrpc_match_table,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a084a4751fa9..25372dc381d4 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3877,6 +3877,9 @@ static void smi_recv_tasklet(unsigned long val)
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
+
+ rcu_read_lock();
+
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -3899,6 +3902,8 @@ static void smi_recv_tasklet(unsigned long val)
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
+ rcu_read_unlock();
+
handle_new_recv_msgs(intf);
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0d83cfb9708f..f53e8ba2c718 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -758,6 +758,11 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
result, len, data[2]);
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ /*
+ * Don't abort here, maybe it was a queued
+ * response to a previous command.
+ */
+ ipmi_ssif_unlock_cond(ssif_info, flags);
pr_warn(PFX "Invalid response getting flags: %x %x\n",
data[0], data[1]);
} else {
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 096f0cef4da1..40d400fe5bb7 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1162,10 +1162,11 @@ static int wdog_reboot_handler(struct notifier_block *this,
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
- /* Set a long timer to let the reboot happens, but
- reboot if it hangs, but only if the watchdog
+ /* Set a long timer to let the reboot happen or
+ reset if it hangs, but only if the watchdog
timer was already running. */
- timeout = 120;
+ if (timeout < 120)
+ timeout = 120;
pretimeout = 0;
ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 252142524ff2..a0d9ac6b6cc9 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -29,33 +29,92 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
-static LIST_HEAD(tpm_chip_list);
-static DEFINE_SPINLOCK(driver_lock);
+DEFINE_IDR(dev_nums_idr);
+static DEFINE_MUTEX(idr_lock);
struct class *tpm_class;
dev_t tpm_devt;
-/*
- * tpm_chip_find_get - return tpm_chip for a given chip number
- * @chip_num the device number for the chip
+/**
+ * tpm_try_get_ops() - Get a ref to the tpm_chip
+ * @chip: Chip to ref
+ *
+ * The caller must already have some kind of locking to ensure that chip is
+ * valid. This function will lock the chip so that the ops member can be
+ * accessed safely. The locking prevents tpm_chip_unregister from
+ * completing, so it should not be held for long periods.
+ *
+ * Returns 0 on success or -EIO if the chip could not be obtained.
*/
-struct tpm_chip *tpm_chip_find_get(int chip_num)
+int tpm_try_get_ops(struct tpm_chip *chip)
{
- struct tpm_chip *pos, *chip = NULL;
+ int rc = -EIO;
- rcu_read_lock();
- list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
- if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
- continue;
+ get_device(&chip->dev);
- if (try_module_get(pos->pdev->driver->owner)) {
- chip = pos;
- break;
- }
+ down_read(&chip->ops_sem);
+ if (!chip->ops)
+ goto out_lock;
+
+ if (!try_module_get(chip->dev.parent->driver->owner))
+ goto out_lock;
+
+ return 0;
+out_lock:
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_try_get_ops);
+
+/**
+ * tpm_put_ops() - Release a ref to the tpm_chip
+ * @chip: Chip to put
+ *
+ * This is the opposite pair to tpm_try_get_ops(). After this returns chip may
+ * be kfree'd.
+ */
+void tpm_put_ops(struct tpm_chip *chip)
+{
+ module_put(chip->dev.parent->driver->owner);
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+}
+EXPORT_SYMBOL_GPL(tpm_put_ops);
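A sketch of the get/put pairing these helpers establish; do_tpm_command() is a hypothetical stand-in for any code that dereferences chip->ops:

int rc;

rc = tpm_try_get_ops(chip);
if (rc)
        return rc;              /* chip is being unregistered */
rc = do_tpm_command(chip);      /* chip->ops stays valid until the put */
tpm_put_ops(chip);
return rc;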
+
+/**
+ * tpm_chip_find_get() - return tpm_chip for a given chip number
+ * @chip_num: id to find
+ *
+ * The returned chip has been tpm_try_get_ops'd and must be released via
+ * tpm_put_ops
+ */
+struct tpm_chip *tpm_chip_find_get(int chip_num)
+{
+ struct tpm_chip *chip, *res = NULL;
+ int chip_prev;
+
+ mutex_lock(&idr_lock);
+
+ if (chip_num == TPM_ANY_NUM) {
+ chip_num = 0;
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip && !tpm_try_get_ops(chip)) {
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+ } else {
+ chip = idr_find_slowpath(&dev_nums_idr, chip_num);
+ if (chip && !tpm_try_get_ops(chip))
+ res = chip;
}
- rcu_read_unlock();
- return chip;
+
+ mutex_unlock(&idr_lock);
+
+ return res;
}
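A sketch of a lookup by number, paired with tpm_put_ops() as the kernel-doc above requires:

struct tpm_chip *chip;

chip = tpm_chip_find_get(TPM_ANY_NUM);
if (!chip)
        return -ENODEV;
/* ... issue TPM commands against chip ... */
tpm_put_ops(chip);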
/**
@@ -68,12 +127,48 @@ static void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
- spin_lock(&driver_lock);
- clear_bit(chip->dev_num, dev_mask);
- spin_unlock(&driver_lock);
+ mutex_lock(&idr_lock);
+ idr_remove(&dev_nums_idr, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
kfree(chip);
}
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device- specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ down_write(&chip->ops_sem);
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
+ }
+ /* Allow bus- and device-specific code to run. Note: since chip->ops
+ * is NULL, more-specific shutdown code will not be able to issue TPM
+ * commands.
+ */
+ if (dev->bus && dev->bus->shutdown)
+ dev->bus->shutdown(dev);
+ else if (dev->driver && dev->driver->shutdown)
+ dev->driver->shutdown(dev);
+ return 0;
+}
+
+
/**
* tpmm_chip_alloc() - allocate a new struct tpm_chip instance
* @dev: device to which the chip is associated
@@ -88,37 +183,35 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
+ int rc;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return ERR_PTR(-ENOMEM);
mutex_init(&chip->tpm_mutex);
- INIT_LIST_HEAD(&chip->list);
+ init_rwsem(&chip->ops_sem);
chip->ops = ops;
- spin_lock(&driver_lock);
- chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
- spin_unlock(&driver_lock);
-
- if (chip->dev_num >= TPM_NUM_DEVICES) {
+ mutex_lock(&idr_lock);
+ rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
+ mutex_unlock(&idr_lock);
+ if (rc < 0) {
dev_err(dev, "No available tpm device numbers\n");
kfree(chip);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(rc);
}
-
- set_bit(chip->dev_num, dev_mask);
+ chip->dev_num = rc;
scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num);
- chip->pdev = dev;
-
dev_set_drvdata(dev, chip);
chip->dev.class = tpm_class;
+ chip->dev.class->shutdown = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
- chip->dev.parent = chip->pdev;
+ chip->dev.parent = dev;
#ifdef CONFIG_ACPI
chip->dev.groups = chip->groups;
#endif
@@ -133,7 +226,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
device_initialize(&chip->dev);
cdev_init(&chip->cdev, &tpm_fops);
- chip->cdev.owner = chip->pdev->driver->owner;
+ chip->cdev.owner = dev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
@@ -167,6 +260,11 @@ static int tpm_add_char_device(struct tpm_chip *chip)
return rc;
}
+ /* Make the chip available. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, chip, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
return rc;
}
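Pulling the idr plumbing together, the patch gives each chip number four distinct lifecycle points; a comments-only summary of code that appears in the surrounding hunks:

/* tpmm_chip_alloc():     idr_alloc(&dev_nums_idr, NULL, ...)   - reserve a number, chip not yet visible  */
/* tpm_add_char_device(): idr_replace(&dev_nums_idr, chip, n)   - publish; tpm_chip_find_get() can see it  */
/* tpm_del_char_device(): idr_replace(&dev_nums_idr, NULL, n)   - unpublish; lookups fail again            */
/* tpm_dev_release():     idr_remove(&dev_nums_idr, n)          - free the number for reuse                */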
@@ -174,6 +272,16 @@ static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
device_del(&chip->dev);
+
+ /* Make the chip unavailable. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, NULL, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ /* Make the driver uncallable. */
+ down_write(&chip->ops_sem);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
}
static int tpm1_chip_register(struct tpm_chip *chip)
@@ -228,17 +336,11 @@ int tpm_chip_register(struct tpm_chip *chip)
if (rc)
goto out_err;
- /* Make the chip available. */
- spin_lock(&driver_lock);
- list_add_tail_rcu(&chip->list, &tpm_chip_list);
- spin_unlock(&driver_lock);
-
chip->flags |= TPM_CHIP_FLAG_REGISTERED;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj,
- &chip->dev.kobj,
- "ppi");
+ rc = __compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
if (rc && rc != -ENOENT) {
tpm_chip_unregister(chip);
return rc;
@@ -259,6 +361,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
* Takes the chip first away from the list of available TPM chips and then
* cleans up all the resources reserved by tpm_chip_register().
*
+ * Once this function returns, the driver callbacks in 'ops' will not be
+ * running and will no longer be started.
+ *
* NOTE: This function should be only called before deinitializing chip
* resources.
*/
@@ -267,13 +372,8 @@ void tpm_chip_unregister(struct tpm_chip *chip)
if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
return;
- spin_lock(&driver_lock);
- list_del_rcu(&chip->list);
- spin_unlock(&driver_lock);
- synchronize_rcu();
-
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
- sysfs_remove_link(&chip->pdev->kobj, "ppi");
+ sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
tpm1_chip_unregister(chip);
tpm_del_char_device(chip);
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 4f3137d9a35e..912ad30be585 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file)
* by the check of is_open variable, which is protected
* by driver_lock. */
if (test_and_set_bit(0, &chip->is_open)) {
- dev_dbg(chip->pdev, "Another process owns this TPM\n");
+ dev_dbg(&chip->dev, "Another process owns this TPM\n");
return -EBUSY;
}
@@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file)
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
- get_device(chip->pdev);
return 0;
}
@@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return -EFAULT;
}
- /* atomic tpm command send and result receive */
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+ */
+ if (tpm_try_get_ops(priv->chip)) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EPIPE;
+ }
out_size = tpm_transmit(priv->chip, priv->data_buffer,
sizeof(priv->data_buffer), 0);
+
+ tpm_put_ops(priv->chip);
if (out_size < 0) {
mutex_unlock(&priv->buffer_mutex);
return out_size;
@@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file)
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
clear_bit(0, &priv->chip->is_open);
- put_device(priv->chip->pdev);
kfree(priv);
return 0;
}
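From user space, the effect of taking the ops lock only around the transmit is that /dev/tpm0 may stay open across an unregister, and a later write simply fails with EPIPE instead of pinning the driver; a hypothetical probe of that behaviour (the command bytes are placeholders, not a valid TPM command):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char cmd[10] = { 0 };	/* placeholder payload */
	int fd = open("/dev/tpm0", O_RDWR);

	if (fd < 0)
		return 1;
	/* ... the TPM driver is unregistered while fd is still open ... */
	if (write(fd, cmd, sizeof(cmd)) < 0 && errno == EPIPE)
		fprintf(stderr, "chip gone, write rejected\n");
	close(fd);
	return 0;
}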
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 17abe52e6365..aaa5fa95dede 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -343,7 +343,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
@@ -353,7 +353,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
@@ -372,7 +372,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
goto out_recv;
if (chip->ops->req_canceled(chip, status)) {
- dev_err(chip->pdev, "Operation Canceled\n");
+ dev_err(&chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
@@ -382,14 +382,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
} while (time_before(jiffies, stop));
chip->ops->cancel(chip);
- dev_err(chip->pdev, "Operation Timed out\n");
+ dev_err(&chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
if (!(flags & TPM_TRANSMIT_UNLOCKED))
@@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
err = be32_to_cpu(header->return_code);
if (err != 0 && desc)
- dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err,
+ dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
return err;
@@ -514,7 +514,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
- dev_info(chip->pdev, "Issuing TPM_STARTUP");
+ dev_info(&chip->dev, "Issuing TPM_STARTUP");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
@@ -526,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
0, NULL);
}
if (rc) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"A TPM error (%zd) occurred attempting to determine the timeouts\n",
rc);
goto duration;
@@ -565,7 +565,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
/* Report adjusted timeouts */
if (chip->vendor.timeout_adjusted) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
old_timeout[0], new_timeout[0],
old_timeout[1], new_timeout[1],
@@ -612,7 +612,7 @@ duration:
chip->vendor.duration[TPM_MEDIUM] *= 1000;
chip->vendor.duration[TPM_LONG] *= 1000;
chip->vendor.duration_adjusted = true;
- dev_info(chip->pdev, "Adjusting TPM timeout parameters.");
+ dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
return 0;
}
@@ -687,7 +687,7 @@ int tpm_is_tpm2(u32 chip_num)
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
@@ -716,7 +716,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
else
rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
@@ -751,7 +751,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
rc = tpm2_pcr_extend(chip, pcr_idx, hash);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
@@ -761,7 +761,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"attempting extend a PCR value");
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
@@ -802,7 +802,9 @@ int tpm_do_selftest(struct tpm_chip *chip)
* around 300ms while the self test is ongoing, keep trying
* until the self test duration expires. */
if (rc == -ETIME) {
- dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test");
+ dev_info(
+ &chip->dev, HW_ERR
+ "TPM command timed out during continue self test");
msleep(delay_msec);
continue;
}
@@ -812,7 +814,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
/* TPM is disabled and/or deactivated; driver can
* proceed and TPM does handle commands for
@@ -840,7 +842,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd");
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
@@ -966,10 +968,10 @@ int tpm_pm_suspend(struct device *dev)
}
if (rc)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Error (%d) sending savestate before suspend\n", rc);
else if (try > 0)
- dev_warn(chip->pdev, "TPM savestate took %dms\n",
+ dev_warn(&chip->dev, "TPM savestate took %dms\n",
try * TPM_TIMEOUT_RETRY);
return rc;
@@ -1023,7 +1025,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
err = tpm2_get_random(chip, out, max);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return err;
}
@@ -1045,7 +1047,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
num_bytes -= recd;
} while (retries-- && total < max);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return total ? total : -EIO;
}
EXPORT_SYMBOL_GPL(tpm_get_random);
@@ -1071,7 +1073,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_seal_trusted(chip, payload, options);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_seal_trusted);
@@ -1097,7 +1099,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_unseal_trusted(chip, payload, options);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
@@ -1124,6 +1127,7 @@ static int __init tpm_init(void)
static void __exit tpm_exit(void)
{
+ idr_destroy(&dev_nums_idr);
class_destroy(tpm_class);
unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES);
}
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index f880856aa75e..06ac6e9657d2 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -38,6 +38,8 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
struct tpm_chip *chip = dev_get_drvdata(dev);
+ memset(&tpm_cmd, 0, sizeof(tpm_cmd));
+
tpm_cmd.header.in = tpm_readpubek_header;
err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0,
"attempting to read the PUBEK");
@@ -284,16 +286,28 @@ static const struct attribute_group tpm_dev_group = {
int tpm_sysfs_add_device(struct tpm_chip *chip)
{
int err;
- err = sysfs_create_group(&chip->pdev->kobj,
+
+ /* XXX: If you wish to remove this restriction, you must first update
+ * tpm_sysfs to explicitly lock chip->ops.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ err = sysfs_create_group(&chip->dev.parent->kobj,
&tpm_dev_group);
if (err)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"failed to create sysfs attributes, %d\n", err);
return err;
}
void tpm_sysfs_del_device(struct tpm_chip *chip)
{
- sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group);
+ /* The sysfs routines rely on an implicit tpm_try_get_ops(); this
+ * function is called before ops is NULL'd, and the sysfs core
+ * synchronizes the removal so that no callbacks are running or can
+ * run again.
+ */
+ sysfs_remove_group(&chip->dev.parent->kobj, &tpm_dev_group);
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2216861f89f1..772d99b3a8e4 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -34,7 +34,7 @@
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
- TPM_NUM_DEVICES = 256,
+ TPM_NUM_DEVICES = 65536,
TPM_RETRY = 50, /* 5 seconds */
};
@@ -171,11 +171,16 @@ enum tpm_chip_flags {
};
struct tpm_chip {
- struct device *pdev; /* Device stuff */
struct device dev;
struct cdev cdev;
+ /* A driver callback under ops cannot be run unless ops_sem is held
+ * (sometimes implicitly, e.g. for the sysfs code). ops becomes NULL
+ * when the driver is unregistered; see tpm_try_get_ops().
+ */
+ struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
+
unsigned int flags;
int dev_num; /* /dev/tpm# */
@@ -195,17 +200,10 @@ struct tpm_chip {
acpi_handle acpi_dev_handle;
char ppi_version[TPM_PPI_VERSION_LEN + 1];
#endif /* CONFIG_ACPI */
-
- struct list_head list;
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-static inline void tpm_chip_put(struct tpm_chip *chip)
-{
- module_put(chip->pdev->driver->owner);
-}
-
static inline int tpm_read_index(int base, int index)
{
outb(index, base);
@@ -497,6 +495,7 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
extern struct class *tpm_class;
extern dev_t tpm_devt;
extern const struct file_operations tpm_fops;
+extern struct idr dev_nums_idr;
enum tpm_transmit_flags {
TPM_TRANSMIT_UNLOCKED = BIT(0),
@@ -517,6 +516,9 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *, bool);
struct tpm_chip *tpm_chip_find_get(int chip_num);
+__must_check int tpm_try_get_ops(struct tpm_chip *chip);
+void tpm_put_ops(struct tpm_chip *chip);
+
extern struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
extern int tpm_chip_register(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index cb7e4f6b70ba..286bd090a488 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -570,7 +570,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
if (rc) {
- dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n",
+ dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
handle);
return;
}
@@ -580,7 +580,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags,
"flushing context");
if (rc)
- dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle,
+ dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle,
rc);
tpm_buf_destroy(&buf);
@@ -753,7 +753,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
* except print the error code on a system failure.
*/
if (rc < 0)
- dev_warn(chip->pdev, "transmit returned %d while stopping the TPM",
+ dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
rc);
}
EXPORT_SYMBOL_GPL(tpm2_shutdown);
@@ -820,7 +820,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
* immediately. This is a workaround for that.
*/
if (rc == TPM2_RC_TESTING) {
- dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n");
+ dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
rc = 0;
}
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index dfadad0916a1..a48a878f791d 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -49,7 +49,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (i = 0; i < 6; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading header\n");
+ dev_err(&chip->dev, "error reading header\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@@ -60,12 +60,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size = be32_to_cpu(*native_size);
if (count < size) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Recv size(%d) less than available space\n", size);
for (; i < size; i++) { /* clear the waiting data anyway */
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading data\n");
+ dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
}
@@ -76,7 +76,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (; i < size; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading data\n");
+ dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@@ -86,7 +86,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
status = ioread8(chip->vendor.iobase + 1);
if (status & ATML_STATUS_DATA_AVAIL) {
- dev_err(chip->pdev, "data available is stuck\n");
+ dev_err(&chip->dev, "data available is stuck\n");
return -EIO;
}
@@ -97,9 +97,9 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
int i;
- dev_dbg(chip->pdev, "tpm_atml_send:\n");
+ dev_dbg(&chip->dev, "tpm_atml_send:\n");
for (i = 0; i < count; i++) {
- dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
+ dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
iowrite8(buf[i], chip->vendor.iobase);
}
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 8dfb88b9739c..dd8f0eb3170a 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -52,7 +52,7 @@ struct priv_data {
static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
priv->len = 0;
@@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
status = i2c_master_send(client, buf, len);
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
return status;
@@ -71,7 +71,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
struct tpm_output_header *hdr =
(struct tpm_output_header *)priv->buffer;
u32 expected_len;
@@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return -ENOMEM;
if (priv->len >= expected_len) {
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
rc = i2c_master_recv(client, buf, expected_len);
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static void i2c_atmel_cancel(struct tpm_chip *chip)
{
- dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported");
+ dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
}
static u8 i2c_atmel_read_status(struct tpm_chip *chip)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
int rc;
/* The TPM fails the I2C read until it is ready, so we do the entire
@@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip)
/* Once the TPM has completed the command the command remains readable
* until another command is issued. */
rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s: sts=%d", __func__, rc);
if (rc <= 0)
return 0;
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 63d5d22e9e60..f2aa99e34b4b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -446,7 +446,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
size = recv_data(chip, buf, TPM_HEADER_SIZE);
if (size < TPM_HEADER_SIZE) {
- dev_err(chip->pdev, "Unable to read header\n");
+ dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@@ -459,14 +459,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (size < expected) {
- dev_err(chip->pdev, "Unable to read remainder of result\n");
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->pdev, "Error left over data\n");
+ dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 847f1597fe9b..a1e1474dda30 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -96,13 +96,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
/* read TPM_STS register */
static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
u8 data;
status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
if (status <= 0) {
- dev_err(chip->pdev, "%s() error return %d\n", __func__,
+ dev_err(&chip->dev, "%s() error return %d\n", __func__,
status);
data = TPM_STS_ERR_VAL;
}
@@ -127,13 +127,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
/* write commandReady to TPM_STS register */
static void i2c_nuvoton_ready(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
/* this causes the current command to be aborted */
status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
if (status < 0)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail to write TPM_STS.commandReady\n", __func__);
}
@@ -212,7 +212,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
return 0;
} while (time_before(jiffies, stop));
}
- dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
value);
return -ETIMEDOUT;
}
@@ -240,7 +240,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
&chip->vendor.read_queue) == 0) {
burst_count = i2c_nuvoton_get_burstcount(client, chip);
if (burst_count < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail to read burstCount=%d\n", __func__,
burst_count);
return -EIO;
@@ -249,12 +249,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
bytes2read, &buf[size]);
if (rc < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail on i2c_nuvoton_read_buf()=%d\n",
__func__, rc);
return -EIO;
}
- dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read);
+ dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
size += bytes2read;
}
@@ -264,7 +264,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
/* Read TPM command results */
static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct device *dev = chip->pdev;
+ struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
int expected, status, burst_count, retries, size = 0;
@@ -334,7 +334,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
break;
}
i2c_nuvoton_ready(chip);
- dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size);
+ dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
return size;
}
@@ -347,7 +347,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
*/
static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
- struct device *dev = chip->pdev;
+ struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
u32 ordinal;
size_t count = 0;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 6c488e635fdd..e3cf9f3545c5 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
- dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n");
+ dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
if (wait_for_bit == STAT_RDA)
- dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n");
+ dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
return -EIO;
}
return 0;
@@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
static void tpm_wtx(struct tpm_chip *chip)
{
number_of_wtx++;
- dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n",
+ dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
number_of_wtx, TPM_MAX_WTX_PACKAGES);
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX);
@@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip)
static void tpm_wtx_abort(struct tpm_chip *chip)
{
- dev_info(chip->pdev, "Aborting WTX\n");
+ dev_info(&chip->dev, "Aborting WTX\n");
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX_ABORT);
wait_and_send(chip, 0x00);
@@ -257,7 +257,7 @@ recv_begin:
}
if (buf[0] != TPM_VL_VER) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Wrong transport protocol implementation!\n");
return -EIO;
}
@@ -272,7 +272,7 @@ recv_begin:
}
if ((size == 0x6D00) && (buf[1] == 0x80)) {
- dev_err(chip->pdev, "Error handling on vendor layer!\n");
+ dev_err(&chip->dev, "Error handling on vendor layer!\n");
return -EIO;
}
@@ -284,7 +284,7 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX) {
- dev_info(chip->pdev, "WTX-package received\n");
+ dev_info(&chip->dev, "WTX-package received\n");
if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
tpm_wtx(chip);
goto recv_begin;
@@ -295,14 +295,14 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
- dev_info(chip->pdev, "WTX-abort acknowledged\n");
+ dev_info(&chip->dev, "WTX-abort acknowledged\n");
return size;
}
if (buf[1] == TPM_CTRL_ERROR) {
- dev_err(chip->pdev, "ERROR-package received:\n");
+ dev_err(&chip->dev, "ERROR-package received:\n");
if (buf[4] == TPM_INF_NAK)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"-> Negative acknowledgement"
" - retransmit command!\n");
return -EIO;
@@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
ret = empty_fifo(chip, 1);
if (ret) {
- dev_err(chip->pdev, "Timeout while clearing FIFO\n");
+ dev_err(&chip->dev, "Timeout while clearing FIFO\n");
return -EIO;
}
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 289389ecef84..766370bed60c 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -113,7 +113,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip)
}
while (time_before(jiffies, stop));
- dev_info(chip->pdev, "wait for ready failed\n");
+ dev_info(&chip->dev, "wait for ready failed\n");
return -EBUSY;
}
@@ -129,12 +129,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
- dev_err(chip->pdev, "F0 timeout\n");
+ dev_err(&chip->dev, "F0 timeout\n");
return -EIO;
}
if ((data =
inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
- dev_err(chip->pdev, "not in normal mode (0x%x)\n",
+ dev_err(&chip->dev, "not in normal mode (0x%x)\n",
data);
return -EIO;
}
@@ -143,7 +143,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
for (p = buffer; p < &buffer[count]; p++) {
if (wait_for_stat
(chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"OBF timeout (while reading data)\n");
return -EIO;
}
@@ -154,11 +154,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
if ((data & NSC_STATUS_F0) == 0 &&
(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
- dev_err(chip->pdev, "F0 not set\n");
+ dev_err(&chip->dev, "F0 not set\n");
return -EIO;
}
if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"expected end of command(0x%x)\n", data);
return -EIO;
}
@@ -189,19 +189,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev, "IBF timeout\n");
+ dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
- dev_err(chip->pdev, "IBR timeout\n");
+ dev_err(&chip->dev, "IBR timeout\n");
return -EIO;
}
for (i = 0; i < count; i++) {
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"IBF timeout (while writing data)\n");
return -EIO;
}
@@ -209,7 +209,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
}
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev, "IBF timeout\n");
+ dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index f10a107614b4..7f13221aeb30 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -293,7 +293,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
if ((size =
recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
- dev_err(chip->pdev, "Unable to read header\n");
+ dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@@ -306,7 +306,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if ((size +=
recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE)) < expected) {
- dev_err(chip->pdev, "Unable to read remainder of result\n");
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
@@ -315,7 +315,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
&chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->pdev, "Error left over data\n");
+ dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
@@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
- devm_free_irq(chip->pdev, chip->vendor.irq, chip);
+ devm_free_irq(&chip->dev, chip->vendor.irq, chip);
chip->vendor.irq = 0;
}
@@ -463,7 +463,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
msleep(1);
if (!priv->irq_tested) {
disable_interrupts(chip);
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
FW_BUG "TPM interrupt not working, polling instead\n");
}
priv->irq_tested = true;
@@ -533,7 +533,7 @@ static int probe_itpm(struct tpm_chip *chip)
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
- dev_info(chip->pdev, "Detected an iTPM.\n");
+ dev_info(&chip->dev, "Detected an iTPM.\n");
rc = 1;
} else
rc = -EFAULT;
@@ -766,7 +766,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
if (devm_request_irq
(dev, i, tis_int_probe, IRQF_SHARED,
chip->devname, chip) != 0) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
"Unable to request irq: %d for probe\n",
i);
continue;
@@ -818,7 +818,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
if (devm_request_irq
(dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
chip->devname, chip) != 0) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
"Unable to request irq: %d for use\n",
chip->vendor.irq);
chip->vendor.irq = 0;
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 72a75873b810..eb72217b9b1c 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -397,6 +397,7 @@ struct clk_osm {
u32 acd_extint1_cfg;
u32 acd_autoxfer_ctl;
u32 acd_debugfs_addr;
+ u32 acd_debugfs_addr_size;
bool acd_init;
bool secure_init;
bool red_fsm_en;
@@ -1449,6 +1450,7 @@ static int clk_osm_resources_init(struct platform_device *pdev)
return -ENOMEM;
}
pwrcl_clk.pbases[ACD_BASE] = pbase;
+ pwrcl_clk.acd_debugfs_addr_size = resource_size(res);
pwrcl_clk.vbases[ACD_BASE] = vbase;
pwrcl_clk.acd_init = true;
} else {
@@ -1466,6 +1468,7 @@ static int clk_osm_resources_init(struct platform_device *pdev)
return -ENOMEM;
}
perfcl_clk.pbases[ACD_BASE] = pbase;
+ perfcl_clk.acd_debugfs_addr_size = resource_size(res);
perfcl_clk.vbases[ACD_BASE] = vbase;
perfcl_clk.acd_init = true;
} else {
@@ -3015,6 +3018,11 @@ static int debugfs_get_debug_reg(void *data, u64 *val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
*val = readl_relaxed((char *)c->vbases[ACD_BASE] +
c->acd_debugfs_addr);
@@ -3027,6 +3035,11 @@ static int debugfs_set_debug_reg(void *data, u64 val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
clk_osm_acd_master_write_reg(c, val, c->acd_debugfs_addr);
else
@@ -3044,7 +3057,13 @@ static int debugfs_get_debug_reg_addr(void *data, u64 *val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
*val = c->acd_debugfs_addr;
+
return 0;
}
@@ -3052,7 +3071,16 @@ static int debugfs_set_debug_reg_addr(void *data, u64 val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
+ if (val >= c->acd_debugfs_addr_size)
+ return -EINVAL;
+
c->acd_debugfs_addr = val;
+
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_addr_fops,
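For context, these accessors sit behind debugfs attributes; a rough sketch of the wiring, where the second fops name and example_add_acd_debugfs() are assumptions for illustration (only debugfs_acd_debug_reg_addr_fops appears in the patch):

/* Sketch only: exposing the range-checked accessors as debugfs files. */
DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_fops,
			debugfs_get_debug_reg, debugfs_set_debug_reg, "0x%llx\n");

static void example_add_acd_debugfs(struct clk_osm *c, struct dentry *dir)
{
	/* debugfs_set_debug_reg_addr() now rejects offsets at or beyond
	 * acd_debugfs_addr_size, so the register file below can only reach
	 * the ACD window that was mapped at probe time. */
	debugfs_create_file("acd_debug_reg_addr", 0644, dir, c,
			    &debugfs_acd_debug_reg_addr_fops);
	debugfs_create_file("acd_debug_reg", 0644, dir, c,
			    &debugfs_acd_debug_reg_fops);
}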
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index d99e13817a29..ddaeca1b29e4 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -384,6 +384,7 @@ struct clk_osm {
u32 acd_extint1_cfg;
u32 acd_autoxfer_ctl;
u32 acd_debugfs_addr;
+ u32 acd_debugfs_addr_size;
bool acd_init;
bool secure_init;
bool red_fsm_en;
@@ -1371,6 +1372,7 @@ static int clk_osm_resources_init(struct platform_device *pdev)
return -ENOMEM;
}
pwrcl_clk.pbases[ACD_BASE] = pbase;
+ pwrcl_clk.acd_debugfs_addr_size = resource_size(res);
pwrcl_clk.vbases[ACD_BASE] = vbase;
pwrcl_clk.acd_init = true;
} else {
@@ -1388,6 +1390,7 @@ static int clk_osm_resources_init(struct platform_device *pdev)
return -ENOMEM;
}
perfcl_clk.pbases[ACD_BASE] = pbase;
+ perfcl_clk.acd_debugfs_addr_size = resource_size(res);
perfcl_clk.vbases[ACD_BASE] = vbase;
perfcl_clk.acd_init = true;
} else {
@@ -2832,6 +2835,11 @@ static int debugfs_get_debug_reg(void *data, u64 *val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
*val = readl_relaxed((char *)c->vbases[ACD_BASE] +
c->acd_debugfs_addr);
@@ -2844,6 +2852,11 @@ static int debugfs_set_debug_reg(void *data, u64 val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
clk_osm_acd_master_write_reg(c, val, c->acd_debugfs_addr);
else
@@ -2861,7 +2874,13 @@ static int debugfs_get_debug_reg_addr(void *data, u64 *val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
*val = c->acd_debugfs_addr;
+
return 0;
}
@@ -2869,6 +2888,14 @@ static int debugfs_set_debug_reg_addr(void *data, u64 val)
{
struct clk_osm *c = data;
+ if (!c->pbases[ACD_BASE]) {
+ pr_err("ACD base start not defined\n");
+ return -EINVAL;
+ }
+
+ if (val >= c->acd_debugfs_addr_size)
+ return -EINVAL;
+
c->acd_debugfs_addr = val;
return 0;
}
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index b91e115462ae..abbee61c99c8 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -479,6 +479,7 @@ static void cpufreq_interactive_timer(unsigned long data)
bool skip_hispeed_logic, skip_min_sample_time;
bool jump_to_max_no_ts = false;
bool jump_to_max = false;
+ bool start_hyst = true;
if (!down_read_trylock(&ppol->enable_sem))
return;
@@ -588,8 +589,12 @@ static void cpufreq_interactive_timer(unsigned long data)
}
if (now - ppol->max_freq_hyst_start_time <
- tunables->max_freq_hysteresis)
+ tunables->max_freq_hysteresis) {
+ if (new_freq < ppol->policy->max &&
+ ppol->policy->max <= tunables->hispeed_freq)
+ start_hyst = false;
new_freq = max(tunables->hispeed_freq, new_freq);
+ }
if (!skip_hispeed_logic &&
ppol->target_freq >= tunables->hispeed_freq &&
@@ -646,7 +651,7 @@ static void cpufreq_interactive_timer(unsigned long data)
ppol->floor_validate_time = now;
}
- if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
+ if (start_hyst && new_freq >= ppol->policy->max && !jump_to_max_no_ts)
ppol->max_freq_hyst_start_time = now;
if (ppol->target_freq == new_freq &&
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 0dadb6332f0e..7abe908427df 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req)
ctx->flags |= SHA_FLAGS_FINUP;
err1 = atmel_sha_update(req);
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ if (err1 == -EINPROGRESS ||
+ (err1 == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;
/*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 99d5e11db194..e06cc5df30be 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -498,7 +498,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..3ce1d5cdcbd2 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
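The interruptible wait was unsafe here because `result` lives on the caller's stack and is completed asynchronously by split_key_done(); a reasoning sketch of the window the change closes (comments added for this write-up, code as in the patch):

/* With wait_for_completion_interruptible(), a pending signal could hand
 * control back before split_key_done() ran; the function would then unwind
 * while the job-ring callback still held a pointer to `result` on the dead
 * stack frame. wait_for_completion() cannot return early, so the frame is
 * guaranteed to outlive the callback. */
wait_for_completion(&result.completion);
ret = result.err;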
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 49165daa807f..490f8d9ddb9f 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -975,7 +975,8 @@ static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
{
- int ret = 0, scm_ret = 0;
+ int ret = 0;
+ u64 scm_ret = 0;
/* scm command buffer structure */
struct qcom_scm_cmd_buf {
@@ -1001,7 +1002,7 @@ static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
cbuf.device_id = ICE_TZ_DEV_ID;
ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
if (ret || scm_ret) {
- pr_err("%s: failed, ret %d scm_ret %d\n",
+ pr_err("%s: failed, ret %d scm_ret %llu\n",
__func__, ret, scm_ret);
if (!ret)
ret = scm_ret;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 4ab8ca143f6c..b44f926a6ba0 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -2155,6 +2155,10 @@ static int _sha_complete(struct qce_device *pce_dev, int req_info)
pce_sps_data = &preq_info->ce_sps;
qce_callback = preq_info->qce_cb;
areq = (struct ahash_request *) preq_info->areq;
+ if (!areq) {
+ pr_err("sha operation error. areq is NULL\n");
+ return -ENXIO;
+ }
qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
DMA_TO_DEVICE);
memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
@@ -2970,7 +2974,7 @@ static inline int qce_alloc_req_info(struct qce_device *pce_dev)
request_index++;
if (request_index >= MAX_QCE_BAM_REQ)
request_index = 0;
- if (xchg(&pce_dev->ce_request_info[request_index].
+ if (atomic_xchg(&pce_dev->ce_request_info[request_index].
in_use, true) == false) {
pce_dev->ce_request_index = request_index;
return request_index;
@@ -2986,7 +2990,8 @@ static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
bool is_complete)
{
pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
- if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+ if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
+ false) == true) {
if (req_info < MAX_QCE_BAM_REQ && is_complete)
atomic_dec(&pce_dev->no_of_queued_req);
} else
@@ -4610,7 +4615,7 @@ static int qce_dummy_req(struct qce_device *pce_dev)
{
int ret = 0;
- if (!(xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
+ if (!(atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
in_use, true) == false))
return -EBUSY;
ret = qce_process_sha_req(pce_dev, NULL);
@@ -5969,7 +5974,7 @@ void *qce_open(struct platform_device *pdev, int *rc)
}
for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
- pce_dev->ce_request_info[i].in_use = false;
+ atomic_set(&pce_dev->ce_request_info[i].in_use, false);
pce_dev->ce_request_index = 0;
pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
@@ -6133,12 +6138,13 @@ EXPORT_SYMBOL(qce_hw_support);
void qce_dump_req(void *handle)
{
int i;
+ bool req_in_use;
struct qce_device *pce_dev = (struct qce_device *)handle;
for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
- pr_info("qce_dump_req %d %d\n", i,
- pce_dev->ce_request_info[i].in_use);
- if (pce_dev->ce_request_info[i].in_use == true)
+ req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
+ pr_info("qce_dump_req %d %d\n", i, req_in_use);
+ if (req_in_use == true)
_qce_dump_descr_fifos(pce_dev, i);
}
}
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
index 6dba3664ff08..ab0d21da72c5 100644
--- a/drivers/crypto/msm/qce50.h
+++ b/drivers/crypto/msm/qce50.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -214,7 +214,7 @@ struct ce_sps_data {
};
struct ce_request_info {
- bool in_use;
+ atomic_t in_use;
bool in_prog;
enum qce_xfer_type_enum xfer_type;
struct ce_sps_data ce_sps;
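With in_use now an atomic_t, claiming or releasing a request slot becomes a single atomic swap; condensed from the hunks above (the claim succeeds only when the previous value was false):

/* Claim slot i: an old value of false means it was free and is now ours. */
if (atomic_xchg(&pce_dev->ce_request_info[i].in_use, true) == false)
	pce_dev->ce_request_index = i;

/* Release it later; the real qce_free_req_info() also checks is_complete
 * before decrementing the queued-request count. */
atomic_xchg(&pce_dev->ce_request_info[i].in_use, false);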
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 5b364f053b1b..f38fc422b35e 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -3972,6 +3972,7 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
if (len <= SHA1_BLOCK_SIZE) {
memcpy(&sha_ctx->authkey[0], key, len);
@@ -3979,16 +3980,19 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
} else {
sha_ctx->alg = QCE_HASH_SHA1;
sha_ctx->diglen = SHA1_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA1 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
if (len <= SHA256_BLOCK_SIZE) {
@@ -3997,11 +4001,13 @@ static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
} else {
sha_ctx->alg = QCE_HASH_SHA256;
sha_ctx->diglen = SHA256_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA256 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha_hmac_init_ihash(struct ahash_request *req,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9a8a18aafd5c..6a60936b46e0 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -804,7 +804,7 @@ static void talitos_unregister_rng(struct device *dev)
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
-#define TALITOS_MAX_KEY_SIZE 96
+#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
@@ -1388,6 +1388,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ if (keylen > TALITOS_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
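Assuming the standard kernel constants (AES_MAX_KEY_SIZE is 32 bytes and SHA512_BLOCK_SIZE is 128 bytes), the new ceiling works out as follows, and the added setkey check keeps the memcpy() into ctx->key inside that buffer:

/* TALITOS_MAX_KEY_SIZE = AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE
 *                      = 32 + 128 = 160 bytes   (old hard-coded limit: 96) */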
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 690e3b4f8202..b36da3c1073f 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e07912..abb75ebd65ea 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -105,6 +105,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
/* I/OAT v3.3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -250,10 +252,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
}
}
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+ return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
static bool is_xeon_cb32(struct pci_dev *pdev)
{
return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
- is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+ is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}
bool is_bwd_ioat(struct pci_dev *pdev)
@@ -1350,6 +1357,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
if (device->version >= IOAT_VER_3_0) {
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
err = ioat3_dma_probe(device, ioat_dca_enabled);
if (device->version >= IOAT_VER_3_3)
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index a415edbe61b1..149ec2bd9bc6 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -146,6 +146,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_am335x_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
@@ -310,6 +311,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_dra7_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
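Both added of_node_put() calls apply the usual OF reference rule: a node handle obtained from an of_* lookup must be dropped on every exit path, including early error returns. A condensed view (that dma_node comes from such a lookup is an assumption; the lookup itself is outside these hunks):

match = of_match_node(ti_dra7_master_match, dma_node);
if (!match) {
	dev_err(&pdev->dev, "DMA master is not supported\n");
	of_node_put(dma_node);	/* drop the lookup reference before bailing out */
	return -EINVAL;
}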
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 475c38fe9245..e40a6d8b0b92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1126,6 +1126,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (*pos >= adev->mc.mc_vram_size)
+ return -ENXIO;
+
while (size) {
unsigned long flags;
uint32_t value;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index a3b96d691ac9..58bf94b69186 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
return false;
}
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!recv_hdr.somt && !msg->have_somt)
+ return false;
+
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
@@ -2163,7 +2170,7 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
@@ -2178,12 +2185,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
- return;
+ return false;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
- return;
+ return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2195,21 +2202,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk\n");
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
+ return false;
}
+
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
- if (ret == false)
+ if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
+ return false;
+ }
+
curreply += len;
replylen -= len;
}
+ return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, false);
+ if (!drm_dp_get_one_sb_msg(mgr, false)) {
+ memset(&mgr->down_rep_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
@@ -2265,7 +2283,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, true);
+
+ if (!drm_dp_get_one_sb_msg(mgr, true)) {
+ memset(&mgr->up_req_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
@@ -2317,7 +2340,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
- drm_dp_put_mst_branch_device(mstb);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 39b8e171cad5..47c1747e7ae3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2716,6 +2716,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
+#define COLORIMETRY_EXTENDED_DATA_BLOCK 0x05
#define EXTENDED_TAG 0x07
#define VIDEO_CAPABILITY_BLOCK 0x07
#define Y420_VIDEO_DATA_BLOCK 0x0E
@@ -3526,11 +3527,17 @@ drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
(db[2] & (BIT(3) | BIT(2))) >> 2;
connector->ce_scan_info =
db[2] & (BIT(1) | BIT(0));
+ connector->rgb_qs =
+ db[2] & BIT(6);
+ connector->yuv_qs =
+ db[2] & BIT(7);
DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
(int) connector->pt_scan_info,
(int) connector->it_scan_info,
(int) connector->ce_scan_info);
+ DRM_DEBUG_KMS("rgb_quant_range_select %d", connector->rgb_qs);
+ DRM_DEBUG_KMS("ycc_quant_range_select %d", connector->yuv_qs);
}
static bool drm_edid_is_luminance_value_present(
@@ -3589,6 +3596,50 @@ drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
}
/*
+ * drm_extract_clrmetry_db - Parse the HDMI colorimetry extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI colorimetry extended block
+ *
+ * Parses the HDMI colorimetry block to extract sink info for @connector.
+ */
+static void
+drm_extract_clrmetry_db(struct drm_connector *connector, const u8 *db)
+{
+
+ if (!db) {
+ DRM_ERROR("invalid db\n");
+ return;
+ }
+
+ /* Bit 0: xvYCC_601 */
+ if (db[2] & BIT(0))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_601;
+ /* Bit 1: xvYCC_709 */
+ if (db[2] & BIT(1))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_709;
+ /* Bit 2: sYCC_601 */
+ if (db[2] & BIT(2))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_sYCC_601;
+ /* Bit 3: ADBYCC_601 */
+ if (db[2] & BIT(3))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADBYCC_601;
+ /* Bit 4: ADB_RGB */
+ if (db[2] & BIT(4))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADB_RGB;
+ /* Bit 5: BT2020_CYCC */
+ if (db[2] & BIT(5))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_CYCC;
+ /* Bit 6: BT2020_YCC */
+ if (db[2] & BIT(6))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_YCC;
+ /* Bit 7: BT2020_RGB */
+ if (db[2] & BIT(7))
+ connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_RGB;
+
+ DRM_DEBUG_KMS("colorimetry fmt 0x%x\n", connector->color_enc_fmt);
+}
+
+/*
* drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
* @connector: connector corresponding to the HDMI sink
* @edid: handle to the EDID structure
@@ -3620,6 +3671,9 @@ struct edid *edid)
case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
drm_extract_hdr_db(connector, db);
break;
+ case COLORIMETRY_EXTENDED_DATA_BLOCK:
+ drm_extract_clrmetry_db(connector, db);
+ break;
default:
break;
}
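As a hedged aside, the parser above only ORs capability bits into connector->color_enc_fmt; a consumer still has to test those bits before advertising a wide-gamut output. A minimal sketch of such a check (sink_supports_bt2020() is a hypothetical helper, not part of this patch; the flag names come from the hunk above):

static bool sink_supports_bt2020(const struct drm_connector *connector)
{
	/* Both BT.2020 bits must be set before the source switches the
	 * AVI infoframe to BT.2020 colorimetry, mirroring the check in
	 * sde_hdmi_update_colorimetry() later in this diff.
	 */
	return (connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_YCC) &&
	       (connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_RGB);
}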
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 45a38b247727..765c1c087c76 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,6 +15,7 @@
#include "msm_iommu.h"
#include "msm_trace.h"
#include "a5xx_gpu.h"
+#include <linux/clk/msm-clk.h>
#define SECURE_VA_START 0xc0000000
#define SECURE_VA_SIZE SZ_256M
@@ -1169,6 +1170,17 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
{
int ret;
+ /*
+ * Between suspend and resume the GPU clocks are gated without a
+ * complete power down, typically between frames. Set the memory
+ * retention flags on the GPU core clock so that internal memory is
+ * retained across those clock toggles.
+ */
+ if (gpu->core_clk) {
+ clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_PERIPH);
+ clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_MEM);
+ }
+
/* Turn on the core power */
ret = msm_gpu_pm_resume(gpu);
if (ret)
@@ -1208,6 +1220,12 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ /* Turn off the memory retention flag when not necessary */
+ if (gpu->core_clk) {
+ clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_MEM);
+ }
+
/* Only do this next bit if we are about to go down */
if (gpu->active_cnt == 1) {
/* Clear the VBIF pipe before shutting down */
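The suspend and resume hunks above always toggle the retention flags in pairs. A hedged sketch of the same operation factored into one helper (a5xx_set_mem_retention() is hypothetical; clk_set_flags() and the CLKFLAG_* values come from the downstream msm-clk header included above):

static void a5xx_set_mem_retention(struct clk *core_clk, bool retain)
{
	if (!core_clk)
		return;

	/* Retain (resume path) or drop (suspend path) core memories
	 * across GPU clock toggles between frames.
	 */
	clk_set_flags(core_clk, retain ?
		CLKFLAG_RETAIN_PERIPH : CLKFLAG_NORETAIN_PERIPH);
	clk_set_flags(core_clk, retain ?
		CLKFLAG_RETAIN_MEM : CLKFLAG_NORETAIN_MEM);
}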
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index e637237fa811..c30b65785ab6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -70,6 +70,8 @@ struct a5xx_gpu {
* PREEMPT_NONE - no preemption in progress. Next state START.
* PREEMPT_START - The trigger is evaluating if preemption is possible. Next
* states: TRIGGERED, NONE
+ * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
+ * state: NONE.
* PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
* states: FAULTED, PENDING
* PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
@@ -81,6 +83,7 @@ struct a5xx_gpu {
enum preempt_state {
PREEMPT_NONE = 0,
PREEMPT_START,
+ PREEMPT_ABORT,
PREEMPT_TRIGGERED,
PREEMPT_FAULTED,
PREEMPT_PENDING,
@@ -184,7 +187,10 @@ int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
/* Return true if we are in a preempt state */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
- return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE);
+ int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_ABORT);
}
int a5xx_counters_init(struct adreno_gpu *adreno_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 6ab3ba076c2f..44d4ca35fa09 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -128,9 +128,20 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
* one do nothing except to update the wptr to the latest and greatest
*/
if (!ring || (a5xx_gpu->cur_ring == ring)) {
- update_wptr(gpu, ring);
-
- /* Set the state back to NONE */
+ /*
+ * It's possible that while a preemption request is in progress
+ * from an irq context, a user context trying to submit fails to
+ * update the write pointer because it observes that the preempt
+ * state is not PREEMPT_NONE.
+ *
+ * Close the race by introducing the intermediate state
+ * PREEMPT_ABORT, which tells the submit path that the
+ * ringbuffer is not going to change and that the write
+ * pointer can safely be updated.
+ */
+
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
return;
}
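For context, a sketch of how a submit path could rely on the new PREEMPT_ABORT state through a5xx_in_preempt(); this is illustrative only, not the driver's actual submit code, and update_wptr() refers to the local helper used in the hunk above:

static void a5xx_submit_update_wptr(struct msm_gpu *gpu,
		struct a5xx_gpu *a5xx_gpu, struct msm_ringbuffer *ring)
{
	/* PREEMPT_NONE and PREEMPT_ABORT both mean the current ring is
	 * not about to change, so the write pointer can be updated
	 * directly; any other state leaves the update to the preempt
	 * completion path.
	 */
	if (!a5xx_in_preempt(a5xx_gpu))
		update_wptr(gpu, ring);
}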
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index a498a60cd52d..4e4709d6172f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -173,6 +173,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ mutex_lock(&dev->struct_mutex);
+ gpu->funcs->pm_suspend(gpu);
+ mutex_unlock(&dev->struct_mutex);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index f1c44b30575f..c98f4511d644 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -1970,6 +1970,123 @@ enable_packet_control:
hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
}
+static void sde_hdmi_update_colorimetry(struct sde_hdmi *display,
+ bool use_bt2020)
+{
+ struct hdmi *hdmi;
+ struct drm_connector *connector;
+ bool mode_is_yuv = false;
+ struct drm_display_mode *mode;
+ u32 mode_fmt_flags = 0;
+ u8 checksum;
+ u32 avi_info0 = 0;
+ u32 avi_info1 = 0;
+ u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+ u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+ struct hdmi_avi_infoframe info;
+
+ if (!display) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ hdmi = display->ctrl.ctrl;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ connector = display->ctrl.ctrl->connector;
+
+ if (!connector) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ if (!connector->hdr_supported) {
+ SDE_DEBUG("HDR is not supported\n");
+ return;
+ }
+
+ /* If sink doesn't support BT2020, just return */
+ if (!(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_YCC) ||
+ !(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_RGB)) {
+ SDE_DEBUG("BT2020 colorimetry is not supported\n");
+ return;
+ }
+
+ /* If there is no change in colorimetry, just return */
+ if (use_bt2020 && display->bt2020_colorimetry)
+ return;
+ else if (!use_bt2020 && !display->bt2020_colorimetry)
+ return;
+
+ mode = &display->mode;
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /**
+ * Clear the RGB/YUV format flags before calling the upstream API,
+ * since the API also compares mode flags when matching a mode
+ */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+ drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
+
+ /* Set the flag only if the mode supports YUV alone, not both YUV and RGB */
+ if ((mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ && !(mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_RGB444)) {
+ mode_is_yuv = true;
+ }
+
+
+ if (!display->bt2020_colorimetry && use_bt2020) {
+ /**
+ * 1. Update colorimetry to use extended
+ * 2. Change extended to use BT2020
+ * 3. Change colorspace based on mode
+ * 4. Use limited as BT2020 is always limited
+ */
+ info.colorimetry = SDE_HDMI_USE_EXTENDED_COLORIMETRY;
+ info.extended_colorimetry = SDE_HDMI_BT2020_COLORIMETRY;
+ if (mode_is_yuv)
+ info.colorspace = HDMI_COLORSPACE_YUV420;
+ if (connector->yuv_qs)
+ info.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ } else if (display->bt2020_colorimetry && !use_bt2020) {
+ /**
+ * 1. Update colorimetry to non-extended
+ * 2. Change colorspace based on mode
+ * 3. Restore quantization to full if QS
+ * is enabled
+ */
+ info.colorimetry = SDE_HDMI_DEFAULT_COLORIMETRY;
+ if (mode_is_yuv)
+ info.colorspace = HDMI_COLORSPACE_YUV420;
+ if (connector->yuv_qs)
+ info.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_FULL;
+ }
+
+ hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+ checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+ avi_info0 = checksum |
+ LEFT_SHIFT_BYTE(avi_frame[0]) |
+ LEFT_SHIFT_WORD(avi_frame[1]) |
+ LEFT_SHIFT_24BITS(avi_frame[2]);
+
+ avi_info1 = avi_frame[3] |
+ LEFT_SHIFT_BYTE(avi_frame[4]) |
+ LEFT_SHIFT_WORD(avi_frame[5]) |
+ LEFT_SHIFT_24BITS(avi_frame[6]);
+
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), avi_info0);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), avi_info1);
+ display->bt2020_colorimetry = use_bt2020;
+}
+
static void sde_hdmi_clear_hdr_infoframe(struct sde_hdmi *display)
{
struct hdmi *hdmi;
@@ -2340,14 +2457,22 @@ int sde_hdmi_pre_kickoff(struct drm_connector *connector,
{
struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+ struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
u8 hdr_op;
- if (!connector || !display || !params) {
+ if (!connector || !display || !params ||
+ !params->hdr_ctrl) {
pr_err("Invalid params\n");
return -EINVAL;
}
hdr_ctrl = params->hdr_ctrl;
+ hdr_meta = &hdr_ctrl->hdr_meta;
+
+ if (!hdr_meta) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
hdr_op = sde_hdmi_hdr_get_ops(hdmi_display->curr_hdr_state,
hdr_ctrl->hdr_state);
@@ -2356,6 +2481,12 @@ int sde_hdmi_pre_kickoff(struct drm_connector *connector,
if (connector->hdr_supported)
sde_hdmi_panel_set_hdr_infoframe(display,
&hdr_ctrl->hdr_meta);
+ if (hdr_meta->eotf)
+ sde_hdmi_update_colorimetry(hdmi_display,
+ true);
+ else
+ sde_hdmi_update_colorimetry(hdmi_display,
+ false);
} else if (hdr_op == HDR_CLEAR_INFO)
sde_hdmi_clear_hdr_infoframe(display);
@@ -2364,6 +2495,70 @@ int sde_hdmi_pre_kickoff(struct drm_connector *connector,
return 0;
}
+bool sde_hdmi_mode_needs_full_range(void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct drm_display_mode *mode;
+ u32 mode_fmt_flags = 0;
+ u32 cea_mode;
+
+ if (!hdmi_display) {
+ SDE_ERROR("invalid input\n");
+ return false;
+ }
+
+ mode = &hdmi_display->mode;
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /**
+ * Clear the RGB/YUV format flags before calling the upstream API,
+ * since the API also compares mode flags when matching a mode
+ */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+ cea_mode = drm_match_cea_mode(mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
+
+ if (cea_mode > SDE_HDMI_VIC_640x480)
+ return false;
+
+ return true;
+}
+
+enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn,
+ void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct sde_connector_state *c_state;
+ struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+ struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
+
+ if (!hdmi_display || !conn) {
+ SDE_ERROR("invalid input\n");
+ goto error;
+ }
+
+ c_state = to_sde_connector_state(conn->state);
+
+ if (!c_state) {
+ SDE_ERROR("invalid input\n");
+ goto error;
+ }
+
+ hdr_ctrl = &c_state->hdr_ctrl;
+ hdr_meta = &hdr_ctrl->hdr_meta;
+
+ if ((hdr_ctrl->hdr_state == HDR_ENABLE)
+ && (hdr_meta->eotf != 0))
+ return SDE_CSC_RGB2YUV_2020L;
+ else if (sde_hdmi_mode_needs_full_range(hdmi_display)
+ || conn->yuv_qs)
+ return SDE_CSC_RGB2YUV_601FR;
+
+error:
+ return SDE_CSC_RGB2YUV_601L;
+}
+
int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
{
struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
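Taken together, sde_hdmi_get_csc_type() above reduces to a three-way decision: BT.2020 limited for active HDR, full range for VIC 1 (640x480) or non-CEA modes or sinks that set yuv_qs, and 601 limited otherwise. The sketch below only restates that logic as a pure function; pick_hdmi_csc() is hypothetical and adds no new behaviour:

static enum sde_csc_type pick_hdmi_csc(bool hdr_enabled, u32 eotf,
		bool mode_needs_full_range, bool sink_yuv_qs)
{
	if (hdr_enabled && eotf)
		return SDE_CSC_RGB2YUV_2020L;	/* BT.2020, limited range */
	if (mode_needs_full_range || sink_yuv_qs)
		return SDE_CSC_RGB2YUV_601FR;	/* full range */
	return SDE_CSC_RGB2YUV_601L;		/* default CEA limited range */
}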
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index f2dd5351913b..672a9f188d27 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -169,6 +169,7 @@ struct sde_hdmi {
bool pll_update_enable;
bool dc_enable;
bool dc_feature_supported;
+ bool bt2020_colorimetry;
struct delayed_work hdcp_cb_work;
struct dss_io_data io[HDMI_TX_MAX_IO];
@@ -201,6 +202,15 @@ enum hdmi_tx_scdc_access_type {
#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x7
+/* for AVI program */
+#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
+#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6)
+
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+
/* Maximum pixel clock rates for hdmi tx */
#define HDMI_DEFAULT_MAX_PCLK_RATE 148500
#define HDMI_TX_3_MAX_PCLK_RATE 297000
@@ -482,6 +492,23 @@ int sde_hdmi_pre_kickoff(struct drm_connector *connector,
void *display,
struct msm_display_kickoff_params *params);
+/*
+ * sde_hdmi_mode_needs_full_range - does mode need full range
+ * quantization
+ * @display: Pointer to private display structure
+ * Returns: true or false based on mode
+ */
+bool sde_hdmi_mode_needs_full_range(void *display);
+
+/*
+ * sde_hdmi_get_csc_type - returns the CSC type to be
+ * used based on state of HDR playback
+ * @conn: Pointer to DRM connector
+ * @display: Pointer to private display structure
+ * Returns: the enum sde_csc_type to use based on the HDR state
+ */
+enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn,
+ void *display);
#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
static inline u32 sde_hdmi_get_num_of_displays(void)
@@ -596,5 +623,16 @@ static inline int sde_hdmi_set_property(struct drm_connector *connector,
return 0;
}
+static inline bool sde_hdmi_mode_needs_full_range(void *display)
+{
+ return false;
+}
+
+static inline enum sde_csc_type sde_hdmi_get_csc_type(
+ struct drm_connector *conn, void *display)
+{
+ return 0;
+}
+
#endif /*#else of CONFIG_DRM_SDE_HDMI*/
#endif /* _SDE_HDMI_H_ */
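The LEFT_SHIFT_* macros that moved into this header pack consecutive infoframe bytes into one 32-bit AVI_INFO register word. A small illustrative helper with a worked value (pack_avi_word() is hypothetical, not part of the patch):

static inline u32 pack_avi_word(u8 checksum, const u8 *frame)
{
	/* checksum in bits 7:0, frame[0] in 15:8, frame[1] in 23:16,
	 * frame[2] in 31:24; e.g. checksum 0x44 with frame bytes
	 * { 0x11, 0x22, 0x33 } packs to 0x33221144.
	 */
	return checksum |
	       LEFT_SHIFT_BYTE(frame[0]) |
	       LEFT_SHIFT_WORD(frame[1]) |
	       LEFT_SHIFT_24BITS(frame[2]);
}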
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index e4eb531c12aa..e6b6d15b5fb7 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -99,17 +99,10 @@ struct sde_hdmi_bridge {
#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
-/* for AVI program */
-#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
- (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
-#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6)
#define HDMI_SPD_INFOFRAME_BUFFER_SIZE \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE)
#define HDMI_DEFAULT_VENDOR_NAME "unknown"
#define HDMI_DEFAULT_PRODUCT_NAME "msm"
-#define LEFT_SHIFT_BYTE(x) ((x) << 8)
-#define LEFT_SHIFT_WORD(x) ((x) << 16)
-#define LEFT_SHIFT_24BITS(x) ((x) << 24)
#define HDMI_AVI_IFRAME_LINE_NUMBER 1
#define HDMI_VENDOR_IFRAME_LINE_NUMBER 3
@@ -351,6 +344,7 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
scrambler_on = true;
tmds_clock_ratio = 1;
} else {
+ tmds_clock_ratio = 0;
scrambler_on = connector->supports_scramble;
}
@@ -396,6 +390,14 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
rc = _sde_hdmi_bridge_setup_ddc_timers(hdmi,
HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync);
} else {
+ /* reset tmds clock ratio */
+ rc = sde_hdmi_scdc_write(hdmi,
+ HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+ tmds_clock_ratio);
+ /* scdc write can fail if sink doesn't support SCDC */
+ if (rc && connector->scdc_present)
+ SDE_ERROR("SCDC present, TMDS clk ratio err\n");
+
sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0);
reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
reg_val &= ~BIT(28); /* Unset SCRAMBLER_EN bit */
@@ -573,18 +575,49 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
}
static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
- const struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
u8 checksum;
u32 reg_val;
+ u32 mode_fmt_flags = 0;
struct hdmi_avi_infoframe info;
+ struct drm_connector *connector;
+
+ if (!hdmi || !mode) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+ connector = hdmi->connector;
+
+ if (!connector) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /**
+ * Clear the RGB/YUV format flags before calling the upstream API,
+ * since the API also compares mode flags when matching a mode
+ */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
- if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) {
info.colorspace = HDMI_COLORSPACE_YUV420;
+ /**
+ * If sink supports quantization select,
+ * override to full range
+ */
+ if (connector->yuv_qs)
+ info.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_FULL;
+ }
hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
index 1d89ae222a7b..3c6b0f1b9dd4 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
@@ -97,6 +97,14 @@
#define HDMI_GET_MSB(x)(x >> 8)
#define HDMI_GET_LSB(x)(x & 0xff)
+#define SDE_HDMI_VIC_640x480 0x1
+#define SDE_HDMI_YCC_QUANT_MASK (0x3 << 14)
+#define SDE_HDMI_COLORIMETRY_MASK (0x3 << 22)
+
+#define SDE_HDMI_DEFAULT_COLORIMETRY 0x0
+#define SDE_HDMI_USE_EXTENDED_COLORIMETRY 0x3
+#define SDE_HDMI_BT2020_COLORIMETRY 0x6
+
/*
* Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
* read by the hardware
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index df9ddadc5c5c..0cd458fd184b 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -149,7 +149,6 @@ struct msm_gem_submit {
uint32_t fence;
int ring;
u32 flags;
- bool valid;
uint64_t profile_buf_iova;
struct drm_msm_gem_submit_profile_buffer *profile_buf;
bool secure;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b73379aa9ed7..f2b6aa29b410 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -212,15 +212,8 @@ static int submit_validate_objects(struct msm_gpu *gpu,
int contended, slow_locked = -1, i, ret = 0;
retry:
- submit->valid = true;
-
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- struct msm_gem_address_space *aspace;
- uint64_t iova;
-
- aspace = (msm_obj->flags & MSM_BO_SECURE) ?
- gpu->secure_aspace : submit->aspace;
if (slow_locked == i)
slow_locked = -1;
@@ -247,28 +240,6 @@ retry:
goto fail;
}
}
-
- /* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base, aspace, &iova);
-
- /* this would break the logic in the fail path.. there is no
- * reason for this to happen, but just to be on the safe side
- * let's notice if this starts happening in the future:
- */
- WARN_ON(ret == -EDEADLK);
-
- if (ret)
- goto fail;
-
- submit->bos[i].flags |= BO_PINNED;
-
- if (iova == submit->bos[i].iova) {
- submit->bos[i].flags |= BO_VALID;
- } else {
- submit->bos[i].iova = iova;
- submit->bos[i].flags &= ~BO_VALID;
- submit->valid = false;
- }
}
ww_acquire_done(&submit->ticket);
@@ -297,9 +268,14 @@ fail:
return ret;
}
-static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
+static int submit_bo(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, uint32_t idx,
struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
+ struct msm_gem_object *msm_obj;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
idx, submit->nr_bos);
@@ -308,6 +284,39 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
if (obj)
*obj = submit->bos[idx].obj;
+
+ /* Only map and pin if the caller needs either the iova or valid */
+ if (!iova && !valid)
+ return 0;
+
+ if (!(submit->bos[idx].flags & BO_PINNED)) {
+ uint64_t buf_iova;
+
+ msm_obj = submit->bos[idx].obj;
+ aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+ gpu->secure_aspace : submit->aspace;
+
+ ret = msm_gem_get_iova(&msm_obj->base, aspace, &buf_iova);
+
+ /* this would break the logic in the fail path.. there is no
+ * reason for this to happen, but just to be on the safe side
+ * let's notice if this starts happening in the future:
+ */
+ WARN_ON(ret == -EDEADLK);
+
+ if (ret)
+ return ret;
+
+ submit->bos[idx].flags |= BO_PINNED;
+
+ if (buf_iova == submit->bos[idx].iova) {
+ submit->bos[idx].flags |= BO_VALID;
+ } else {
+ submit->bos[idx].iova = buf_iova;
+ submit->bos[idx].flags &= ~BO_VALID;
+ }
+ }
+
if (iova)
*iova = submit->bos[idx].iova;
if (valid)
@@ -317,8 +326,10 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
}
/* process the reloc's and patch up the cmdstream as needed: */
-static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
- uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+static int submit_reloc(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit,
+ struct msm_gem_object *obj, uint32_t offset,
+ uint32_t nr_relocs, uint64_t relocs)
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
@@ -334,6 +345,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return -EINVAL;
}
+ if (nr_relocs == 0)
+ return 0;
+
/* For now, just map the entire thing. Eventually we probably want
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
@@ -372,7 +386,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return -EINVAL;
}
- ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ ret = submit_bo(gpu, submit, submit_reloc.reloc_idx,
+ NULL, &iova, &valid);
if (ret)
return ret;
@@ -482,7 +497,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- ret = submit_bo(submit, submit_cmd.submit_idx,
+ ret = submit_bo(gpu, submit, submit_cmd.submit_idx,
&msm_obj, &iova, NULL);
if (ret)
goto out;
@@ -515,11 +530,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ submit_cmd.submit_offset;
}
- if (submit->valid)
- continue;
-
- ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
- submit_cmd.nr_relocs, submit_cmd.relocs);
+ ret = submit_reloc(gpu, submit, msm_obj,
+ submit_cmd.submit_offset, submit_cmd.nr_relocs,
+ submit_cmd.relocs);
if (ret)
goto out;
}
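With pinning moved out of submit_validate_objects() and into submit_bo(), buffers are now pinned lazily, only when a caller asks for the iova or the validity flag. A hedged usage sketch (example_lookup_obj_only() is hypothetical):

static int example_lookup_obj_only(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj)
{
	/* Neither iova nor valid is requested, so submit_bo() returns
	 * right after the range check and never sets BO_PINNED; a reloc
	 * lookup that passes &iova is what triggers the lazy pin.
	 */
	return submit_bo(gpu, submit, idx, obj, NULL, NULL);
}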
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8073898e4275..7c109fdab545 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -863,7 +863,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- gpu->inactive = true;
+ /*
+ * Set the inactive flag to false, so that when the retire worker
+ * kicks in from the init path, it knows that it has to turn off the
+ * clocks. This is safe to do because this is the init sequence and
+ * the init_lock in msm_open() protects against races during
+ * initialization.
+ */
+ gpu->inactive = false;
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
@@ -897,6 +904,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, gpu->name, gpu);
if (ret) {
+ gpu->irq = ret;
dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}
@@ -1007,6 +1015,11 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
WARN_ON(!list_empty(&gpu->active_list));
+ if (gpu->irq >= 0) {
+ disable_irq(gpu->irq);
+ devm_free_irq(&pdev->dev, gpu->irq, gpu);
+ }
+
bs_fini(gpu);
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
@@ -1022,4 +1035,22 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gpu_destroy_address_space(gpu->aspace);
msm_gpu_destroy_address_space(gpu->secure_aspace);
+
+ if (gpu->gpu_reg)
+ devm_regulator_put(gpu->gpu_reg);
+
+ if (gpu->gpu_cx)
+ devm_regulator_put(gpu->gpu_cx);
+
+ if (gpu->ebi1_clk)
+ devm_clk_put(&pdev->dev, gpu->ebi1_clk);
+
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ devm_clk_put(&pdev->dev, gpu->grp_clks[i]);
+
+ devm_kfree(&pdev->dev, gpu->grp_clks);
+
+ if (gpu->mmio)
+ devm_iounmap(&pdev->dev, gpu->mmio);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index ef7492817983..a0f6b5c6a732 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -344,8 +344,8 @@ static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
prop = priv->cp_property[feature];
if (!prop) {
- prop = drm_property_create(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
- name, 0);
+ prop = drm_property_create_range(crtc->dev,
+ DRM_MODE_PROP_IMMUTABLE, name, 0, 1);
if (!prop) {
DRM_ERROR("property create failed: %s\n", name);
kfree(prop_node);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6cc54d15beb2..5fa4c21060f9 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -90,6 +90,50 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
return rc;
}
+enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn)
+{
+ struct sde_connector *c_conn;
+
+ if (!conn) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(conn);
+
+ if (!c_conn->display) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ if (!c_conn->ops.get_csc_type)
+ return SDE_CSC_RGB2YUV_601L;
+
+ return c_conn->ops.get_csc_type(conn, c_conn->display);
+}
+
+bool sde_connector_mode_needs_full_range(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector) {
+ SDE_ERROR("invalid argument\n");
+ return false;
+ }
+
+ c_conn = to_sde_connector(connector);
+
+ if (!c_conn->display) {
+ SDE_ERROR("invalid argument\n");
+ return false;
+ }
+
+ if (!c_conn->ops.mode_needs_full_range)
+ return false;
+
+ return c_conn->ops.mode_needs_full_range(c_conn->display);
+}
+
static void sde_connector_destroy(struct drm_connector *connector)
{
struct sde_connector *c_conn;
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 8257f29bd4b8..b76ce0aaf577 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -135,6 +135,24 @@ struct sde_connector_ops {
int (*pre_kickoff)(struct drm_connector *connector,
void *display,
struct msm_display_kickoff_params *params);
+
+ /**
+ * mode_needs_full_range - does the mode need full range
+ * quantization
+ * @display: Pointer to private display structure
+ * Returns: true or false based on whether full range is needed
+ */
+ bool (*mode_needs_full_range)(void *display);
+
+ /**
+ * get_csc_type - returns the CSC type to be used
+ * by the CDM block based on HDR state
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display structure
+ * Returns: type of CSC matrix to be used
+ */
+ enum sde_csc_type (*get_csc_type)(struct drm_connector *connector,
+ void *display);
};
/**
@@ -327,5 +345,21 @@ int sde_connector_get_info(struct drm_connector *connector,
*/
int sde_connector_pre_kickoff(struct drm_connector *connector);
+/**
+ * sde_connector_mode_needs_full_range - query quantization type
+ * for the connector mode
+ * @connector: Pointer to drm connector object
+ * Returns: true OR false based on connector mode
+ */
+bool sde_connector_mode_needs_full_range(struct drm_connector *connector);
+
+/**
+ * sde_connector_get_csc_type - query csc type
+ * to be used for the connector
+ * @connector: Pointer to drm connector object
+ * Returns: csc type based on connector HDR state
+ */
+enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn);
+
#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 9ab216213d8e..2e9e2192670d 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -601,6 +601,7 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
struct drm_connector *conn;
+ struct sde_connector *c_conn;
struct drm_device *dev;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
@@ -625,16 +626,18 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
for (i = 0; i < cstate->num_connectors; ++i)
sde_connector_complete_commit(cstate->connectors[i]);
- if (!sde_kms->splash_info.handoff &&
- sde_kms->splash_info.lk_is_exited) {
+ if (sde_splash_get_lk_complete_status(&sde_kms->splash_info)) {
mutex_lock(&dev->mode_config.mutex);
drm_for_each_connector(conn, crtc->dev) {
if (conn->state->crtc != crtc)
continue;
+ c_conn = to_sde_connector(conn);
+
sde_splash_clean_up_free_resource(priv->kms,
&priv->phandle,
- conn->connector_type);
+ c_conn->connector_type,
+ c_conn->display);
}
mutex_unlock(&dev->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 97c9f8baea6d..23fb79241d84 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -80,6 +80,42 @@ static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = {
{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
},
+
+ [SDE_CSC_RGB2YUV_709L] = {
+ {
+ TO_S15D16(0x005d), TO_S15D16(0x013a), TO_S15D16(0x0020),
+ TO_S15D16(0xffcc), TO_S15D16(0xff53), TO_S15D16(0x00e1),
+ TO_S15D16(0x00e1), TO_S15D16(0xff34), TO_S15D16(0xffeb),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+
+ [SDE_CSC_RGB2YUV_2020L] = {
+ {
+ TO_S15D16(0x0073), TO_S15D16(0x0129), TO_S15D16(0x001a),
+ TO_S15D16(0xffc1), TO_S15D16(0xff5e), TO_S15D16(0x00e0),
+ TO_S15D16(0x00e0), TO_S15D16(0xff32), TO_S15D16(0xffee),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+
+ [SDE_CSC_RGB2YUV_2020FR] = {
+ {
+ TO_S15D16(0x0086), TO_S15D16(0x015b), TO_S15D16(0x001e),
+ TO_S15D16(0xffb9), TO_S15D16(0xff47), TO_S15D16(0x0100),
+ TO_S15D16(0x0100), TO_S15D16(0xff15), TO_S15D16(0xffeb),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
};
/**
@@ -826,7 +862,12 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc;
struct sde_encoder_phys *phys;
+ struct drm_connector *conn_mas = NULL;
unsigned int i;
+ enum sde_csc_type conn_csc;
+ struct drm_display_mode *mode;
+ struct sde_hw_cdm *hw_cdm;
+ int mode_is_yuv = 0;
int rc;
if (!drm_enc) {
@@ -846,11 +887,46 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
}
if (sde_enc->cur_master && sde_enc->cur_master->connector) {
- rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
+ conn_mas = sde_enc->cur_master->connector;
+ rc = sde_connector_pre_kickoff(conn_mas);
if (rc)
- SDE_ERROR_ENC(sde_enc, "kickoff conn%d failed rc %d\n",
- sde_enc->cur_master->connector->base.id,
- rc);
+ SDE_ERROR_ENC(sde_enc,
+ "kickoff conn%d failed rc %d\n",
+ conn_mas->base.id,
+ rc);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ phys = sde_enc->phys_encs[i];
+ if (phys) {
+ mode = &phys->cached_mode;
+ mode_is_yuv = (mode->private_flags &
+ MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420);
+ }
+ /**
+ * Check the CSC matrix type to which the CDM CSC
+ * block should be updated, based on the connector
+ * HDR state
+ */
+ conn_csc = sde_connector_get_csc_type(conn_mas);
+ if (phys && mode_is_yuv) {
+ if (phys->enc_cdm_csc != conn_csc) {
+ hw_cdm = phys->hw_cdm;
+ rc = hw_cdm->ops.setup_csc_data(hw_cdm,
+ &sde_csc_10bit_convert[conn_csc]);
+
+ if (rc)
+ SDE_ERROR_ENC(sde_enc,
+ "CSC setup failed rc %d\n",
+ rc);
+ SDE_DEBUG_ENC(sde_enc,
+ "updating CSC %d to %d\n",
+ phys->enc_cdm_csc,
+ conn_csc);
+ phys->enc_cdm_csc = conn_csc;
+
+ }
+ }
+ }
}
}
@@ -1417,6 +1493,7 @@ void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
struct sde_encoder_virt *sde_enc = NULL;
struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
+ struct drm_connector *connector = phys_enc->connector;
int ret;
u32 csc_type = 0;
@@ -1476,10 +1553,26 @@ void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
cdm_cfg->h_cdwn_type,
cdm_cfg->v_cdwn_type);
- if (output_type == CDM_CDWN_OUTPUT_HDMI)
- csc_type = SDE_CSC_RGB2YUV_601FR;
- else if (output_type == CDM_CDWN_OUTPUT_WB)
+ /**
+ * Choose the CSC matrix based on the following rules:
+ * 1. If the connector supports quantization select,
+ * pick Full-Range for better quality.
+ * 2. If this is a non-CEA mode, pick Full-Range, as per the CEA spec.
+ * 3. Otherwise pick Limited-Range, since all other CEA modes
+ * require a limited range.
+ */
+
+ if (output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (connector && connector->yuv_qs)
+ csc_type = SDE_CSC_RGB2YUV_601FR;
+ else if (connector &&
+ sde_connector_mode_needs_full_range(connector))
+ csc_type = SDE_CSC_RGB2YUV_601FR;
+ else
+ csc_type = SDE_CSC_RGB2YUV_601L;
+ } else if (output_type == CDM_CDWN_OUTPUT_WB) {
csc_type = SDE_CSC_RGB2YUV_601L;
+ }
if (hw_cdm && hw_cdm->ops.setup_csc_data) {
ret = hw_cdm->ops.setup_csc_data(hw_cdm,
@@ -1490,6 +1583,9 @@ void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
}
}
+ /* Cache the CSC default matrix type */
+ phys_enc->enc_cdm_csc = csc_type;
+
if (hw_cdm && hw_cdm->ops.setup_cdwn) {
ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
if (ret < 0) {
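As an aside for readers decoding the new coefficient tables: the Y-row literals of the full-range BT.2020 entry line up with the standard luma weights once divided by 512, which is a quick sanity check on a table. The 1/512 scaling is inferred from the numbers rather than quoted from hardware documentation, and the helper below is purely illustrative:

/* SDE_CSC_RGB2YUV_2020FR Y row: 0x0086/512 = 0.2617 ~ Kr 0.2627,
 * 0x015b/512 = 0.6777 ~ Kg 0.6780, 0x001e/512 = 0.0586 ~ Kb 0.0593.
 */
static inline bool csc_y_row_plausible(u16 kr, u16 kg, u16 kb)
{
	u32 sum = kr + kg + kb;	/* 134 + 347 + 30 = 511 here */

	/* a luma row should sum to ~1.0, i.e. ~512 in this scaling */
	return sum >= 508 && sum <= 516;
}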
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 20f125155de3..aec844d640bd 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -174,6 +174,7 @@ enum sde_intr_idx {
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on sde hardware
+ * @enc_cdm_csc: Cached CSC type of CDM block
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@@ -201,6 +202,7 @@ struct sde_encoder_phys {
enum sde_enc_split_role split_role;
enum sde_intf_mode intf_mode;
enum sde_intf intf_idx;
+ enum sde_csc_type enc_cdm_csc;
spinlock_t *enc_spinlock;
enum sde_enc_enable_state enable_state;
atomic_t vblank_refcount;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 78a8b732b0de..69a4237f7b67 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -593,22 +593,33 @@ static void sde_encoder_phys_vid_get_hw_resources(
struct drm_connector_state *conn_state)
{
struct sde_encoder_phys_vid *vid_enc;
+ struct sde_mdss_cfg *vid_catalog;
if (!phys_enc || !hw_res) {
SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
- phys_enc != 0, hw_res != 0, conn_state != 0);
+ phys_enc != NULL, hw_res != NULL, conn_state != NULL);
return;
}
+ vid_catalog = phys_enc->sde_kms->catalog;
vid_enc = to_sde_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf) {
- SDE_ERROR("invalid arg(s), hw_intf\n");
+ if (!vid_enc->hw_intf || !vid_catalog) {
+ SDE_ERROR("invalid arg(s), hw_intf %d vid_catalog %d\n",
+ vid_enc->hw_intf != NULL, vid_catalog != NULL);
return;
}
SDE_DEBUG_VIDENC(vid_enc, "\n");
+ if (vid_enc->hw_intf->idx > INTF_MAX) {
+ SDE_ERROR("invalid arg(s), idx %d\n",
+ vid_enc->hw_intf->idx);
+ return;
+ }
hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
- hw_res->needs_cdm = true;
+
+ if (vid_catalog->intf[vid_enc->hw_intf->idx - INTF_0].type
+ == INTF_HDMI)
+ hw_res->needs_cdm = true;
SDE_DEBUG_DRIVER("[vid] needs_cdm=%d\n", hw_res->needs_cdm);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 1edeff6a7aec..92dd829eee3e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -344,6 +344,9 @@ enum sde_3d_blend_mode {
enum sde_csc_type {
SDE_CSC_RGB2YUV_601L,
SDE_CSC_RGB2YUV_601FR,
+ SDE_CSC_RGB2YUV_709L,
+ SDE_CSC_RGB2YUV_2020L,
+ SDE_CSC_RGB2YUV_2020FR,
SDE_MAX_CSC
};
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 705ec2d0dfa2..cdf67c0aa864 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -594,6 +594,8 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.set_property = sde_hdmi_set_property,
.get_property = sde_hdmi_get_property,
.pre_kickoff = sde_hdmi_pre_kickoff,
+ .mode_needs_full_range = sde_hdmi_mode_needs_full_range,
+ .get_csc_type = sde_hdmi_get_csc_type
};
struct msm_display_info info = {0};
struct drm_encoder *encoder;
@@ -1260,12 +1262,6 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- rc = sde_splash_parse_dt(dev);
- if (rc) {
- SDE_ERROR("parse dt for splash info failed: %d\n", rc);
- goto power_error;
- }
-
/*
* Read the DISP_INTF_SEL register to check
* whether early display is enabled in LK.
@@ -1277,15 +1273,23 @@ static int sde_kms_hw_init(struct msm_kms *kms)
}
/*
- * when LK has enabled early display, sde_splash_init should be
- * called first. This function will first do bandwidth voting job
- * because display hardware is accessing AHB data bus, otherwise
- * device reboot will happen. Second is to check if the memory is
- * reserved.
+ * When LK has enabled early display, sde_splash_parse_dt() and
+ * sde_splash_init() must both be called. The former parses the
+ * memory node that the splash function requires. The latter first
+ * votes for AHB data bus bandwidth (the display hardware is already
+ * accessing the bus and the device would otherwise reboot) and then
+ * checks whether the memory is reserved.
*/
sinfo = &sde_kms->splash_info;
- if (sinfo->handoff)
+ if (sinfo->handoff) {
+ rc = sde_splash_parse_dt(dev);
+ if (rc) {
+ SDE_ERROR("parse dt for splash info failed: %d\n", rc);
+ goto power_error;
+ }
+
sde_splash_init(&priv->phandle, kms);
+ }
for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
u32 vbif_idx = sde_kms->catalog->vbif[i].id;
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
index 8b2894163eae..19e6406600cd 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.c
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -22,6 +22,7 @@
#include "sde_hw_util.h"
#include "sde_hw_intf.h"
#include "sde_hw_catalog.h"
+#include "dsi_display.h"
#define MDP_SSPP_TOP0_OFF 0x1000
#define DISP_INTF_SEL 0x004
@@ -37,6 +38,9 @@
#define SDE_LK_EXIT_VALUE 0xDEADBEEF
#define SDE_LK_EXIT_MAX_LOOP 20
+
+static DEFINE_MUTEX(sde_splash_lock);
+
/*
* In order to free reserved memory from bootup, since we are not
* able to call the __init free functions, we need to free
@@ -279,6 +283,15 @@ static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
sinfo->splash_mem_size = NULL;
}
+static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo,
+ u32 *hdmi_cnt, u32 *dsi_cnt)
+{
+ mutex_lock(&sde_splash_lock);
+ *hdmi_cnt = sinfo->hdmi_connector_cnt;
+ *dsi_cnt = sinfo->dsi_connector_cnt;
+ mutex_unlock(&sde_splash_lock);
+}
+
static int _sde_splash_free_resource(struct msm_mmu *mmu,
struct sde_splash_info *sinfo, enum splash_connector_type conn)
{
@@ -442,13 +455,13 @@ int sde_splash_get_handoff_status(struct msm_kms *kms)
if (num_of_display_on) {
sinfo->handoff = true;
sinfo->program_scratch_regs = true;
+ sinfo->lk_is_exited = false;
} else {
sinfo->handoff = false;
sinfo->program_scratch_regs = false;
+ sinfo->lk_is_exited = true;
}
- sinfo->lk_is_exited = false;
-
return 0;
}
@@ -505,15 +518,29 @@ void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
}
}
+bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo)
+{
+ bool ret = false;
+
+ mutex_lock(&sde_splash_lock);
+ ret = !sinfo->handoff && !sinfo->lk_is_exited;
+ mutex_unlock(&sde_splash_lock);
+
+ return ret;
+}
+
int sde_splash_clean_up_free_resource(struct msm_kms *kms,
- struct sde_power_handle *phandle, int connector_type)
+ struct sde_power_handle *phandle,
+ int connector_type, void *display)
{
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
struct msm_mmu *mmu;
+ struct dsi_display *dsi_display = display;
int ret = 0;
- static bool hdmi_is_released;
- static bool dsi_is_released;
+ int hdmi_conn_count = 0;
+ int dsi_conn_count = 0;
+ static const char *last_commit_display_type = "unknown";
if (!phandle || !kms) {
SDE_ERROR("invalid phandle/kms.\n");
@@ -527,20 +554,30 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms,
return -EINVAL;
}
- /* When both hdmi's and dsi's resource are freed,
- * 1. Destroy splash node objects.
- * 2. Decrease ref count in bandwidth voting function.
- */
- if (sinfo->hdmi_connector_cnt == 0 &&
- sinfo->dsi_connector_cnt == 0) {
+ _sde_splash_get_connector_ref_cnt(sinfo, &hdmi_conn_count,
+ &dsi_conn_count);
+
+ mutex_lock(&sde_splash_lock);
+ if (hdmi_conn_count == 0 && dsi_conn_count == 0 &&
+ !sinfo->lk_is_exited) {
+ /* When both the HDMI and DSI handoffs have finished,
+ * 1. Destroy splash node objects.
+ * 2. Release the memory which LK's stack is running on.
+ * 3. Withdraw AHB data bus bandwidth voting.
+ */
DRM_INFO("HDMI and DSI resource handoff is completed\n");
- sinfo->lk_is_exited = false;
+ sinfo->lk_is_exited = true;
_sde_splash_destroy_splash_node(sinfo);
+ _sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr,
+ sinfo->lk_pool_size);
+
sde_power_data_bus_bandwidth_ctrl(phandle,
sde_kms->core_client, false);
+
+ mutex_unlock(&sde_splash_lock);
return 0;
}
@@ -548,25 +585,36 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms,
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
- if (!hdmi_is_released)
+ if (sinfo->hdmi_connector_cnt == 1) {
sinfo->hdmi_connector_cnt--;
- if ((sinfo->hdmi_connector_cnt == 0) && (!hdmi_is_released)) {
- hdmi_is_released = true;
-
ret = _sde_splash_free_resource(mmu,
- sinfo, SPLASH_HDMI);
+ sinfo, SPLASH_HDMI);
}
break;
case DRM_MODE_CONNECTOR_DSI:
- if (!dsi_is_released)
- sinfo->dsi_connector_cnt--;
-
- if ((sinfo->dsi_connector_cnt == 0) && (!dsi_is_released)) {
- dsi_is_released = true;
+ /*
+ * Commits arrive on two DSI connectors. Before releasing the
+ * DSI resource, make sure commits have been seen on both DSIs
+ * (i.e. on two different display types), which guarantees the
+ * handoff has finished on both. Freeing the memory any earlier
+ * could race with DSI0 or DSI1 still accessing it.
+ */
+ if (strcmp(dsi_display->display_type, "unknown") &&
+ strcmp(last_commit_display_type,
+ dsi_display->display_type)) {
+ if (sinfo->dsi_connector_cnt > 1)
+ sinfo->dsi_connector_cnt--;
+ else if (sinfo->dsi_connector_cnt == 1) {
+ ret = _sde_splash_free_resource(mmu,
+ sinfo, SPLASH_DSI);
+
+ sinfo->dsi_connector_cnt--;
+ }
- ret = _sde_splash_free_resource(mmu,
- sinfo, SPLASH_DSI);
+ last_commit_display_type = dsi_display->display_type;
}
break;
default:
@@ -575,6 +623,8 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms,
__func__, connector_type);
}
+ mutex_unlock(&sde_splash_lock);
+
return ret;
}
@@ -598,6 +648,7 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
}
/* Monitor LK's status and tell it to exit. */
+ mutex_lock(&sde_splash_lock);
if (sinfo->program_scratch_regs) {
if (_sde_splash_lk_check(sde_kms->hw_intr))
_sde_splash_notify_lk_exit(sde_kms->hw_intr);
@@ -605,6 +656,7 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
sinfo->handoff = false;
sinfo->program_scratch_regs = false;
}
+ mutex_unlock(&sde_splash_lock);
if (!sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
/* We do not return fault value here, to ensure
@@ -626,6 +678,5 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
}
}
- sinfo->lk_is_exited = true;
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
index 47965693f0b8..babf88335e49 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.h
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -95,7 +95,8 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms);
* HDMI's and DSI's resource respectively.
*/
int sde_splash_clean_up_free_resource(struct msm_kms *kms,
- struct sde_power_handle *phandle, int connector_type);
+ struct sde_power_handle *phandle,
+ int connector_type, void *display);
/**
* sde_splash_parse_dt.
@@ -121,4 +122,11 @@ void sde_splash_destroy(struct sde_splash_info *sinfo,
struct sde_power_handle *phandle,
struct sde_power_client *pclient);
+/**
+ * sde_splash_get_lk_complete_status
+ *
+ * Get LK's status to check if it has been stopped.
+ */
+bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo);
+
#endif
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 68246253bb70..50667c5921a0 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -549,6 +549,12 @@ int _sde_edid_update_modes(struct drm_connector *connector,
{
int rc = 0;
struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+ struct drm_display_info *disp_info;
+
+ disp_info = &connector->display_info;
+
+ if (disp_info)
+ disp_info->edid_hdmi_dc_modes = 0;
SDE_EDID_DEBUG("%s +", __func__);
if (edid_ctrl->edid) {
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index 3aba9e307732..d08cf13c448d 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -751,11 +751,24 @@ error:
static void sde_hdcp_1x_enable_sink_irq_hpd(struct sde_hdcp_1x *hdcp)
{
int rc;
+ u8 const required_major = 1, required_minor = 2;
+ u8 sink_major = 0, sink_minor = 0;
u8 enable_hpd_irq = 0x1;
+ u16 version;
- if (hdcp->current_tp.ds_type != DS_REPEATER)
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
return;
+ version = *hdcp->init_data.version;
+ sink_major = (version >> 4) & 0x0f;
+ sink_minor = version & 0x0f;
+
+ if ((sink_minor < required_minor) || (sink_major < required_major) ||
+ (hdcp->current_tp.ds_type != DS_REPEATER)) {
+ pr_debug("sink irq hpd not enabled\n");
+ return;
+ }
+
rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.ainfo, &enable_hpd_irq);
if (IS_ERR_VALUE(rc))
SDE_HDCP_DEBUG("error writing ainfo to sink\n");
@@ -1295,6 +1308,11 @@ static void sde_hdcp_1x_auth_work(struct work_struct *work)
if (rc)
goto end;
+
+end:
+ if (rc && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
+ hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+
/*
* Disabling software DDC before going into part3 to make sure
* there is no Arbitration between software and hardware for DDC
@@ -1302,9 +1320,6 @@ static void sde_hdcp_1x_auth_work(struct work_struct *work)
if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
HDMI_DDC_ARBITRATION) | (BIT(4)));
-end:
- if (rc && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
- hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
sde_hdcp_1x_update_auth_status(hdcp);
}
@@ -1498,6 +1513,10 @@ static int sde_hdcp_1x_isr(void *input)
SDE_HDCP_DEBUG("%s: AUTH FAIL, LINK0_STATUS=0x%08x\n",
SDE_HDCP_STATE_NAME, link_status);
+ /* Clear AUTH_FAIL_INFO as well */
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->auth_fail_info_ack));
+
if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
sde_hdcp_1x_update_auth_status(hdcp);
@@ -1505,9 +1524,6 @@ static int sde_hdcp_1x_isr(void *input)
complete_all(&hdcp->r0_checked);
}
- /* Clear AUTH_FAIL_INFO as well */
- DSS_REG_W(io, isr->int_reg,
- (hdcp_int_val | isr->auth_fail_info_ack));
}
if (hdcp_int_val & isr->tx_req_int) {
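The new gate in sde_hdcp_1x_enable_sink_irq_hpd() above splits the sink's HDCP version byte into nibbles and requires at least 1.2 on a repeater before writing AINFO. A small illustrative restatement (hdcp1x_sink_at_least_1_2() is hypothetical):

static inline bool hdcp1x_sink_at_least_1_2(u8 version)
{
	u8 major = (version >> 4) & 0x0f;	/* 0x12 -> 1 */
	u8 minor = version & 0x0f;		/* 0x12 -> 2 */

	/* 0x12 passes; 0x11 is below the required 1.2 and is skipped */
	return major >= 1 && minor >= 2;
}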
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c2d21e..6d8f21290aa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
if (bar->bar[0].mem) {
addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index d4ac8c837314..8e86cf7da614 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -30,6 +30,7 @@
#include "radeon_audio.h"
#include "atom.h"
#include <linux/backlight.h>
+#include <linux/dmi.h>
extern int atom_debug;
@@ -2183,9 +2184,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
goto assigned;
}
- /* on DCE32 and encoder can driver any block so just crtc id */
+ /*
+ * On DCE32 any encoder can drive any block so usually just use crtc id,
+ * but Apple thinks different at least on iMac10,1, so there use linkb,
+ * otherwise the internal eDP panel will stay dark.
+ */
if (ASIC_IS_DCE32(rdev)) {
- enc_idx = radeon_crtc->crtc_id;
+ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+ enc_idx = (dig->linkb) ? 1 : 0;
+ else
+ enc_idx = radeon_crtc->crtc_id;
+
goto assigned;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3c32f095a873..2ccf81168d1e 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -782,6 +782,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
if (r600_dpm_get_vrefresh(rdev) > 120)
return true;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (r600_dpm_get_vrefresh(rdev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 48cb19949ca3..9255b9c096b6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -282,26 +282,6 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
* Page Flip
*/
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- /* Destroy the pending vertical blanking event associated with the
- * pending page flip, if any, and disable vertical blanking interrupts.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- event = rcrtc->event;
- if (event && event->base.file_priv == file) {
- rcrtc->event = NULL;
- event->base.destroy(&event->base);
- drm_crtc_vblank_put(&rcrtc->crtc);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 4b95d9d08c49..2bbe3f5aab65 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -67,8 +67,6 @@ enum rcar_du_output {
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file);
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 40422f6b645e..bf4674aa6405 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -144,91 +144,6 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
* DRM operations
*/
-static int rcar_du_unload(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- if (rcdu->fbdev)
- drm_fbdev_cma_fini(rcdu->fbdev);
-
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
-
- dev->irq_enabled = 0;
- dev->dev_private = NULL;
-
- return 0;
-}
-
-static int rcar_du_load(struct drm_device *dev, unsigned long flags)
-{
- struct platform_device *pdev = dev->platformdev;
- struct device_node *np = pdev->dev.of_node;
- struct rcar_du_device *rcdu;
- struct resource *mem;
- int ret;
-
- if (np == NULL) {
- dev_err(dev->dev, "no platform data\n");
- return -ENODEV;
- }
-
- rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
- if (rcdu == NULL) {
- dev_err(dev->dev, "failed to allocate private data\n");
- return -ENOMEM;
- }
-
- init_waitqueue_head(&rcdu->commit.wait);
-
- rcdu->dev = &pdev->dev;
- rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
- rcdu->ddev = dev;
- dev->dev_private = rcdu;
-
- /* I/O resources */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(rcdu->mmio))
- return PTR_ERR(rcdu->mmio);
-
- /* Initialize vertical blanking interrupts handling. Start with vblank
- * disabled for all CRTCs.
- */
- ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize vblank\n");
- goto done;
- }
-
- /* DRM/KMS objects */
- ret = rcar_du_modeset_init(rcdu);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
- goto done;
- }
-
- dev->irq_enabled = 1;
-
- platform_set_drvdata(pdev, rcdu);
-
-done:
- if (ret)
- rcar_du_unload(dev);
-
- return ret;
-}
-
-static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
- unsigned int i;
-
- for (i = 0; i < rcdu->num_crtcs; ++i)
- rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
-}
-
static void rcar_du_lastclose(struct drm_device *dev)
{
struct rcar_du_device *rcdu = dev->dev_private;
@@ -269,11 +184,7 @@ static const struct file_operations rcar_du_fops = {
static struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
| DRIVER_ATOMIC,
- .load = rcar_du_load,
- .unload = rcar_du_unload,
- .preclose = rcar_du_preclose,
.lastclose = rcar_du_lastclose,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
@@ -333,18 +244,104 @@ static const struct dev_pm_ops rcar_du_pm_ops = {
* Platform driver
*/
-static int rcar_du_probe(struct platform_device *pdev)
+static int rcar_du_remove(struct platform_device *pdev)
{
- return drm_platform_init(&rcar_du_driver, pdev);
+ struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+ struct drm_device *ddev = rcdu->ddev;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_connector_unplug_all(ddev);
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ drm_dev_unregister(ddev);
+
+ if (rcdu->fbdev)
+ drm_fbdev_cma_fini(rcdu->fbdev);
+
+ drm_kms_helper_poll_fini(ddev);
+ drm_mode_config_cleanup(ddev);
+
+ drm_dev_unref(ddev);
+
+ return 0;
}
-static int rcar_du_remove(struct platform_device *pdev)
+static int rcar_du_probe(struct platform_device *pdev)
{
- struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct rcar_du_device *rcdu;
+ struct drm_connector *connector;
+ struct drm_device *ddev;
+ struct resource *mem;
+ int ret;
+
+ if (np == NULL) {
+ dev_err(&pdev->dev, "no device tree node\n");
+ return -ENODEV;
+ }
+
+ /* Allocate and initialize the DRM and R-Car device structures. */
+ rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
+ if (rcdu == NULL)
+ return -ENOMEM;
+
+ init_waitqueue_head(&rcdu->commit.wait);
+
+ rcdu->dev = &pdev->dev;
+ rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
+
+ platform_set_drvdata(pdev, rcdu);
- drm_put_dev(rcdu->ddev);
+ /* I/O resources */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
+
+ /* DRM/KMS objects */
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+ if (!ddev)
+ return -ENOMEM;
+
+ drm_dev_set_unique(ddev, dev_name(&pdev->dev));
+
+ rcdu->ddev = ddev;
+ ddev->dev_private = rcdu;
+
+ ret = rcar_du_modeset_init(rcdu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
+ goto error;
+ }
+
+ ddev->irq_enabled = 1;
+
+ /* Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto error;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_for_each_connector(connector, ddev) {
+ ret = drm_connector_register(connector);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ if (ret < 0)
+ goto error;
+
+ DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
return 0;
+
+error:
+ rcar_du_remove(pdev);
+
+ return ret;
}
static struct platform_driver rcar_du_platform_driver = {
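The rcar-du conversion above drops the deprecated drm_platform_init()/.load() midlayer in favour of explicit device creation in probe. Reduced to a skeleton, the flow is roughly the one below (a hedged sketch with error handling trimmed, not a drop-in replacement for the driver's probe):

static int example_du_probe(struct platform_device *pdev,
			    struct drm_driver *drv)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(drv, &pdev->dev);	/* allocate, no registration */
	if (!ddev)
		return -ENOMEM;

	drm_dev_set_unique(ddev, dev_name(&pdev->dev));

	/* hardware and KMS init go here (what .load() used to do) */

	ret = drm_dev_register(ddev, 0);	/* make the device visible */
	if (ret) {
		drm_dev_unref(ddev);
		return ret;
	}

	/* connectors are registered explicitly after drm_dev_register() */
	return 0;
}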
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 96f2eb43713c..6038be93c58d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -55,12 +55,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_hdmi_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -108,9 +102,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index ca12e8ca5552..46429c4be8e5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -761,6 +761,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
+ if (ret < 0)
+ return ret;
+
/* Initialize the groups. */
num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 0c43032fc693..e905f5da7aaa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -62,12 +62,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
{
@@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_lvds_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -117,9 +111,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index e0a5d8f93963..9d7e5c99caf6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -31,12 +31,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
{
@@ -48,7 +42,7 @@ static const struct drm_connector_funcs connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_vga_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_vga_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -76,9 +70,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index f300eba95bb1..1244cdf52859 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -81,8 +81,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
- if (ret != 0)
+ if (ret != 0) {
+ kfree(bo);
return ret;
+ }
bo->dumb = false;
virtio_gpu_init_ttm_placement(bo, pinned);
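
The hunk above plugs a leak: the buffer object allocated just before drm_gem_object_init() was never freed when that call failed. A minimal userspace-style sketch of the same error-path pattern, with invented names:

#include <stdlib.h>

struct demo_obj { int id; };

static int demo_obj_init(struct demo_obj *o) { return o->id >= 0 ? 0 : -1; }

static struct demo_obj *demo_obj_create(int id)
{
	struct demo_obj *o = malloc(sizeof(*o));

	if (!o)
		return NULL;
	o->id = id;
	if (demo_obj_init(o) != 0) {
		free(o);	/* release the allocation on the failure path */
		return NULL;
	}
	return o;
}
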
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index ecf15cf0c3fd..04fd0f2b6af0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -471,7 +471,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+ return -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 6521ec01413e..7cab049771de 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -902,6 +902,9 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev,
device->pwrctrl.bus_control = of_property_read_bool(node,
"qcom,bus-control");
+ device->pwrctrl.input_disable = of_property_read_bool(node,
+ "qcom,disable-wake-on-touch");
+
return 0;
}
@@ -1016,15 +1019,19 @@ static int adreno_probe(struct platform_device *pdev)
/* Initialize coresight for the target */
adreno_coresight_init(adreno_dev);
- adreno_input_handler.private = device;
-
#ifdef CONFIG_INPUT
- /*
- * It isn't fatal if we cannot register the input handler. Sad,
- * perhaps, but not fatal
- */
- if (input_register_handler(&adreno_input_handler))
- KGSL_DRV_ERR(device, "Unable to register the input handler\n");
+ if (!device->pwrctrl.input_disable) {
+ adreno_input_handler.private = device;
+ /*
+ * It isn't fatal if we cannot register the input handler. Sad,
+ * perhaps, but not fatal
+ */
+ if (input_register_handler(&adreno_input_handler)) {
+ adreno_input_handler.private = NULL;
+ KGSL_DRV_ERR(device,
+ "Unable to register the input handler\n");
+ }
+ }
#endif
out:
if (status) {
@@ -1076,7 +1083,8 @@ static int adreno_remove(struct platform_device *pdev)
_adreno_free_memories(adreno_dev);
#ifdef CONFIG_INPUT
- input_unregister_handler(&adreno_input_handler);
+ if (adreno_input_handler.private)
+ input_unregister_handler(&adreno_input_handler);
#endif
adreno_sysfs_close(adreno_dev);
@@ -1309,6 +1317,10 @@ static int _adreno_start(struct adreno_device *adreno_dev)
/* make sure ADRENO_DEVICE_STARTED is not set here */
BUG_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));
+ /* disallow l2pc during wake up to improve GPU wake up time */
+ kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
+ KGSL_L2PC_WAKEUP_TIMEOUT);
+
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
pmqos_wakeup_vote);
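
The adreno_probe() change above makes wake-on-touch optional: the input handler is registered only when the new "qcom,disable-wake-on-touch" property is absent, and the handler's private pointer doubles as a registered/not-registered flag for remove(). A hedged C sketch of that pattern, with hypothetical demo_* helpers:

#include <stdbool.h>
#include <stddef.h>

struct demo_dev {
	bool input_disable;	/* parsed from a DT boolean property */
	void *handler_priv;	/* non-NULL only while the handler is registered */
};

int demo_input_register(struct demo_dev *dev);		/* hypothetical */
void demo_input_unregister(struct demo_dev *dev);	/* hypothetical */

static void demo_register_input(struct demo_dev *dev)
{
	if (dev->input_disable)
		return;

	dev->handler_priv = dev;
	if (demo_input_register(dev) != 0)
		dev->handler_priv = NULL;	/* failure is non-fatal, just remember it */
}

static void demo_unregister_input(struct demo_dev *dev)
{
	if (dev->handler_priv)
		demo_input_unregister(dev);
}
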
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 218d08e6dfc3..4a0acdcf8844 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -568,6 +568,8 @@ enum adreno_regs {
ADRENO_REG_RBBM_RBBM_CTL,
ADRENO_REG_UCHE_INVALIDATE0,
ADRENO_REG_UCHE_INVALIDATE1,
+ ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
@@ -1508,21 +1510,60 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
spin_unlock_irqrestore(&rb->preempt_lock, flags);
}
+static inline bool is_power_counter_overflow(struct adreno_device *adreno_dev,
+ unsigned int reg, unsigned int prev_val, unsigned int *perfctr_pwr_hi)
+{
+ unsigned int val;
+ bool ret = false;
+
+ /*
+ * If prev_val is zero, it is first read after perf counter reset.
+ * So set perfctr_pwr_hi register to zero.
+ */
+ if (prev_val == 0) {
+ *perfctr_pwr_hi = 0;
+ return ret;
+ }
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, &val);
+ if (val != *perfctr_pwr_hi) {
+ *perfctr_pwr_hi = val;
+ ret = true;
+ }
+ return ret;
+}
+
static inline unsigned int counter_delta(struct kgsl_device *device,
unsigned int reg, unsigned int *counter)
{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int val;
unsigned int ret = 0;
+ bool overflow = true;
+ static unsigned int perfctr_pwr_hi;
/* Read the value */
kgsl_regread(device, reg, &val);
+ if (adreno_is_a5xx(adreno_dev) && reg == adreno_getreg
+ (adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO))
+ overflow = is_power_counter_overflow(adreno_dev, reg,
+ *counter, &perfctr_pwr_hi);
+
/* Return 0 for the first read */
if (*counter != 0) {
- if (val < *counter)
- ret = (0xFFFFFFFF - *counter) + val;
- else
+ if (val >= *counter) {
ret = val - *counter;
+ } else if (overflow == true) {
+ ret = (0xFFFFFFFF - *counter) + val;
+ } else {
+ /*
+ * Since KGSL got an abnormal value from the counter,
+ * drop the value instead of accumulating it.
+ */
+ pr_warn_once("KGSL: Abnormal value :0x%x (0x%x) from perf counter : 0x%x\n",
+ val, *counter, reg);
+ return 0;
+ }
}
*counter = val;
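
The counter_delta() change above only treats a smaller reading as a 32-bit wrap when the RBBM_0_HI half of the power counter confirms an overflow; otherwise the sample is discarded. A self-contained sketch of that decision, with the overflow check abstracted into a flag:

/* Sketch only: wrap-aware delta of a free-running 32-bit counter. */
static unsigned int delta_sketch(unsigned int val, unsigned int *prev,
				 int overflow_confirmed)
{
	unsigned int delta = 0;

	if (*prev != 0) {		/* first read after reset returns 0 */
		if (val >= *prev)
			delta = val - *prev;
		else if (overflow_confirmed)
			delta = (0xFFFFFFFF - *prev) + val;	/* wrapped */
		else
			return 0;	/* abnormal reading: drop the sample */
	}
	*prev = val;
	return delta;
}
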
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 423071811b43..0e3e5b64bdc7 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1530,6 +1530,10 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
A3XX_UCHE_CACHE_INVALIDATE0_REG),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1,
A3XX_UCHE_CACHE_INVALIDATE1_REG),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ A3XX_RBBM_PERFCTR_RBBM_0_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+ A3XX_RBBM_PERFCTR_RBBM_0_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 5ca04e522270..6170cc263e4a 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -806,6 +806,10 @@ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A4XX_RBBM_SW_RESET_CMD),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A4XX_UCHE_INVALIDATE0),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A4XX_UCHE_INVALIDATE1),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ A4XX_RBBM_PERFCTR_RBBM_0_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+ A4XX_RBBM_PERFCTR_RBBM_0_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 0715022be6e3..3fb13c7a0814 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -715,6 +715,10 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
if (ret)
goto err;
+ /* Integer overflow check for cmd_size */
+ if (data[2] > (data[0] - 2))
+ goto err;
+
cmds = data + data[2] + 3;
cmd_size = data[0] - data[2] - 2;
@@ -2069,6 +2073,9 @@ static void a5xx_start(struct adreno_device *adreno_dev)
}
+ /* Disable All flat shading optimization */
+ kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 10);
+
/*
* VPC corner case with local memory load kill leads to corrupt
* internal state. Normal Disable does not work for all a5x chips.
@@ -3069,6 +3076,10 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
A5XX_RBBM_BLOCK_SW_RESET_CMD2),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A5XX_UCHE_INVALIDATE0),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ A5XX_RBBM_PERFCTR_RBBM_0_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+ A5XX_RBBM_PERFCTR_RBBM_0_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
A5XX_RBBM_PERFCTR_LOAD_VALUE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 0e56731b16e2..883a9810fbf4 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -537,13 +537,42 @@ static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
"smmu_info");
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ kgsl_free_global(device, &iommu->smmu_info);
+}
+
#else
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
return -ENODEV;
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+}
#endif
+static void a5xx_preemption_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ del_timer(&preempt->timer);
+ kgsl_free_global(device, &preempt->counters);
+ a5xx_preemption_iommu_close(adreno_dev);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ kgsl_free_global(device, &rb->preemption_desc);
+ }
+}
+
int a5xx_preemption_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -568,7 +597,7 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
"preemption_counters");
if (ret)
- return ret;
+ goto err;
addr = preempt->counters.gpuaddr;
@@ -576,10 +605,16 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
if (ret)
- return ret;
+ goto err;
addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
}
- return a5xx_preemption_iommu_init(adreno_dev);
+ ret = a5xx_preemption_iommu_init(adreno_dev);
+
+err:
+ if (ret)
+ a5xx_preemption_close(device);
+
+ return ret;
}
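
a5xx_preemption_init() above now funnels every failure through one label that calls a close routine, which in turn is safe to run on a partially initialized state. A small userspace sketch of the same unwind pattern, with invented names:

#include <stdlib.h>

struct demo_preempt { void *counters; void *ringbuf; };

/* free(NULL) is a no-op, so one teardown covers every partial-init state. */
static void demo_preempt_close(struct demo_preempt *p)
{
	free(p->counters);
	free(p->ringbuf);
	p->counters = p->ringbuf = NULL;
}

static int demo_preempt_init(struct demo_preempt *p)
{
	int ret = -1;

	*p = (struct demo_preempt){ 0 };

	p->counters = malloc(64);
	if (!p->counters)
		goto err;

	p->ringbuf = malloc(4096);
	if (!p->ringbuf)
		goto err;

	return 0;
err:
	demo_preempt_close(p);	/* single unwind path for all failures */
	return ret;
}
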
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 5306303b8d15..2027ac66f737 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -131,6 +131,8 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
static void sync_event_print(struct seq_file *s,
struct kgsl_drawobj_sync_event *sync_event)
{
+ unsigned long flags;
+
switch (sync_event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
seq_printf(s, "sync: ctx: %d ts: %d",
@@ -138,9 +140,13 @@ static void sync_event_print(struct seq_file *s,
break;
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ spin_lock_irqsave(&sync_event->handle_lock, flags);
+
seq_printf(s, "sync: [%pK] %s", sync_event->handle,
(sync_event->handle && sync_event->handle->fence)
? sync_event->handle->fence->name : "NULL");
+
+ spin_unlock_irqrestore(&sync_event->handle_lock, flags);
break;
default:
seq_printf(s, "sync: type: %d", sync_event->type);
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 8902c3175c79..1a94e71f5c1d 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1460,7 +1460,9 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
spin_unlock(&drawctxt->lock);
- kgsl_pwrctrl_update_l2pc(&adreno_dev->dev);
+ if (device->pwrctrl.l2pc_update_queue)
+ kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
+ KGSL_L2PC_QUEUE_TIMEOUT);
/* Add the context to the dispatcher pending list */
dispatcher_queue_context(adreno_dev, drawctxt);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index d79d9613043f..ddc53edce3c1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -26,6 +26,7 @@
#include "adreno_iommu.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
+#include "adreno_trace.h"
#include "a3xx_reg.h"
#include "adreno_a5xx.h"
@@ -58,6 +59,7 @@ static void _cff_write_ringbuffer(struct adreno_ringbuffer *rb)
}
static void adreno_get_submit_time(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
struct adreno_submit_time *time)
{
unsigned long flags;
@@ -87,6 +89,9 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev,
} else
time->ticks = 0;
+ /* Trace the GPU time to create a mapping to ftrace time */
+ trace_adreno_cmdbatch_sync(rb->drawctxt_active, time->ticks);
+
/* Get the kernel clock for time since boot */
time->ktime = local_clock();
@@ -128,7 +133,7 @@ void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
_cff_write_ringbuffer(rb);
if (time != NULL)
- adreno_get_submit_time(adreno_dev, time);
+ adreno_get_submit_time(adreno_dev, rb, time);
adreno_ringbuffer_wptr(adreno_dev, rb);
}
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index 16ca0980cfbe..74c4c4e6e1fa 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -148,6 +148,29 @@ TRACE_EVENT(adreno_cmdbatch_retired,
)
);
+TRACE_EVENT(adreno_cmdbatch_sync,
+ TP_PROTO(struct adreno_context *drawctxt,
+ uint64_t ticks),
+ TP_ARGS(drawctxt, ticks),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(uint64_t, ticks)
+ __field(int, prio)
+ ),
+ TP_fast_assign(
+ __entry->id = drawctxt->base.id;
+ __entry->timestamp = drawctxt->timestamp;
+ __entry->ticks = ticks;
+ __entry->prio = drawctxt->base.priority;
+ ),
+ TP_printk(
+ "ctx=%u ctx_prio=%d ts=%u ticks=%lld",
+ __entry->id, __entry->prio, __entry->timestamp,
+ __entry->ticks
+ )
+);
+
TRACE_EVENT(adreno_cmdbatch_fault,
TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault),
TP_ARGS(cmdobj, fault),
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 8dc2ebd26cf9..fba18231cb72 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -79,6 +79,7 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
{
struct kgsl_drawobj_sync_event *event;
unsigned int i;
+ unsigned long flags;
for (i = 0; i < syncobj->numsyncs; i++) {
event = &syncobj->synclist[i];
@@ -101,12 +102,16 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
break;
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ spin_lock_irqsave(&event->handle_lock, flags);
+
if (event->handle)
dev_err(device->dev, " fence: [%pK] %s\n",
event->handle->fence,
event->handle->name);
else
dev_err(device->dev, " fence: invalid\n");
+
+ spin_unlock_irqrestore(&event->handle_lock, flags);
break;
}
}
@@ -119,6 +124,7 @@ static void syncobj_timer(unsigned long data)
struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
struct kgsl_drawobj_sync_event *event;
unsigned int i;
+ unsigned long flags;
if (syncobj == NULL || drawobj->context == NULL)
return;
@@ -147,12 +153,16 @@ static void syncobj_timer(unsigned long data)
i, event->context->id, event->timestamp);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ spin_lock_irqsave(&event->handle_lock, flags);
+
if (event->handle != NULL) {
dev_err(device->dev, " [%d] FENCE %s\n",
i, event->handle->fence ?
event->handle->fence->name : "NULL");
kgsl_sync_fence_log(event->handle->fence);
}
+
+ spin_unlock_irqrestore(&event->handle_lock, flags);
break;
}
}
@@ -231,7 +241,7 @@ static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
- unsigned long pending;
+ unsigned long pending, flags;
unsigned int i;
/* Zap the canary timer */
@@ -262,8 +272,17 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
drawobj_sync_func, event);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
- if (kgsl_sync_fence_async_cancel(event->handle))
+ spin_lock_irqsave(&event->handle_lock, flags);
+
+ if (kgsl_sync_fence_async_cancel(event->handle)) {
+ event->handle = NULL;
+ spin_unlock_irqrestore(
+ &event->handle_lock, flags);
drawobj_put(drawobj);
+ } else {
+ spin_unlock_irqrestore(
+ &event->handle_lock, flags);
+ }
break;
}
}
@@ -325,12 +344,23 @@ EXPORT_SYMBOL(kgsl_drawobj_destroy);
static void drawobj_sync_fence_func(void *priv)
{
+ unsigned long flags;
struct kgsl_drawobj_sync_event *event = priv;
+ drawobj_sync_expire(event->device, event);
+
trace_syncpoint_fence_expire(event->syncobj,
event->handle ? event->handle->name : "unknown");
- drawobj_sync_expire(event->device, event);
+ spin_lock_irqsave(&event->handle_lock, flags);
+
+ /*
+ * Setting event->handle to NULL here makes sure that
+ * other functions do not dereference an invalid pointer.
+ */
+ event->handle = NULL;
+
+ spin_unlock_irqrestore(&event->handle_lock, flags);
drawobj_put(&event->syncobj->base);
}
@@ -350,6 +380,7 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
struct kgsl_drawobj_sync_event *event;
struct sync_fence *fence = NULL;
unsigned int id;
+ unsigned long flags;
int ret = 0;
fence = sync_fence_fdget(sync->fd);
@@ -368,18 +399,23 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
event->device = device;
event->context = NULL;
+ spin_lock_init(&event->handle_lock);
set_bit(event->id, &syncobj->pending);
trace_syncpoint_fence(syncobj, fence->name);
+ spin_lock_irqsave(&event->handle_lock, flags);
+
event->handle = kgsl_sync_fence_async_wait(sync->fd,
drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
ret = PTR_ERR(event->handle);
- clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
+ spin_unlock_irqrestore(&event->handle_lock, flags);
+
+ clear_bit(event->id, &syncobj->pending);
drawobj_put(drawobj);
@@ -390,6 +426,8 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
*/
trace_syncpoint_fence_expire(syncobj, (ret < 0) ?
"error" : fence->name);
+ } else {
+ spin_unlock_irqrestore(&event->handle_lock, flags);
}
sync_fence_put(fence);
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index fd9d2bc93f41..b208870e4c42 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -114,6 +114,7 @@ struct kgsl_drawobj_sync {
* register this event
* @timestamp: Pending timestamp for the event
* @handle: Pointer to a sync fence handle
+ * @handle_lock: Spin lock to protect handle
* @device: Pointer to the KGSL device
*/
struct kgsl_drawobj_sync_event {
@@ -123,6 +124,7 @@ struct kgsl_drawobj_sync_event {
struct kgsl_context *context;
unsigned int timestamp;
struct kgsl_sync_fence_waiter *handle;
+ spinlock_t handle_lock;
struct kgsl_device *device;
};
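
The new handle_lock serializes every access to event->handle, so the fence callback can clear the pointer while the dump/debugfs paths are still reading it. A hedged userspace sketch of the same "pointer guarded by its own lock" pattern, using pthreads and invented names:

#include <pthread.h>
#include <stddef.h>

struct demo_waiter;	/* opaque, owned by the fence framework in the driver */

struct demo_event {
	pthread_mutex_t handle_lock;
	struct demo_waiter *handle;	/* cleared by the expire/cancel path */
};

/* Reader: only test or dereference the handle while holding the lock. */
static int demo_event_active(struct demo_event *ev)
{
	int active;

	pthread_mutex_lock(&ev->handle_lock);
	active = ev->handle != NULL;
	pthread_mutex_unlock(&ev->handle_lock);
	return active;
}

/* Expire/cancel path: clear the handle under the same lock so no reader
 * is left holding a pointer that is about to become invalid. */
static void demo_event_expire(struct demo_event *ev)
{
	pthread_mutex_lock(&ev->handle_lock);
	ev->handle = NULL;
	pthread_mutex_unlock(&ev->handle_lock);
}
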
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index c31a85b07447..685ce3ea968b 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -65,26 +65,19 @@ _kgsl_get_pool_from_order(unsigned int order)
/* Map the page into kernel and zero it out */
static void
-_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
+_kgsl_pool_zero_page(struct page *p)
{
- int i;
-
- for (i = 0; i < (1 << pool_order); i++) {
- struct page *page = nth_page(p, i);
- void *addr = kmap_atomic(page);
+ void *addr = kmap_atomic(p);
- memset(addr, 0, PAGE_SIZE);
- dmac_flush_range(addr, addr + PAGE_SIZE);
- kunmap_atomic(addr);
- }
+ memset(addr, 0, PAGE_SIZE);
+ dmac_flush_range(addr, addr + PAGE_SIZE);
+ kunmap_atomic(addr);
}
/* Add a page to specified pool */
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
- _kgsl_pool_zero_page(p, pool->pool_order);
-
spin_lock(&pool->list_lock);
list_add_tail(&p->lru, &pool->page_list);
pool->page_count++;
@@ -329,7 +322,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
} else
return -ENOMEM;
}
- _kgsl_pool_zero_page(page, order);
goto done;
}
@@ -349,7 +341,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
page = alloc_pages(gfp_mask, order);
if (page == NULL)
return -ENOMEM;
- _kgsl_pool_zero_page(page, order);
goto done;
}
}
@@ -379,13 +370,12 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
} else
return -ENOMEM;
}
-
- _kgsl_pool_zero_page(page, order);
}
done:
for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
p = nth_page(page, j);
+ _kgsl_pool_zero_page(p);
pages[pcount] = p;
pcount++;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ad8b0131bb46..8c998a5d791b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -43,13 +43,6 @@
#define DEFAULT_BUS_P 25
-/*
- * The effective duration of qos request in usecs. After
- * timeout, qos request is cancelled automatically.
- * Kept 80ms default, inline with default GPU idle time.
- */
-#define KGSL_L2PC_CPU_TIMEOUT (80 * 1000)
-
/* Order deeply matters here because reasons. New entries go on the end */
static const char * const clocks[] = {
"src_clk",
@@ -520,12 +513,14 @@ EXPORT_SYMBOL(kgsl_pwrctrl_set_constraint);
/**
* kgsl_pwrctrl_update_l2pc() - Update existing qos request
* @device: Pointer to the kgsl_device struct
+ * @timeout_us: the effective duration of qos request in usecs.
*
* Updates an existing qos request to avoid L2PC on the
* CPUs (which are selected through dtsi) on which GPU
* thread is running. This would help for performance.
*/
-void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device)
+void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
+ unsigned long timeout_us)
{
int cpu;
@@ -539,7 +534,7 @@ void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device)
pm_qos_update_request_timeout(
&device->pwrctrl.l2pc_cpus_qos,
device->pwrctrl.pm_qos_cpu_mask_latency,
- KGSL_L2PC_CPU_TIMEOUT);
+ timeout_us);
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_update_l2pc);
@@ -2201,6 +2196,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
kgsl_property_read_u32(device, "qcom,l2pc-cpu-mask",
&pwr->l2pc_cpus_mask);
+ pwr->l2pc_update_queue = of_property_read_bool(
+ device->pdev->dev.of_node,
+ "qcom,l2pc-update-queue");
+
pm_runtime_enable(&pdev->dev);
ocmem_bus_node = of_find_node_by_name(
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 42f918b80fcd..02707c901839 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -51,6 +51,19 @@
#define KGSL_PWR_DEL_LIMIT 1
#define KGSL_PWR_SET_LIMIT 2
+/*
+ * The effective duration of qos request in usecs at queue time.
+ * After timeout, qos request is cancelled automatically.
+ * Kept 80ms default, inline with default GPU idle time.
+ */
+#define KGSL_L2PC_QUEUE_TIMEOUT (80 * 1000)
+
+/*
+ * The effective duration of qos request in usecs at wakeup time.
+ * After timeout, qos request is cancelled automatically.
+ */
+#define KGSL_L2PC_WAKEUP_TIMEOUT (10 * 1000)
+
enum kgsl_pwrctrl_timer_type {
KGSL_PWR_IDLE_TIMER,
};
@@ -128,10 +141,12 @@ struct kgsl_regulator {
* @irq_name - resource name for the IRQ
* @clk_stats - structure of clock statistics
* @l2pc_cpus_mask - mask to avoid L2PC on masked CPUs
+ * @l2pc_update_queue - Boolean flag to avoid L2PC on masked CPUs at queue time
* @l2pc_cpus_qos - qos structure to avoid L2PC on CPUs
* @pm_qos_req_dma - the power management quality of service structure
* @pm_qos_active_latency - allowed CPU latency in microseconds when active
* @pm_qos_cpu_mask_latency - allowed CPU mask latency in microseconds
+ * @input_disable - To disable GPU wakeup on touch input event
* @pm_qos_wakeup_latency - allowed CPU latency in microseconds during wakeup
* @bus_control - true if the bus calculation is independent
* @bus_mod - modifier from the current power level for the bus vote
@@ -183,11 +198,13 @@ struct kgsl_pwrctrl {
const char *irq_name;
struct kgsl_clk_stats clk_stats;
unsigned int l2pc_cpus_mask;
+ bool l2pc_update_queue;
struct pm_qos_request l2pc_cpus_qos;
struct pm_qos_request pm_qos_req_dma;
unsigned int pm_qos_active_latency;
unsigned int pm_qos_cpu_mask_latency;
unsigned int pm_qos_wakeup_latency;
+ bool input_disable;
bool bus_control;
int bus_mod;
unsigned int bus_percent_ab;
@@ -249,5 +266,6 @@ int kgsl_active_count_wait(struct kgsl_device *device, int count);
void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy);
void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
struct kgsl_pwr_constraint *pwrc, uint32_t id);
-void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device);
+void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
+ unsigned long timeout_us);
#endif /* __KGSL_PWRCTRL_H */
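
With the timeout now a parameter, callers pick KGSL_L2PC_QUEUE_TIMEOUT when commands are queued and the shorter KGSL_L2PC_WAKEUP_TIMEOUT during wake-up. A minimal sketch of forwarding the per-call timeout, assuming the pm_qos_update_request_timeout() helper of this kernel generation; the wrapper name is invented:

#include <linux/pm_qos.h>

/* Sketch only: forward a caller-chosen timeout instead of a fixed constant. */
static void demo_update_l2pc(struct pm_qos_request *req, s32 latency_us,
			     unsigned long timeout_us)
{
	/* e.g. KGSL_L2PC_QUEUE_TIMEOUT at queue time,
	 *      KGSL_L2PC_WAKEUP_TIMEOUT at wake-up
	 */
	pm_qos_update_request_timeout(req, latency_us, timeout_us);
}
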
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 358b3b038899..4bf591c236a7 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -179,7 +179,7 @@ int kgsl_add_fence_event(struct kgsl_device *device,
goto out;
}
snprintf(fence_name, sizeof(fence_name),
- "%s-pid-%d-ctx-%d-ts-%d",
+ "%s-pid-%d-ctx-%d-ts-%u",
device->name, current->group_leader->pid,
context_id, timestamp);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 34ea83d067af..13dc2731195b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2428,6 +2428,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
#if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c7f8b70d15ee..37cbc2ecfc5f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -777,6 +777,9 @@
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
+#define USB_VENDOR_ID_PETZL 0x2122
+#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234
+
#define USB_VENDOR_ID_PHILIPS 0x0471
#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 5c20970ccbbb..28ab4e52dab5 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -180,6 +180,9 @@
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
#define FG_ADC_RR_CURR_INPUT_FACTOR 2000
#define FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL 1886
+#define FG_ADC_RR_CURR_USBIN_660_FACTOR_MIL 9
+#define FG_ADC_RR_CURR_USBIN_660_UV_VAL 579500
+
#define FG_ADC_SCALE_MILLI_FACTOR 1000
#define FG_ADC_KELVINMIL_CELSIUSMIL 273150
@@ -192,6 +195,9 @@
#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000
#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000
#define FG_RR_CONV_MAX_RETRY_CNT 50
+#define FG_RR_TP_REV_VERSION1 21
+#define FG_RR_TP_REV_VERSION2 29
+#define FG_RR_TP_REV_VERSION3 32
/*
* The channel number is not a physical index in hardware,
@@ -228,6 +234,7 @@ struct rradc_chip {
struct rradc_chan_prop *chan_props;
struct device_node *revid_dev_node;
struct pmic_revid_data *pmic_fab_id;
+ int volt;
};
struct rradc_channels {
@@ -353,7 +360,7 @@ static int rradc_post_process_volt(struct rradc_chip *chip,
return 0;
}
-static int rradc_post_process_curr(struct rradc_chip *chip,
+static int rradc_post_process_usbin_curr(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_ua)
{
@@ -361,11 +368,33 @@ static int rradc_post_process_curr(struct rradc_chip *chip,
if (!prop)
return -EINVAL;
-
- if (prop->channel == RR_ADC_USBIN_I)
- scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL;
- else
- scale = FG_ADC_RR_CURR_INPUT_FACTOR;
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->pmic_subtype) {
+ case PM660_SUBTYPE:
+ if (((chip->pmic_fab_id->tp_rev
+ >= FG_RR_TP_REV_VERSION1)
+ && (chip->pmic_fab_id->tp_rev
+ <= FG_RR_TP_REV_VERSION2))
+ || (chip->pmic_fab_id->tp_rev
+ >= FG_RR_TP_REV_VERSION3)) {
+ chip->volt = div64_s64(chip->volt, 1000);
+ chip->volt = chip->volt *
+ FG_ADC_RR_CURR_USBIN_660_FACTOR_MIL;
+ chip->volt = FG_ADC_RR_CURR_USBIN_660_UV_VAL -
+ (chip->volt);
+ chip->volt = div64_s64(1000000000, chip->volt);
+ scale = chip->volt;
+ } else
+ scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL;
+ break;
+ case PMI8998_SUBTYPE:
+ scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL;
+ break;
+ default:
+ pr_err("No PMIC subtype found\n");
+ return -EINVAL;
+ }
+ }
/* scale * V/A; 2.5V ADC full scale */
ua = ((int64_t)adc_code * scale);
@@ -376,6 +405,24 @@ static int rradc_post_process_curr(struct rradc_chip *chip,
return 0;
}
+static int rradc_post_process_dcin_curr(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u16 adc_code,
+ int *result_ua)
+{
+ int64_t ua = 0;
+
+ if (!prop)
+ return -EINVAL;
+
+ /* 0.5 V/A; 2.5V ADC full scale */
+ ua = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR);
+ ua *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+ ua = div64_s64(ua, (FG_MAX_ADC_READINGS * 1000));
+ *result_ua = ua;
+
+ return 0;
+}
+
static int rradc_post_process_die_temp(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
@@ -591,13 +638,13 @@ static const struct rradc_channels rradc_chans[] = {
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB,
FG_ADC_RR_AUX_THERM_STS)
- RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_curr,
+ RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_usbin_curr,
FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB,
FG_ADC_RR_USB_IN_I_STS)
RR_ADC_CHAN_VOLT("usbin_v", &rradc_post_process_volt,
FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB,
FG_ADC_RR_USB_IN_V_STS)
- RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_curr,
+ RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_dcin_curr,
FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB,
FG_ADC_RR_DC_IN_I_STS)
RR_ADC_CHAN_VOLT("dcin_v", &rradc_post_process_volt,
@@ -955,6 +1002,21 @@ static int rradc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
+ if (((chip->pmic_fab_id->tp_rev
+ >= FG_RR_TP_REV_VERSION1)
+ && (chip->pmic_fab_id->tp_rev
+ <= FG_RR_TP_REV_VERSION2))
+ || (chip->pmic_fab_id->tp_rev
+ >= FG_RR_TP_REV_VERSION3)) {
+ if (chan->address == RR_ADC_USBIN_I) {
+ prop = &chip->chan_props[RR_ADC_USBIN_V];
+ rc = rradc_do_conversion(chip, prop, &adc_code);
+ if (rc)
+ break;
+ prop->scale(chip, prop, adc_code, &chip->volt);
+ }
+ }
+
prop = &chip->chan_props[chan->address];
rc = rradc_do_conversion(chip, prop, &adc_code);
if (rc)
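
For the affected PM660 trim revisions, the USBIN current scale above is derived from a fresh USBIN voltage conversion rather than a fixed factor. A hedged arithmetic sketch of that integer math, using the constants from the hunk (no overflow or divide-by-zero handling):

#include <stdint.h>

#define USBIN_660_FACTOR_MIL	9
#define USBIN_660_UV_VAL	579500

/* Sketch only: scale = 1e9 / (579500 - 9 * V_usbin[mV]) */
static int64_t demo_usbin_660_scale(int64_t usbin_uv)
{
	int64_t v = usbin_uv / 1000;			/* uV -> mV */

	v = USBIN_660_UV_VAL - v * USBIN_660_FACTOR_MIL;
	return 1000000000LL / v;
}
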
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 3f5741a3e728..43d5166db4c6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -857,6 +857,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
+ qp_attr->port_num = id_priv->id.port_num;
+ *qp_attr_mask |= IB_QP_PORT;
} else
ret = -ENOSYS;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 1c02deab068f..b7a73f1a8beb 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2287,6 +2287,11 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if ((cmd.attr_mask & IB_QP_PORT) &&
+ (cmd.port_num < rdma_start_port(ib_dev) ||
+ cmd.port_num > rdma_end_port(ib_dev)))
+ return -EINVAL;
+
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
@@ -2827,6 +2832,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (cmd.attr.port_num < rdma_start_port(ib_dev) ||
+ cmd.attr.port_num > rdma_end_port(ib_dev))
+ return -EINVAL;
+
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 89abfdb539ac..c84c685056b9 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -434,8 +434,10 @@ static int i8042_start(struct serio *serio)
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = true;
- mb();
+ spin_unlock_irq(&i8042_lock);
+
return 0;
}
@@ -448,16 +450,20 @@ static void i8042_stop(struct serio *serio)
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = false;
+ port->serio = NULL;
+ spin_unlock_irq(&i8042_lock);
/*
+ * We need to make sure that the interrupt handler finishes using
+ * our serio port before we return from this function.
* We synchronize with both AUX and KBD IRQs because there is
* a (very unlikely) chance that AUX IRQ is raised for KBD port
* and vice versa.
*/
synchronize_irq(I8042_AUX_IRQ);
synchronize_irq(I8042_KBD_IRQ);
- port->serio = NULL;
}
/*
@@ -574,7 +580,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&i8042_lock, flags);
- if (likely(port->exists && !filtered))
+ if (likely(serio && !filtered))
serio_interrupt(serio, data, dfl);
out:
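
The i8042 change above publishes and tears down the port state under the same spinlock the interrupt handler takes, and only then waits for in-flight handlers. A hedged kernel-style sketch of that teardown ordering, with hypothetical demo_* names:

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/serio.h>

static DEFINE_SPINLOCK(demo_lock);	/* also taken by the ISR */

struct demo_port {
	bool exists;
	struct serio *serio;
};

/* Sketch only, hypothetical driver: stop path for an ISR-visible port. */
static void demo_port_stop(struct demo_port *port, int irq)
{
	spin_lock_irq(&demo_lock);
	port->exists = false;
	port->serio = NULL;		/* the ISR re-checks this under the lock */
	spin_unlock_irq(&demo_lock);

	synchronize_irq(irq);		/* wait out any handler already running */
}
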
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 78bdd24af28b..08bfb83a9447 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -1003,7 +1003,10 @@ static unsigned char *fts_status_event_handler(
case FTS_WATER_MODE_ON:
case FTS_WATER_MODE_OFF:
default:
- logError(1, "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n", tag, __func__, event[0], event[1], event[2], event[3], event[4], event[5], event[6], event[7]);
+ logError(0,
+ "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ tag, __func__, event[0], event[1], event[2],
+ event[3], event[4], event[5], event[6], event[7]);
break;
}
@@ -1755,8 +1758,6 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, unsigned long va
info->resume_bit = 1;
- fts_system_reset();
-
fts_mode_handler(info, 0);
info->sensor_sleep = false;
@@ -1959,9 +1960,9 @@ static int parse_dt(struct device *dev, struct fts_i2c_platform_data *bdata)
bdata->bus_reg_name = name;
logError(0, "%s bus_reg_name = %s\n", tag, name);
- if (of_property_read_bool(np, "st, reset-gpio")) {
+ if (of_property_read_bool(np, "st,reset-gpio")) {
bdata->reset_gpio = of_get_named_gpio_flags(np,
- "st, reset-gpio", 0, NULL);
+ "st,reset-gpio", 0, NULL);
logError(0, "%s reset_gpio =%d\n", tag, bdata->reset_gpio);
} else {
bdata->reset_gpio = GPIO_NOT_DEFINED;
@@ -2210,7 +2211,13 @@ static int fts_probe(struct i2c_client *client,
}
#endif
- queue_delayed_work(info->fwu_workqueue, &info->fwu_work, msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+ /* To auto-update the FW at probe time, re-enable
+ * (uncomment) the following call.
+ */
+
+ /* queue_delayed_work(info->fwu_workqueue, &info->fwu_work,
+ * msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+ */
logError(1, "%s Probe Finished!\n", tag);
return OK;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c6f74b149706..ce18a512b76a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1727,7 +1727,8 @@ static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
{
- int ret, scm_ret;
+ int ret;
+ u64 scm_ret;
if (!arm_smmu_is_static_cb(smmu))
return 0;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2e0f61a2dc3f..9e96d81bc5cd 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -793,6 +793,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
int enabled;
u64 val;
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
if (gic_irq_in_rdist(d))
return -EINVAL;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index deb89d63a728..e684be1bb7c0 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -19,9 +19,9 @@
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
@@ -39,6 +39,7 @@ struct keystone_irq_device {
struct irq_domain *irqd;
struct regmap *devctrl_regs;
u32 devctrl_offset;
+ raw_spinlock_t wa_lock;
};
static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d)
/* nothing to do here */
}
-static void keystone_irq_handler(struct irq_desc *desc)
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
{
- unsigned int irq = irq_desc_get_irq(desc);
- struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
+ struct keystone_irq_device *kirq = keystone_irq;
+ unsigned long wa_lock_flags;
unsigned long pending;
int src, virq;
dev_dbg(kirq->dev, "start irq %d\n", irq);
- chained_irq_enter(irq_desc_get_chip(desc), desc);
-
pending = keystone_irq_readl(kirq);
keystone_irq_writel(kirq, pending);
@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc)
if (!virq)
dev_warn(kirq->dev, "sporious irq detected hwirq %d, virq %d\n",
src, virq);
+ raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
generic_handle_irq(virq);
+ raw_spin_unlock_irqrestore(&kirq->wa_lock,
+ wa_lock_flags);
}
}
- chained_irq_exit(irq_desc_get_chip(desc), desc);
-
dev_dbg(kirq->dev, "end irq %d\n", irq);
+ return IRQ_HANDLED;
}
static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev)
return -ENODEV;
}
+ raw_spin_lock_init(&kirq->wa_lock);
+
platform_set_drvdata(pdev, kirq);
- irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
+ ret = request_irq(kirq->irq, keystone_irq_handler,
+ 0, dev_name(dev), kirq);
+ if (ret) {
+ irq_domain_remove(kirq->irqd);
+ return ret;
+ }
/* clear all source bits */
keystone_irq_writel(kirq, ~0x0);
@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev)
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
+ free_irq(kirq->irq, kirq);
+
for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 17304705f2cf..05fa9f7af53c 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = {
.irq_ack = icoll_ack_irq,
.irq_mask = icoll_mask_irq,
.irq_unmask = icoll_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
static struct irq_chip asm9260_icoll_chip = {
.irq_ack = icoll_ack_irq,
.irq_mask = asm9260_mask_irq,
.irq_unmask = asm9260_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 9b856e1890d1..e4c43a17b333 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
if (arg) {
if (copy_from_user(bname, argp, sizeof(bname) - 1))
return -EFAULT;
+ bname[sizeof(bname)-1] = 0;
} else
return -EINVAL;
ret = mutex_lock_interruptible(&dev->mtx);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index aa5dd5668528..dbad5c431bcb 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
char newname[10];
if (p) {
- /* Slave-Name MUST not be empty */
- if (!strlen(p + 1))
+ /* Slave-Name MUST not be empty or overflow 'newname' */
+ if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
return NULL;
- strcpy(newname, p + 1);
*p = 0;
/* Master must already exist */
if (!(n = isdn_net_findif(parm)))
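
strscpy() returns the number of characters copied, 0 for an empty source and -E2BIG when the destination would overflow, so the single "<= 0" test above rejects both an empty and an over-long slave name. A tiny hedged sketch of that check, with an invented helper name:

#include <linux/errno.h>
#include <linux/string.h>

/* Sketch only: copy a user-supplied name, rejecting empty or too-long input. */
static int demo_copy_name(char *dst, size_t dst_sz, const char *src)
{
	if (strscpy(dst, src, dst_sz) <= 0)
		return -EINVAL;
	return 0;
}
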
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 9c1e8adaf4fc..bf3fbd00a091 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
id);
return NULL;
} else {
- rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
+ rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
if (!rs)
return NULL;
rs->state = CCPResetIdle;
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 564d20079715..15c931bbbf65 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -158,6 +158,11 @@
#define FLASH_LED_DISABLE 0x00
#define FLASH_LED_SAFETY_TMR_DISABLED 0x13
#define FLASH_LED_MAX_TOTAL_CURRENT_MA 3750
+#define FLASH_LED_IRES5P0_MAX_CURR_MA 640
+#define FLASH_LED_IRES7P5_MAX_CURR_MA 960
+#define FLASH_LED_IRES10P0_MAX_CURR_MA 1280
+#define FLASH_LED_IRES12P5_MAX_CURR_MA 1600
+#define MAX_IRES_LEVELS 4
/* notifier call chain for flash-led irqs */
static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
@@ -196,13 +201,15 @@ struct flash_node_data {
struct pinctrl_state *hw_strobe_state_suspend;
int hw_strobe_gpio;
int ires_ua;
+ int default_ires_ua;
int max_current;
int current_ma;
int prev_current_ma;
u8 duration;
u8 id;
u8 type;
- u8 ires;
+ u8 ires_idx;
+ u8 default_ires_idx;
u8 hdrm_val;
u8 current_reg_val;
u8 strobe_ctrl;
@@ -305,6 +312,11 @@ static int otst3_threshold_table[] = {
125, 119, 113, 107, 149, 143, 137, 131,
};
+static int max_ires_curr_ma_table[MAX_IRES_LEVELS] = {
+ FLASH_LED_IRES12P5_MAX_CURR_MA, FLASH_LED_IRES10P0_MAX_CURR_MA,
+ FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA
+};
+
static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
{
int rc;
@@ -935,6 +947,7 @@ static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode)
static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
{
+ int i = 0;
int prgm_current_ma = value;
int min_ma = fnode->ires_ua / 1000;
struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
@@ -944,7 +957,22 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
else if (value < min_ma)
prgm_current_ma = min_ma;
+ fnode->ires_idx = fnode->default_ires_idx;
+ fnode->ires_ua = fnode->default_ires_ua;
+
prgm_current_ma = min(prgm_current_ma, fnode->max_current);
+ if (prgm_current_ma > max_ires_curr_ma_table[fnode->ires_idx]) {
+ /* find the matching ires */
+ for (i = MAX_IRES_LEVELS - 1; i >= 0; i--) {
+ if (prgm_current_ma <= max_ires_curr_ma_table[i]) {
+ fnode->ires_idx = i;
+ fnode->ires_ua = FLASH_LED_IRES_MIN_UA +
+ (FLASH_LED_IRES_BASE - fnode->ires_idx) *
+ FLASH_LED_IRES_DIVISOR;
+ break;
+ }
+ }
+ }
fnode->current_ma = prgm_current_ma;
fnode->cdev.brightness = prgm_current_ma;
fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
@@ -1062,7 +1090,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
val = 0;
for (i = 0; i < led->num_fnodes; i++)
if (snode->led_mask & BIT(led->fnode[i].id))
- val |= led->fnode[i].ires << (led->fnode[i].id * 2);
+ val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2);
rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base),
FLASH_LED_CURRENT_MASK, val);
@@ -1434,13 +1462,14 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
return rc;
}
- fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
- fnode->ires = FLASH_LED_IRES_DEFAULT_VAL;
+ fnode->default_ires_ua = fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
+ fnode->default_ires_idx = fnode->ires_idx = FLASH_LED_IRES_DEFAULT_VAL;
rc = of_property_read_u32(node, "qcom,ires-ua", &val);
if (!rc) {
- fnode->ires_ua = val;
- fnode->ires = FLASH_LED_IRES_BASE -
- (val - FLASH_LED_IRES_MIN_UA) / FLASH_LED_IRES_DIVISOR;
+ fnode->default_ires_ua = fnode->ires_ua = val;
+ fnode->default_ires_idx = fnode->ires_idx =
+ FLASH_LED_IRES_BASE - (val - FLASH_LED_IRES_MIN_UA) /
+ FLASH_LED_IRES_DIVISOR;
} else if (rc != -EINVAL) {
pr_err("Unable to read current resolution rc=%d\n", rc);
return rc;
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index 950244f1e4e8..bfa7d29701da 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -160,18 +160,19 @@
#define QPNP_WLED_MOD_EN_SHFT 7
#define QPNP_WLED_MOD_EN 1
#define QPNP_WLED_GATE_DRV_MASK 0xFE
-#define QPNP_WLED_SYNC_DLY_MASK 0xF8
+#define QPNP_WLED_SYNC_DLY_MASK GENMASK(2, 0)
#define QPNP_WLED_SYNC_DLY_MIN_US 0
#define QPNP_WLED_SYNC_DLY_MAX_US 1400
#define QPNP_WLED_SYNC_DLY_STEP_US 200
#define QPNP_WLED_DEF_SYNC_DLY_US 400
-#define QPNP_WLED_FS_CURR_MASK 0xF0
+#define QPNP_WLED_FS_CURR_MASK GENMASK(3, 0)
#define QPNP_WLED_FS_CURR_MIN_UA 0
#define QPNP_WLED_FS_CURR_MAX_UA 30000
#define QPNP_WLED_FS_CURR_STEP_UA 2500
-#define QPNP_WLED_CABC_MASK 0x7F
+#define QPNP_WLED_CABC_MASK 0x80
#define QPNP_WLED_CABC_SHIFT 7
#define QPNP_WLED_CURR_SINK_SHIFT 4
+#define QPNP_WLED_CURR_SINK_MASK GENMASK(7, 4)
#define QPNP_WLED_BRIGHT_LSB_MASK 0xFF
#define QPNP_WLED_BRIGHT_MSB_SHIFT 8
#define QPNP_WLED_BRIGHT_MSB_MASK 0x0F
@@ -208,12 +209,14 @@
#define QPNP_WLED_SEC_UNLOCK 0xA5
#define QPNP_WLED_MAX_STRINGS 4
+#define QPNP_PM660_WLED_MAX_STRINGS 3
#define WLED_MAX_LEVEL_4095 4095
#define QPNP_WLED_RAMP_DLY_MS 20
#define QPNP_WLED_TRIGGER_NONE "none"
#define QPNP_WLED_STR_SIZE 20
#define QPNP_WLED_MIN_MSLEEP 20
#define QPNP_WLED_SC_DLY_MS 20
+#define QPNP_WLED_SOFT_START_DLY_US 10000
#define NUM_SUPPORTED_AVDD_VOLTAGES 6
#define QPNP_WLED_DFLT_AVDD_MV 7600
@@ -381,6 +384,8 @@ struct qpnp_wled {
u16 ramp_ms;
u16 ramp_step;
u16 cons_sync_write_delay_us;
+ u16 auto_calibration_ovp_count;
+ u16 max_strings;
u8 strings[QPNP_WLED_MAX_STRINGS];
u8 num_strings;
u8 loop_auto_gm_thresh;
@@ -396,6 +401,9 @@ struct qpnp_wled {
bool en_ext_pfet_sc_pro;
bool prev_state;
bool ovp_irq_disabled;
+ bool auto_calib_enabled;
+ bool auto_calib_done;
+ ktime_t start_ovp_fault_time;
};
/* helper to read a pmic register */
@@ -531,7 +539,7 @@ static int qpnp_wled_set_level(struct qpnp_wled *wled, int level)
u8 reg;
/* set brightness registers */
- for (i = 0; i < wled->num_strings; i++) {
+ for (i = 0; i < wled->max_strings; i++) {
reg = level & QPNP_WLED_BRIGHT_LSB_MASK;
rc = qpnp_wled_write_reg(wled,
QPNP_WLED_BRIGHT_LSB_REG(wled->sink_base,
@@ -600,7 +608,8 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled,
* OVP interrupt disabled when the module is disabled.
*/
if (state) {
- usleep_range(10000, 11000);
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
rc = qpnp_wled_psm_config(wled, false);
if (rc < 0)
return rc;
@@ -873,32 +882,25 @@ static ssize_t qpnp_wled_fs_curr_ua_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qpnp_wled *wled = dev_get_drvdata(dev);
- int data, i, rc, temp;
+ int data, i, rc;
u8 reg;
rc = kstrtoint(buf, 10, &data);
if (rc)
return rc;
- for (i = 0; i < wled->num_strings; i++) {
+ for (i = 0; i < wled->max_strings; i++) {
if (data < QPNP_WLED_FS_CURR_MIN_UA)
data = QPNP_WLED_FS_CURR_MIN_UA;
else if (data > QPNP_WLED_FS_CURR_MAX_UA)
data = QPNP_WLED_FS_CURR_MAX_UA;
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), &reg);
+ reg = data / QPNP_WLED_FS_CURR_STEP_UA;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base, i),
+ QPNP_WLED_FS_CURR_MASK, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_FS_CURR_MASK;
- temp = data / QPNP_WLED_FS_CURR_STEP_UA;
- reg |= temp;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
}
wled->fs_curr_ua = data;
@@ -1090,6 +1092,229 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
return 0;
}
+#define AUTO_CALIB_BRIGHTNESS 16
+static int wled_auto_calibrate(struct qpnp_wled *wled)
+{
+ int rc = 0, i;
+ u8 reg = 0, sink_config = 0, sink_test = 0, sink_valid = 0, int_sts;
+
+ mutex_lock(&wled->lock);
+
+ /* disable OVP IRQ */
+ if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) {
+ disable_irq_nosync(wled->ovp_irq);
+ wled->ovp_irq_disabled = true;
+ }
+
+ /* read configured sink configuration */
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), &sink_config);
+ if (rc < 0) {
+ pr_err("Failed to read SINK configuration rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* disable the module before starting calibration */
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, 0);
+ if (rc < 0) {
+ pr_err("Failed to disable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* set low brightness across all sinks */
+ rc = qpnp_wled_set_level(wled, AUTO_CALIB_BRIGHTNESS);
+ if (rc < 0) {
+ pr_err("Failed to set brightness for calibration rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* disable all sinks */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), 0);
+ if (rc < 0) {
+ pr_err("Failed to disable all sinks rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK,
+ QPNP_WLED_MODULE_EN_MASK);
+ if (rc < 0) {
+ pr_err("Failed to enable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+ /*
+ * Delay for the WLED soft-start, check the OVP status
+ * only after soft-start is complete
+ */
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+ /* iterate through the strings one by one */
+ for (i = 0; i < wled->max_strings; i++) {
+ sink_test = 1 << (QPNP_WLED_CURR_SINK_SHIFT + i);
+
+ /* Enable feedback control */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+ i + 1);
+ if (rc < 0) {
+ pr_err("Failed to enable feedback for SINK %d rc = %d\n",
+ i + 1, rc);
+ goto failed_calib;
+ }
+
+ /* enable the sink */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_test);
+ if (rc < 0) {
+ pr_err("Failed to configure SINK %d rc=%d\n",
+ i + 1, rc);
+ goto failed_calib;
+ }
+
+ /* delay for WLED soft-start */
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_INT_RT_STS(wled->ctrl_base), &int_sts);
+ if (rc < 0) {
+ pr_err("Error in reading WLED_INT_RT_STS rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ if (int_sts & QPNP_WLED_OVP_FAULT_BIT)
+ pr_debug("WLED OVP fault detected with SINK %d\n",
+ i + 1);
+ else
+ sink_valid |= sink_test;
+ }
+
+ if (sink_valid == sink_config) {
+ pr_debug("WLED auto-calibration complete, default sink-config=%x OK!\n",
+ sink_config);
+ } else {
+ pr_warn("Invalid WLED default sink config=%x changing it to=%x\n",
+ sink_config, sink_valid);
+ sink_config = sink_valid;
+ }
+
+ if (!sink_config) {
+ pr_warn("No valid WLED sinks found\n");
+ goto failed_calib;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, 0);
+ if (rc < 0) {
+ pr_err("Failed to disable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* write the new sink configuration */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_config);
+ if (rc < 0) {
+ pr_err("Failed to reconfigure the default sink rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* MODULATOR_EN setting for valid sinks */
+ for (i = 0; i < wled->max_strings; i++) {
+ if (sink_config & (1 << (QPNP_WLED_CURR_SINK_SHIFT + i)))
+ reg = (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
+ else
+ reg = 0x0; /* disable modulator_en for unused sink */
+
+ if (wled->dim_mode == QPNP_WLED_DIM_HYBRID)
+ reg &= QPNP_WLED_GATE_DRV_MASK;
+ else
+ reg |= ~QPNP_WLED_GATE_DRV_MASK;
+
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg);
+ if (rc < 0) {
+ pr_err("Failed to configure MODULATOR_EN rc=%d\n", rc);
+ goto failed_calib;
+ }
+ }
+
+ /* restore the feedback setting */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+ wled->fdbk_op);
+ if (rc < 0) {
+ pr_err("Failed to restore feedback setting rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* restore brightness */
+ rc = qpnp_wled_set_level(wled, wled->cdev.brightness);
+ if (rc < 0) {
+ pr_err("Failed to set brightness after calibration rc=%d\n",
+ rc);
+ goto failed_calib;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK,
+ QPNP_WLED_MODULE_EN_MASK);
+ if (rc < 0) {
+ pr_err("Failed to enable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* delay for WLED soft-start */
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+failed_calib:
+ if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) {
+ enable_irq(wled->ovp_irq);
+ wled->ovp_irq_disabled = false;
+ }
+ mutex_unlock(&wled->lock);
+ return rc;
+}
+
+#define WLED_AUTO_CAL_OVP_COUNT 5
+#define WLED_AUTO_CAL_CNT_DLY_US 1000000 /* 1 second */
+static bool qpnp_wled_auto_cal_required(struct qpnp_wled *wled)
+{
+ s64 elapsed_time_us;
+
+ /*
+ * Check if the OVP fault was an occasional one
+ * or if it's firing continuously; the latter qualifies
+ * for an auto-calibration check.
+ */
+ if (!wled->auto_calibration_ovp_count) {
+ wled->start_ovp_fault_time = ktime_get();
+ wled->auto_calibration_ovp_count++;
+ } else {
+ elapsed_time_us = ktime_us_delta(ktime_get(),
+ wled->start_ovp_fault_time);
+ if (elapsed_time_us > WLED_AUTO_CAL_CNT_DLY_US)
+ wled->auto_calibration_ovp_count = 0;
+ else
+ wled->auto_calibration_ovp_count++;
+
+ if (wled->auto_calibration_ovp_count >=
+ WLED_AUTO_CAL_OVP_COUNT) {
+ wled->auto_calibration_ovp_count = 0;
+ return true;
+ }
+ }
+
+ return false;
+}
+
/* ovp irq handler */
static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
{
@@ -1114,6 +1339,21 @@ static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
if (fault_sts & (QPNP_WLED_OVP_FAULT_BIT | QPNP_WLED_ILIM_FAULT_BIT))
pr_err("WLED OVP fault detected, int_sts=%x fault_sts= %x\n",
int_sts, fault_sts);
+
+ if (fault_sts & QPNP_WLED_OVP_FAULT_BIT) {
+ if (wled->auto_calib_enabled && !wled->auto_calib_done) {
+ if (qpnp_wled_auto_cal_required(wled)) {
+ rc = wled_auto_calibrate(wled);
+ if (rc < 0) {
+ pr_err("Failed auto-calibration rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+ wled->auto_calib_done = true;
+ }
+ }
+ }
+
return IRQ_HANDLED;
}
@@ -1423,7 +1663,7 @@ static int qpnp_wled_vref_config(struct qpnp_wled *wled)
static int qpnp_wled_config(struct qpnp_wled *wled)
{
int rc, i, temp;
- u8 reg = 0;
+ u8 reg = 0, sink_en = 0, mask;
/* Configure display type */
rc = qpnp_wled_set_disp(wled, wled->ctrl_base);
@@ -1622,93 +1862,77 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
rc = qpnp_wled_write_reg(wled, QPNP_WLED_CURR_SINK_REG(wled->sink_base),
reg);
- for (i = 0; i < wled->num_strings; i++) {
- if (wled->strings[i] >= QPNP_WLED_MAX_STRINGS) {
- dev_err(&wled->pdev->dev, "Invalid string number\n");
- return -EINVAL;
- }
-
- /* MODULATOR */
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_MOD_EN_REG(wled->sink_base,
- wled->strings[i]), &reg);
- if (rc < 0)
- return rc;
- reg &= QPNP_WLED_MOD_EN_MASK;
- reg |= (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
-
- if (wled->dim_mode == QPNP_WLED_DIM_HYBRID)
- reg &= QPNP_WLED_GATE_DRV_MASK;
- else
- reg |= ~QPNP_WLED_GATE_DRV_MASK;
-
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_MOD_EN_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
-
+ for (i = 0; i < wled->max_strings; i++) {
/* SYNC DELAY */
if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US)
wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US;
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_SYNC_DLY_REG(wled->sink_base,
- wled->strings[i]), &reg);
+ reg = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US;
+ mask = QPNP_WLED_SYNC_DLY_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SYNC_DLY_REG(wled->sink_base, i),
+ mask, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_SYNC_DLY_MASK;
- temp = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US;
- reg |= temp;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SYNC_DLY_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
/* FULL SCALE CURRENT */
if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA)
wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA;
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), &reg);
+ reg = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA;
+ mask = QPNP_WLED_FS_CURR_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base, i),
+ mask, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_FS_CURR_MASK;
- temp = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA;
- reg |= temp;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
/* CABC */
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_CABC_REG(wled->sink_base,
- wled->strings[i]), &reg);
+ reg = wled->en_cabc ? (1 << QPNP_WLED_CABC_SHIFT) : 0;
+ mask = QPNP_WLED_CABC_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CABC_REG(wled->sink_base, i),
+ mask, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_CABC_MASK;
- reg |= (wled->en_cabc << QPNP_WLED_CABC_SHIFT);
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_CABC_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
+ }
- /* Enable CURRENT SINK */
+ /* Settings specific to valid sinks */
+ for (i = 0; i < wled->num_strings; i++) {
+ if (wled->strings[i] >= wled->max_strings) {
+ dev_err(&wled->pdev->dev, "Invalid string number\n");
+ return -EINVAL;
+ }
+ /* MODULATOR */
rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_CURR_SINK_REG(wled->sink_base), &reg);
+ QPNP_WLED_MOD_EN_REG(wled->sink_base, i), &reg);
if (rc < 0)
return rc;
- temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT;
- reg |= (1 << temp);
+ reg &= QPNP_WLED_MOD_EN_MASK;
+ reg |= (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
+
+ if (wled->dim_mode == QPNP_WLED_DIM_HYBRID)
+ reg &= QPNP_WLED_GATE_DRV_MASK;
+ else
+ reg |= ~QPNP_WLED_GATE_DRV_MASK;
+
rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_CURR_SINK_REG(wled->sink_base), reg);
+ QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg);
if (rc)
return rc;
+
+ /* SINK EN */
+ temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT;
+ sink_en |= (1 << temp);
+ }
+ mask = QPNP_WLED_CURR_SINK_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base),
+ mask, sink_en);
+ if (rc < 0) {
+ dev_err(&wled->pdev->dev,
+ "Failed to enable WLED sink config rc = %d\n", rc);
+ return rc;
}
rc = qpnp_wled_sync_reg_toggle(wled);
@@ -1728,8 +1952,13 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
wled->ovp_irq, rc);
return rc;
}
- disable_irq(wled->ovp_irq);
- wled->ovp_irq_disabled = true;
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), &reg);
+ /* disable the OVP irq only if the module is not enabled */
+ if (!rc && !(reg & QPNP_WLED_MODULE_EN_MASK)) {
+ disable_irq(wled->ovp_irq);
+ wled->ovp_irq_disabled = true;
+ }
}
if (wled->sc_irq >= 0) {
@@ -2091,11 +2320,16 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
wled->en_cabc = of_property_read_bool(pdev->dev.of_node,
"qcom,en-cabc");
+ if (wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ wled->max_strings = QPNP_PM660_WLED_MAX_STRINGS;
+ else
+ wled->max_strings = QPNP_WLED_MAX_STRINGS;
+
prop = of_find_property(pdev->dev.of_node,
"qcom,led-strings-list", &temp_val);
if (!prop || !temp_val || temp_val > QPNP_WLED_MAX_STRINGS) {
dev_err(&pdev->dev, "Invalid strings info, use default");
- wled->num_strings = QPNP_WLED_MAX_STRINGS;
+ wled->num_strings = wled->max_strings;
for (i = 0; i < wled->num_strings; i++)
wled->strings[i] = i;
} else {
@@ -2118,6 +2352,9 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
wled->lcd_psm_ctrl = of_property_read_bool(pdev->dev.of_node,
"qcom,lcd-psm-ctrl");
+
+ wled->auto_calib_enabled = of_property_read_bool(pdev->dev.of_node,
+ "qcom,auto-calibration-enable");
return 0;
}
@@ -2185,13 +2422,13 @@ static int qpnp_wled_probe(struct platform_device *pdev)
}
mutex_init(&wled->bus_lock);
+ mutex_init(&wled->lock);
rc = qpnp_wled_config(wled);
if (rc) {
dev_err(&pdev->dev, "wled config failed\n");
return rc;
}
- mutex_init(&wled->lock);
INIT_WORK(&wled->work, qpnp_wled_work);
wled->ramp_ms = QPNP_WLED_RAMP_DLY_MS;
wled->ramp_step = 1;
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index ab0e4f99ebb9..e3cfffb5c563 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -875,6 +875,14 @@ static int qpnp_mpp_set(struct qpnp_led_data *led)
}
if (led->mpp_cfg->pwm_mode == PWM_MODE) {
/*config pwm for brightness scaling*/
+ rc = pwm_change_mode(led->mpp_cfg->pwm_cfg->pwm_dev,
+ PM_PWM_MODE_PWM);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Failed to set PWM mode, rc = %d\n",
+ rc);
+ return rc;
+ }
period_us = led->mpp_cfg->pwm_cfg->pwm_period_us;
if (period_us > INT_MAX / NSEC_PER_USEC) {
duty_us = (period_us * led->cdev.brightness) /
@@ -1581,6 +1589,14 @@ static int qpnp_kpdbl_set(struct qpnp_led_data *led)
}
if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
+ rc = pwm_change_mode(led->kpdbl_cfg->pwm_cfg->pwm_dev,
+ PM_PWM_MODE_PWM);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Failed to set PWM mode, rc = %d\n",
+ rc);
+ return rc;
+ }
period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
if (period_us > INT_MAX / NSEC_PER_USEC) {
duty_us = (period_us * led->cdev.brightness) /
@@ -1702,6 +1718,14 @@ static int qpnp_rgb_set(struct qpnp_led_data *led)
led->rgb_cfg->pwm_cfg->mode =
led->rgb_cfg->pwm_cfg->default_mode;
if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
+ rc = pwm_change_mode(led->rgb_cfg->pwm_cfg->pwm_dev,
+ PM_PWM_MODE_PWM);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Failed to set PWM mode, rc = %d\n",
+ rc);
+ return rc;
+ }
period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
if (period_us > INT_MAX / NSEC_PER_USEC) {
duty_us = (period_us * led->cdev.brightness) /
@@ -2136,6 +2160,11 @@ static int qpnp_pwm_init(struct pwm_config_data *pwm_cfg,
dev_err(&pdev->dev, "Failed to configure pwm LUT\n");
return rc;
}
+ rc = pwm_change_mode(pwm_cfg->pwm_dev, PM_PWM_MODE_LPG);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to set LPG mode\n");
+ return rc;
+ }
}
} else {
dev_err(&pdev->dev, "Invalid PWM device\n");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..9cf826df89b1 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -104,11 +104,14 @@ static void tx_tick(struct mbox_chan *chan, int r)
/* Submit next message */
msg_submit(chan);
+ if (!mssg)
+ return;
+
/* Notify the client */
- if (mssg && chan->cl->tx_done)
+ if (chan->cl->tx_done)
chan->cl->tx_done(chan->cl, mssg, r);
- if (chan->cl->tx_block)
+ if (r != -ETIME && chan->cl->tx_block)
complete(&chan->tx_complete);
}
@@ -261,7 +264,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
msg_submit(chan);
- if (chan->cl->tx_block && chan->active_req) {
+ if (chan->cl->tx_block) {
unsigned long wait;
int ret;
@@ -272,8 +275,8 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
ret = wait_for_completion_timeout(&chan->tx_complete, wait);
if (ret == 0) {
- t = -EIO;
- tx_tick(chan, -EIO);
+ t = -ETIME;
+ tx_tick(chan, t);
}
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eff554a12fb4..0a856cb181e9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1866,7 +1866,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
- sb->super_offset = rdev->sb_start;
+ sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@@ -2273,7 +2273,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
- (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d81be5e471d0..f24a9e14021d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1088,7 +1088,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
*/
DEFINE_WAIT(w);
for (;;) {
- flush_signals(current);
+ sigset_t full, old;
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
@@ -1097,7 +1097,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
!md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
}
finish_wait(&conf->wait_barrier, &w);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4384b46cee1a..8f60520c8392 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5279,12 +5279,15 @@ static void make_request(struct mddev *mddev, struct bio * bi)
* userspace, we want an interruptible
* wait.
*/
- flush_signals(current);
prepare_to_wait(&conf->wait_for_overlap,
&w, TASK_INTERRUPTIBLE);
if (logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
+ sigset_t full, old;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
do_prepare = true;
}
goto retry;
@@ -5818,6 +5821,8 @@ static void raid5_do_work(struct work_struct *work)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+
+ async_tx_issue_pending_all();
blk_finish_plug(&plug);
pr_debug("--- raid5worker inactive\n");
@@ -7528,12 +7533,10 @@ static void end_reshape(struct r5conf *conf)
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
- rdev_for_each(rdev, conf->mddev)
- rdev->data_offset = rdev->new_data_offset;
+ md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
index 8001cde1db1e..503135a4f47a 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
@@ -211,7 +211,7 @@ static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
}
if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
- ret = s5c73m3_af_run(state, ~af_lock);
+ ret = s5c73m3_af_run(state, !af_lock);
return ret;
}
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 8f2556ec3971..61611d1682d1 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3691,7 +3691,14 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
core->nr = nr;
sprintf(core->name, "cx88[%d]", core->nr);
- core->tvnorm = V4L2_STD_NTSC_M;
+ /*
+ * Note: Setting initial standard here would cause first call to
+ * cx88_set_tvnorm() to return without programming any registers. Leave
+ * it blank at this point and it will get set later in
+ * cx8800_initdev()
+ */
+ core->tvnorm = 0;
+
core->width = 320;
core->height = 240;
core->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index aef9acf351f6..abbf5b05b6f5 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1429,7 +1429,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
/* initial device configuration */
mutex_lock(&core->lock);
- cx88_set_tvnorm(core, core->tvnorm);
+ cx88_set_tvnorm(core, V4L2_STD_NTSC_M);
v4l2_ctrl_handler_setup(&core->video_hdl);
v4l2_ctrl_handler_setup(&core->audio_hdl);
cx88_video_mux(core, 0);
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 8ef6399d794f..bc957528f69f 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = {
/* ----------------------------------------------------------- */
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+ u8 subaddr = 0x7, dmdregval;
+ u8 data[2];
+ int ret;
+ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+ .buf = &subaddr, .len = 1},
+ {.addr = 0x08,
+ .flags = I2C_M_RD,
+ .buf = &dmdregval, .len = 1}
+ };
+ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+ .buf = data, .len = 2} };
+
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+ pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ dev->name);
+
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+ pr_err("%s: EEPROM i2c gate open failure\n",
+ dev->name);
+ }
+}
+
static int
saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
{
unsigned char buf;
int i,err;
+ if (dev->board == SAA7134_BOARD_MD7134)
+ saa7134_i2c_eeprom_md7134_gate(dev);
+
dev->i2c_client.addr = 0xa0 >> 1;
buf = 0;
if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
index 420083f019cf..d9e109938e7e 100644
--- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
@@ -430,6 +430,7 @@ static int msm_fd_open(struct file *file)
ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
ctx->vb2_q.io_modes = VB2_USERPTR;
ctx->vb2_q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ mutex_init(&ctx->lock);
ret = vb2_queue_init(&ctx->vb2_q);
if (ret < 0) {
dev_err(device->dev, "Error queue init\n");
@@ -480,7 +481,9 @@ static int msm_fd_release(struct file *file)
msm_cpp_vbif_register_error_handler((void *)ctx,
VBIF_CLIENT_FD, NULL);
+ mutex_lock(&ctx->lock);
vb2_queue_release(&ctx->vb2_q);
+ mutex_unlock(&ctx->lock);
vfree(ctx->stats);
@@ -510,7 +513,9 @@ static unsigned int msm_fd_poll(struct file *file,
struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);
unsigned int ret;
+ mutex_lock(&ctx->lock);
ret = vb2_poll(&ctx->vb2_q, file, wait);
+ mutex_unlock(&ctx->lock);
if (atomic_read(&ctx->subscribed_for_event)) {
poll_wait(file, &ctx->fh.wait, wait);
@@ -748,9 +753,9 @@ static int msm_fd_reqbufs(struct file *file,
int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- mutex_lock(&ctx->fd_device->recovery_lock);
+ mutex_lock(&ctx->lock);
ret = vb2_reqbufs(&ctx->vb2_q, req);
- mutex_unlock(&ctx->fd_device->recovery_lock);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -766,9 +771,9 @@ static int msm_fd_qbuf(struct file *file, void *fh,
int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- mutex_lock(&ctx->fd_device->recovery_lock);
+ mutex_lock(&ctx->lock);
ret = vb2_qbuf(&ctx->vb2_q, pb);
- mutex_unlock(&ctx->fd_device->recovery_lock);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -785,9 +790,9 @@ static int msm_fd_dqbuf(struct file *file,
int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- mutex_lock(&ctx->fd_device->recovery_lock);
+ mutex_lock(&ctx->lock);
ret = vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK);
- mutex_unlock(&ctx->fd_device->recovery_lock);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -803,7 +808,9 @@ static int msm_fd_streamon(struct file *file,
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
int ret;
+ mutex_lock(&ctx->lock);
ret = vb2_streamon(&ctx->vb2_q, buf_type);
+ mutex_unlock(&ctx->lock);
if (ret < 0)
dev_err(ctx->fd_device->dev, "Stream on fails\n");
@@ -822,7 +829,9 @@ static int msm_fd_streamoff(struct file *file,
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
int ret;
+ mutex_lock(&ctx->lock);
ret = vb2_streamoff(&ctx->vb2_q, buf_type);
+ mutex_unlock(&ctx->lock);
if (ret < 0)
dev_err(ctx->fd_device->dev, "Stream off fails\n");
@@ -1053,14 +1062,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
a->value = ctx->format.size->work_size;
break;
case V4L2_CID_FD_WORK_MEMORY_FD:
+ mutex_lock(&ctx->fd_device->recovery_lock);
if (ctx->work_buf.fd != -1)
msm_fd_hw_unmap_buffer(&ctx->work_buf);
if (a->value >= 0) {
ret = msm_fd_hw_map_buffer(&ctx->mem_pool,
a->value, &ctx->work_buf);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&ctx->fd_device->recovery_lock);
return ret;
+ }
}
+ mutex_unlock(&ctx->fd_device->recovery_lock);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.h b/drivers/media/platform/msm/ais/fd/msm_fd_dev.h
index c15032256f4d..a7615a65d2fc 100644
--- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.h
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.h
@@ -161,6 +161,7 @@ struct fd_ctx {
struct msm_fd_mem_pool mem_pool;
struct msm_fd_stats *stats;
struct msm_fd_buf_handle work_buf;
+ struct mutex lock;
};
/*
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h
index 72a76d178aa8..86974eeb4a32 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp.h
@@ -355,6 +355,7 @@ struct msm_vfe_hardware_info {
uint32_t dmi_reg_offset;
uint32_t min_ab;
uint32_t min_ib;
+ uint32_t regulator_num;
const char *regulator_names[];
};
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c
index d63282f80aca..d33dc758aef9 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c
@@ -2537,8 +2537,7 @@ int msm_vfe47_get_regulators(struct vfe_device *vfe_dev)
int rc = 0;
int i;
- vfe_dev->vfe_num_regulators =
- sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char *);
+ vfe_dev->vfe_num_regulators = vfe_dev->hw_info->regulator_num;
vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) *
vfe_dev->vfe_num_regulators, GFP_KERNEL);
@@ -2811,6 +2810,7 @@ struct msm_vfe_hardware_info vfe47_hw_info = {
.dmi_reg_offset = 0xC2C,
.axi_hw_info = &msm_vfe47_axi_hw_info,
.stats_hw_info = &msm_vfe47_stats_hw_info,
+ .regulator_num = 3,
.regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"},
};
EXPORT_SYMBOL(vfe47_hw_info);
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
index 6e89544161ee..0d08cffda25c 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
@@ -891,6 +891,12 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
struct msm_isp_sw_framskip *sw_skip_info = NULL;
+ if (update_cmd->num_streams > MSM_ISP_STATS_MAX) {
+ pr_err("%s: Invalid num_streams %d\n",
+ __func__, update_cmd->num_streams);
+ return -EINVAL;
+ }
+
/* validate request */
for (i = 0; i < update_cmd->num_streams; i++) {
update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
diff --git a/drivers/media/platform/msm/ais/sensor/cci/Makefile b/drivers/media/platform/msm/ais/sensor/cci/Makefile
index 3942508c0d66..b8b8c83bc6de 100644
--- a/drivers/media/platform/msm/ais/sensor/cci/Makefile
+++ b/drivers/media/platform/msm/ais/sensor/cci/Makefile
@@ -2,3 +2,4 @@ ccflags-y += -Idrivers/media/platform/msm/ais
ccflags-y += -Idrivers/media/platform/msm/ais/common
ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
obj-$(CONFIG_MSM_AIS) += msm_cci.o
+obj-$(CONFIG_MSM_AIS) += msm_early_cam.o
diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c
new file mode 100644
index 000000000000..00ec613e7303
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c
@@ -0,0 +1,279 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include "msm_sd.h"
+#include "msm_early_cam.h"
+#include "msm_cam_cci_hwreg.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+#include "cam_hw_ops.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#undef EARLY_CAM_DBG
+#ifdef MSM_EARLY_CAM_DEBUG
+#define EARLY_CAM_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define EARLY_CAM_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define MSM_EARLY_CAM_DRV_NAME "msm_early_cam"
+static struct platform_driver msm_early_camera_driver;
+static struct early_cam_device *new_early_cam_dev;
+
+int msm_early_cam_disable_clocks(void)
+{
+ int rc = 0;
+
+ CDBG("%s:\n", __func__);
+ /* Vote OFF for clocks */
+ if (new_early_cam_dev == NULL) {
+ rc = -EINVAL;
+ pr_err("%s: clock structure uninitialised %d\n", __func__,
+ rc);
+ return rc;
+ }
+
+ if ((new_early_cam_dev->pdev == NULL) ||
+ (new_early_cam_dev->early_cam_clk_info == NULL) ||
+ (new_early_cam_dev->early_cam_clk == NULL) ||
+ (new_early_cam_dev->num_clk == 0)) {
+ rc = -EINVAL;
+ pr_err("%s: Clock details uninitialised %d\n", __func__,
+ rc);
+ return rc;
+ }
+
+ rc = msm_camera_clk_enable(&new_early_cam_dev->pdev->dev,
+ new_early_cam_dev->early_cam_clk_info,
+ new_early_cam_dev->early_cam_clk,
+ new_early_cam_dev->num_clk, false);
+ if (rc < 0) {
+ pr_err("%s: clk disable failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SUSPEND_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote OFF AHB_CLIENT_CSIPHY %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID,
+ CAM_AHB_SUSPEND_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote OFF AHB_CLIENT_CSID %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CCI,
+ CAM_AHB_SUSPEND_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote OFF AHB_CLIENT_CCI %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_ISPIF,
+ CAM_AHB_SUSPEND_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote OFF AHB_CLIENT_ISPIF %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_VFE0,
+ CAM_AHB_SUSPEND_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote OFF AHB_CLIENT_VFE0 %d\n",
+ __func__, rc);
+ return rc;
+ }
+ pr_debug("Turned OFF camera clocks\n");
+ return 0;
+
+}
+static int msm_early_cam_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ CDBG("%s: pdev %pK device id = %d\n", __func__, pdev, pdev->id);
+
+ /* Vote for Early camera if enabled */
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CCI,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_ISPIF,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_VFE0,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ new_early_cam_dev = kzalloc(sizeof(struct early_cam_device),
+ GFP_KERNEL);
+ if (!new_early_cam_dev)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
+ rc = msm_camera_get_clk_info_and_rates(pdev,
+ &new_early_cam_dev->early_cam_clk_info,
+ &new_early_cam_dev->early_cam_clk,
+ &new_early_cam_dev->early_cam_clk_rates,
+ &new_early_cam_dev->num_clk_cases,
+ &new_early_cam_dev->num_clk);
+ if (rc < 0) {
+ pr_err("%s: msm_early_cam_get_clk_info() failed", __func__);
+ kfree(new_early_cam_dev);
+ return -EFAULT;
+ }
+
+ new_early_cam_dev->ref_count = 0;
+ new_early_cam_dev->pdev = pdev;
+
+ rc = msm_camera_get_dt_vreg_data(
+ new_early_cam_dev->pdev->dev.of_node,
+ &(new_early_cam_dev->early_cam_vreg),
+ &(new_early_cam_dev->regulator_count));
+ if (rc < 0) {
+ pr_err("%s: msm_camera_get_dt_vreg_data fail\n", __func__);
+ rc = -EFAULT;
+ goto early_cam_release_mem;
+ }
+
+ if ((new_early_cam_dev->regulator_count < 0) ||
+ (new_early_cam_dev->regulator_count > MAX_REGULATOR)) {
+ pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
+ new_early_cam_dev->regulator_count, MAX_REGULATOR);
+ rc = -EFAULT;
+ goto early_cam_invalid_vreg_data;
+ }
+
+ rc = msm_camera_config_vreg(&new_early_cam_dev->pdev->dev,
+ new_early_cam_dev->early_cam_vreg,
+ new_early_cam_dev->regulator_count,
+ NULL,
+ 0,
+ &new_early_cam_dev->early_cam_reg_ptr[0], 1);
+ if (rc < 0)
+ pr_err("%s:%d early_cam config_vreg failed\n", __func__,
+ __LINE__);
+
+ rc = msm_camera_enable_vreg(&new_early_cam_dev->pdev->dev,
+ new_early_cam_dev->early_cam_vreg,
+ new_early_cam_dev->regulator_count,
+ NULL,
+ 0,
+ &new_early_cam_dev->early_cam_reg_ptr[0], 1);
+ if (rc < 0)
+ pr_err("%s:%d early_cam enable_vreg failed\n", __func__,
+ __LINE__);
+
+ rc = msm_camera_clk_enable(&new_early_cam_dev->pdev->dev,
+ new_early_cam_dev->early_cam_clk_info,
+ new_early_cam_dev->early_cam_clk,
+ new_early_cam_dev->num_clk, true);
+
+ if (rc < 0) {
+ pr_err("%s: clk enable failed %d\n", __func__, rc);
+ rc = 0;
+ goto early_cam_release_mem;
+ }
+
+ platform_set_drvdata(pdev, new_early_cam_dev);
+
+ return 0;
+
+early_cam_invalid_vreg_data:
+ kfree(new_early_cam_dev->early_cam_vreg);
+early_cam_release_mem:
+ kfree(new_early_cam_dev);
+ new_early_cam_dev = NULL;
+ return rc;
+}
+
+static int msm_early_cam_exit(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __init msm_early_cam_init_module(void)
+{
+ return platform_driver_register(&msm_early_camera_driver);
+}
+
+static void __exit msm_early_cam_exit_module(void)
+{
+ kfree(new_early_cam_dev);
+ platform_driver_unregister(&msm_early_camera_driver);
+}
+
+static const struct of_device_id msm_early_camera_match_table[] = {
+ { .compatible = "qcom,early-cam" },
+ {},
+};
+
+static struct platform_driver msm_early_camera_driver = {
+ .probe = msm_early_cam_probe,
+ .remove = msm_early_cam_exit,
+ .driver = {
+ .name = MSM_EARLY_CAM_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_early_camera_match_table,
+ },
+};
+
+MODULE_DEVICE_TABLE(of, msm_early_camera_match_table);
+
+module_init(msm_early_cam_init_module);
+module_exit(msm_early_cam_exit_module);
+MODULE_DESCRIPTION("MSM early camera driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.h b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.h
new file mode 100644
index 000000000000..a40ab2d1ebc3
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_EARLY_CAM_H
+#define MSM_EARLY_CAM_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <linux/workqueue.h>
+#include <media/ais/msm_ais_sensor.h>
+#include <soc/qcom/ais.h>
+#include "msm_sd.h"
+#include "cam_soc_api.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+
+enum msm_early_cam_state_t {
+ STATE_DISABLED,
+ STATE_ENABLED,
+};
+
+struct early_cam_device {
+ struct platform_device *pdev;
+ uint8_t ref_count;
+ enum msm_early_cam_state_t early_cam_state;
+ size_t num_clk;
+ size_t num_clk_cases;
+ struct clk **early_cam_clk;
+ uint32_t **early_cam_clk_rates;
+ struct msm_cam_clk_info *early_cam_clk_info;
+ struct camera_vreg_t *early_cam_vreg;
+ struct regulator *early_cam_reg_ptr[MAX_REGULATOR];
+ int32_t regulator_count;
+};
+
+int msm_early_cam_disable_clocks(void);
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c
index c3943be78226..ffbf963e819e 100644
--- a/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c
@@ -16,11 +16,17 @@
#include "msm_sensor_driver.h"
#include "msm_sensor.h"
#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+#include "msm_early_cam.h"
/* Logging macro */
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#define EARLY_CAMERA_SIGNAL_DONE 0xa5a5a5a5
+#define EARLY_CAMERA_SIGNAL_DISABLED 0
+
+static bool early_camera_clock_off;
static struct msm_sensor_init_t *s_init;
static int msm_sensor_wait_for_probe_done(struct msm_sensor_init_t *s_init)
@@ -42,10 +48,14 @@ static int msm_sensor_wait_for_probe_done(struct msm_sensor_init_t *s_init)
return rc;
}
+#define MMSS_A_VFE_0_SPARE 0xC84
+
/* Static function definition */
int32_t msm_sensor_driver_cmd(struct msm_sensor_init_t *s_init, void *arg)
{
int32_t rc = 0;
+ u32 val = 0;
+ void __iomem *base;
struct sensor_init_cfg_data *cfg = (struct sensor_init_cfg_data *)arg;
/* Validate input parameters */
@@ -68,6 +78,28 @@ int32_t msm_sensor_driver_cmd(struct msm_sensor_init_t *s_init, void *arg)
break;
case CFG_SINIT_PROBE_DONE:
+ if (early_camera_clock_off == false) {
+ base = ioremap(0x00A10000, 0x1000);
+ val = msm_camera_io_r_mb(base + MMSS_A_VFE_0_SPARE);
+ while (val != EARLY_CAMERA_SIGNAL_DONE) {
+ if (val == EARLY_CAMERA_SIGNAL_DISABLED)
+ break;
+ msleep(1000);
+ val = msm_camera_io_r_mb(
+ base + MMSS_A_VFE_0_SPARE);
+ pr_err("Waiting for signal from LK val = %u\n",
+ val);
+ }
+ rc = msm_early_cam_disable_clocks();
+ if (rc < 0) {
+ pr_err("Failed to disable early camera :%d\n",
+ rc);
+ } else {
+ early_camera_clock_off = true;
+ pr_debug("Voted OFF early camera clocks\n");
+ }
+ }
+
s_init->module_init_status = 1;
wake_up(&s_init->state_wait);
break;
@@ -99,6 +131,7 @@ static int __init msm_sensor_init_module(void)
mutex_init(&s_init->imutex);
init_waitqueue_head(&s_init->state_wait);
+ early_camera_clock_off = false;
return ret;
}
diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
index aa7658f359ac..4a74e808d86f 100644
--- a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
+++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
@@ -623,11 +623,13 @@ static long msm_ois_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("o_ctrl->i2c_client.i2c_func_tbl NULL\n");
return -EINVAL;
}
+ mutex_lock(o_ctrl->ois_mutex);
rc = msm_ois_power_down(o_ctrl);
if (rc < 0) {
pr_err("%s:%d OIS Power down failed\n",
__func__, __LINE__);
}
+ mutex_unlock(o_ctrl->ois_mutex);
return msm_ois_close(sd, NULL);
default:
return -ENOIOCTLCMD;
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index 63c3595a3f85..d881b4aea48f 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -434,6 +434,7 @@ static int msm_fd_open(struct file *file)
ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
ctx->vb2_q.io_modes = VB2_USERPTR;
ctx->vb2_q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ mutex_init(&ctx->lock);
ret = vb2_queue_init(&ctx->vb2_q);
if (ret < 0) {
dev_err(device->dev, "Error queue init\n");
@@ -484,7 +485,9 @@ static int msm_fd_release(struct file *file)
msm_cpp_vbif_register_error_handler((void *)ctx,
VBIF_CLIENT_FD, NULL);
+ mutex_lock(&ctx->lock);
vb2_queue_release(&ctx->vb2_q);
+ mutex_unlock(&ctx->lock);
vfree(ctx->stats);
@@ -514,7 +517,9 @@ static unsigned int msm_fd_poll(struct file *file,
struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);
unsigned int ret;
+ mutex_lock(&ctx->lock);
ret = vb2_poll(&ctx->vb2_q, file, wait);
+ mutex_unlock(&ctx->lock);
if (atomic_read(&ctx->subscribed_for_event)) {
poll_wait(file, &ctx->fh.wait, wait);
@@ -752,9 +757,9 @@ static int msm_fd_reqbufs(struct file *file,
int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- mutex_lock(&ctx->fd_device->recovery_lock);
+ mutex_lock(&ctx->lock);
ret = vb2_reqbufs(&ctx->vb2_q, req);
- mutex_unlock(&ctx->fd_device->recovery_lock);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -770,9 +775,9 @@ static int msm_fd_qbuf(struct file *file, void *fh,
int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- mutex_lock(&ctx->fd_device->recovery_lock);
+ mutex_lock(&ctx->lock);
ret = vb2_qbuf(&ctx->vb2_q, pb);
- mutex_unlock(&ctx->fd_device->recovery_lock);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -789,9 +794,9 @@ static int msm_fd_dqbuf(struct file *file,
int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- mutex_lock(&ctx->fd_device->recovery_lock);
+ mutex_lock(&ctx->lock);
ret = vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK);
- mutex_unlock(&ctx->fd_device->recovery_lock);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -807,7 +812,9 @@ static int msm_fd_streamon(struct file *file,
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
int ret;
+ mutex_lock(&ctx->lock);
ret = vb2_streamon(&ctx->vb2_q, buf_type);
+ mutex_unlock(&ctx->lock);
if (ret < 0)
dev_err(ctx->fd_device->dev, "Stream on fails\n");
@@ -826,7 +833,9 @@ static int msm_fd_streamoff(struct file *file,
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
int ret;
+ mutex_lock(&ctx->lock);
ret = vb2_streamoff(&ctx->vb2_q, buf_type);
+ mutex_unlock(&ctx->lock);
if (ret < 0)
dev_err(ctx->fd_device->dev, "Stream off fails\n");
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
index 6eae2b8d56fb..2b81e5b9ece3 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -174,6 +174,7 @@ struct fd_ctx {
struct msm_fd_mem_pool mem_pool;
struct msm_fd_stats *stats;
struct msm_fd_buf_handle work_buf;
+ struct mutex lock;
};
/*
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 4e5dc66d94a9..04ca0fba71e5 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -388,6 +388,11 @@ static void msm_add_sd_in_position(struct msm_sd_subdev *msm_subdev,
struct msm_sd_subdev *temp_sd;
list_for_each_entry(temp_sd, sd_list, list) {
+ if (temp_sd == msm_subdev) {
+ pr_err("%s :Fail to add the same sd %d\n",
+ __func__, __LINE__);
+ return;
+ }
if (msm_subdev->close_seq < temp_sd->close_seq) {
list_add_tail(&msm_subdev->list, &temp_sd->list);
return;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
index 1a40aa1c78bd..6d9b0e987d0d 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,13 +17,15 @@
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
#define S_I2C_DBG(fmt, args...) pr_debug(fmt, ##args)
+#define MAX_I2C_ADDR_TYPE_SIZE (MSM_CAMERA_I2C_3B_ADDR + 1)
+#define MAX_I2C_DATA_TYPE_SIZE (MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA + 1)
int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
uint32_t addr, uint16_t *data,
enum msm_camera_i2c_data_type data_type)
{
int32_t rc = -EFAULT;
- unsigned char buf[client->addr_type+data_type];
+ unsigned char buf[MAX_I2C_ADDR_TYPE_SIZE + MAX_I2C_DATA_TYPE_SIZE];
struct msm_camera_cci_ctrl cci_ctrl;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index a41d7dba490e..57bc392f54fd 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -108,7 +108,11 @@ static int32_t msm_sensor_driver_create_i2c_v4l_subdev
s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
s_ctrl->sensordata->sensor_info->session_id = session_id;
s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
- msm_sd_register(&s_ctrl->msm_sd);
+ rc = msm_sd_register(&s_ctrl->msm_sd);
+ if (rc < 0) {
+ pr_err("failed: msm_sd_register rc %d", rc);
+ return rc;
+ }
msm_sensor_v4l2_subdev_fops = v4l2_subdev_fops;
#ifdef CONFIG_COMPAT
msm_sensor_v4l2_subdev_fops.compat_ioctl32 =
@@ -148,7 +152,11 @@ static int32_t msm_sensor_driver_create_v4l_subdev
s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
- msm_sd_register(&s_ctrl->msm_sd);
+ rc = msm_sd_register(&s_ctrl->msm_sd);
+ if (rc < 0) {
+ pr_err("failed: msm_sd_register rc %d", rc);
+ return rc;
+ }
msm_cam_copy_v4l2_subdev_fops(&msm_sensor_v4l2_subdev_fops);
#ifdef CONFIG_COMPAT
msm_sensor_v4l2_subdev_fops.compat_ioctl32 =
@@ -995,12 +1003,6 @@ CSID_TG:
pr_err("%s probe succeeded", slave_info->sensor_name);
- /*
- Set probe succeeded flag to 1 so that no other camera shall
- * probed on this slot
- */
- s_ctrl->is_probe_succeed = 1;
-
s_ctrl->bypass_video_node_creation =
slave_info->bypass_video_node_creation;
@@ -1048,6 +1050,11 @@ CSID_TG:
msm_sensor_fill_sensor_info(s_ctrl, probed_info, entity_name);
+ /*
+ * Set probe succeeded flag to 1 so that no other camera shall
+ * be probed on this slot
+ */
+ s_ctrl->is_probe_succeed = 1;
return rc;
camera_power_down:
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
index bfb15846e73c..302a7b16bc26 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
@@ -615,11 +615,13 @@ static long msm_ois_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("o_ctrl->i2c_client.i2c_func_tbl NULL\n");
return -EINVAL;
}
+ mutex_lock(o_ctrl->ois_mutex);
rc = msm_ois_power_down(o_ctrl);
if (rc < 0) {
pr_err("%s:%d OIS Power down failed\n",
__func__, __LINE__);
}
+ mutex_unlock(o_ctrl->ois_mutex);
return msm_ois_close(sd, NULL);
default:
return -ENOIOCTLCMD;
@@ -818,6 +820,10 @@ static long msm_ois_subdev_do_ioctl(
parg = &ois_data;
break;
}
+ break;
+ case VIDIOC_MSM_OIS_CFG:
+ pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd);
+ return -EINVAL;
}
rc = msm_ois_subdev_ioctl(sd, cmd, parg);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index de4705c3d2eb..3677bb6e32e6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -533,11 +533,18 @@ static inline void save_v4l2_buffer(struct v4l2_buffer *b,
static int __map_and_update_binfo(struct msm_vidc_inst *inst,
struct buffer_info *binfo,
- struct v4l2_buffer *b, int i)
+ struct v4l2_buffer *b, u32 i)
{
int rc = 0;
struct msm_smem *same_fd_handle = NULL;
+ if (i >= VIDEO_MAX_PLANES) {
+ dprintk(VIDC_ERR, "Num planes exceeds max: %d, %d\n",
+ i, VIDEO_MAX_PLANES);
+ rc = -EINVAL;
+ goto exit;
+ }
+
same_fd_handle = get_same_fd_buffer(
inst, b->m.planes[i].reserved[0]);
@@ -558,6 +565,7 @@ static int __map_and_update_binfo(struct msm_vidc_inst *inst,
b->m.planes[i].m.userptr = binfo->device_addr[i];
}
+exit:
return rc;
}
@@ -565,7 +573,8 @@ static int __handle_fw_referenced_buffers(struct msm_vidc_inst *inst,
struct buffer_info *binfo,
struct v4l2_buffer *b)
{
- int i = 0, rc = 0;
+ int rc = 0;
+ u32 i = 0;
if (EXTRADATA_IDX(b->length)) {
i = EXTRADATA_IDX(b->length);
@@ -583,8 +592,8 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
{
struct buffer_info *binfo = NULL;
struct buffer_info *temp = NULL, *iterator = NULL;
- int plane = 0;
- int i = 0, rc = 0;
+ int plane = 0, rc = 0;
+ u32 i = 0;
if (!b || !inst) {
dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 4a608cbe0fdb..9c6fc09b88e0 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1098,10 +1098,10 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
struct s5p_jpeg_ctx *ctx)
{
int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
- unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0,
- sof_len = 0;
- unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER],
- dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
+ unsigned int height = 0, width = 0, word, subsampling = 0;
+ unsigned int sos = 0, sof = 0, sof_len = 0;
+ unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER];
+ unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
long length;
struct s5p_jpeg_buffer jpeg_buffer;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 65f80b8b9f7a..eb9e7feb9b13 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1629,7 +1629,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
if (kc == KEY_KEYBOARD && !ictx->release_code) {
ictx->last_keycode = kc;
if (!nomouse) {
- ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+ ictx->pad_mouse = !ictx->pad_mouse;
dev_dbg(dev, "toggling to %s mode\n",
ictx->pad_mouse ? "mouse" : "keyboard");
spin_unlock_irqrestore(&ictx->kc_lock, flags);
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 65fed7146e9b..cc91f7b3d90c 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
struct device *dev)
{
struct enclosure_component *cdev;
+ int err;
if (!edev || component >= edev->components)
return -EINVAL;
@@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
if (cdev->dev == dev)
return -EEXIST;
- if (cdev->dev)
+ if (cdev->dev) {
enclosure_remove_links(cdev);
-
- put_device(cdev->dev);
+ put_device(cdev->dev);
+ }
cdev->dev = get_device(dev);
- return enclosure_add_links(cdev);
+ err = enclosure_add_links(cdev);
+ if (err) {
+ put_device(cdev->dev);
+ cdev->dev = NULL;
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7bc5b5ad1122..cf897947fff2 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -8434,9 +8434,10 @@ out:
*/
static int qseecom_check_whitelist_feature(void)
{
- int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
+ u64 version = 0;
+ int ret = scm_get_feat_version(FEATURE_ID_WHITELIST, &version);
- return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+ return (ret == 0) && (version >= MAKE_WHITELIST_VERSION(1, 0, 0));
}
static int qseecom_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 01e5502917f7..d39b4056c169 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1173,7 +1173,7 @@ idata_free:
cmd_done:
mmc_blk_put(md);
- if (card->cmdq_init)
+ if (card && card->cmdq_init)
wake_up(&card->host->cmdq_ctx.wait);
return err;
}
@@ -4623,6 +4623,10 @@ static int mmc_blk_probe(struct mmc_card *card)
dev_set_drvdata(&card->dev, md);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
if (mmc_add_disk(md))
goto out;
@@ -4666,6 +4670,9 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/net/can/spi/rh850.c b/drivers/net/can/spi/rh850.c
index a93f979da9ed..d2b6e8caa112 100644
--- a/drivers/net/can/spi/rh850.c
+++ b/drivers/net/can/spi/rh850.c
@@ -1020,6 +1020,8 @@ static int rh850_create_netdev(struct spi_device *spi,
return -ENOMEM;
}
+ netdev->mtu = CANFD_MTU;
+
netdev_priv_data = netdev_priv(netdev);
netdev_priv_data->rh850_can = priv_data;
netdev_priv_data->netdev_index = index;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 70da30095b89..a5e4b4b93d1b 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1583,6 +1583,11 @@ static int bgmac_probe(struct bcma_device *core)
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
+ /* This (reset &) enable is not preset in specs or reference driver but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bcma_core_enable(core, 0);
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 2a9dd460a95f..e1f9e7cebf8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
if (!buf)
return -ENOMEM;
+ if (offset_in_page(buf)) {
+ dma_free_coherent(dev, PAGE_SIZE << order,
+ buf, sg_dma_address(mem));
+ return -ENOMEM;
+ }
+
sg_set_buf(mem, buf, PAGE_SIZE << order);
- BUG_ON(mem->offset);
sg_dma_len(mem) = PAGE_SIZE << order;
return 0;
}
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index 60b7a64c2edb..de14dcc6f4ed 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -942,10 +942,13 @@ net_dev_reg_fail:
netif_napi_del(&(rmnet_mhi_ptr->napi));
free_netdev(rmnet_mhi_ptr->dev);
net_dev_alloc_fail:
- mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
- rmnet_mhi_ptr->dev = NULL;
+ if (rmnet_mhi_ptr->rx_client_handle) {
+ mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
+ rmnet_mhi_ptr->dev = NULL;
+ }
mhi_rx_chan_start_fail:
- mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
+ if (rmnet_mhi_ptr->tx_client_handle)
+ mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
mhi_tx_chan_start_fail:
rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited ret %d.\n", ret);
return ret;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 79ef799f88ab..c5ea1018cb47 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -326,6 +326,7 @@ enum cfg_version {
static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 480f3dae0780..4296066a7ad3 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -750,6 +750,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.tsu = 1,
.hw_crc = 1,
.select_mii = 1,
+ .shift_rd0 = 1,
};
/* SH7763 */
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 84b9cca152eb..e83acc608678 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -907,7 +907,7 @@ static void decode_txts(struct dp83640_private *dp83640,
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
- skb_complete_tx_timestamp(skb, NULL);
+ kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e13ad6cdcc22..c8b85f1069ff 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -539,6 +539,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
+ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+ phydev->drv->config_intr(phydev);
}
return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0bfbabad4431..1d1e5f7723ab 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1442,7 +1442,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
- .soft_reset = genphy_soft_reset,
+ .soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f64b25c221e8..cd93220c9b45 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1009,6 +1009,7 @@ static int kaweth_probe(
struct net_device *netdev;
const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
int result = 0;
+ int rv = -EIO;
dev_dbg(dev,
"Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@@ -1029,6 +1030,7 @@ static int kaweth_probe(
kaweth = netdev_priv(netdev);
kaweth->dev = udev;
kaweth->net = netdev;
+ kaweth->intf = intf;
spin_lock_init(&kaweth->device_lock);
init_waitqueue_head(&kaweth->term_wait);
@@ -1048,6 +1050,10 @@ static int kaweth_probe(
/* Download the firmware */
dev_info(dev, "Downloading firmware...\n");
kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
+ if (!kaweth->firmware_buf) {
+ rv = -ENOMEM;
+ goto err_free_netdev;
+ }
if ((result = kaweth_download_firmware(kaweth,
"kaweth/new_code.bin",
100,
@@ -1139,8 +1145,6 @@ err_fw:
dev_dbg(dev, "Initializing net device.\n");
- kaweth->intf = intf;
-
kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->tx_urb)
goto err_free_netdev;
@@ -1204,7 +1208,7 @@ err_only_tx:
err_free_netdev:
free_netdev(netdev);
- return -EIO;
+ return rv;
}
/****************************************************************
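The kaweth_probe() hunks above replace the hard-coded -EIO in the failure path with a tracked return value so the new firmware-buffer allocation can report -ENOMEM. A minimal sketch of that error-path shape; foo_probe(), struct foo and foo_load_firmware() are invented names, not the driver's own:

#include <linux/slab.h>
#include <linux/device.h>

struct foo { int dummy; };

static int foo_load_firmware(struct foo *priv);    /* hypothetical helper, may fail */

static int foo_probe(struct device *dev)
{
    struct foo *priv;
    int rv = -EIO;                  /* default error, as in kaweth_probe() */

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        rv = -ENOMEM;               /* allocation failure reports the real reason */
        goto err_out;
    }

    if (foo_load_firmware(priv))    /* any other failure keeps the -EIO default */
        goto err_free;

    return 0;

err_free:
    kfree(priv);
err_out:
    return rv;
}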
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 349aecbc210a..ac945f8781ac 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -733,15 +733,15 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct slave_queue *queue = &vrf->queue;
- struct list_head *head = &queue->all_slaves;
- struct slave *slave, *next;
+// struct slave_queue *queue = &vrf->queue;
+// struct list_head *head = &queue->all_slaves;
+// struct slave *slave, *next;
vrf_rtable_destroy(vrf);
vrf_rt6_destroy(vrf);
- list_for_each_entry_safe(slave, next, head, list)
- vrf_del_slave(dev, slave->dev);
+// list_for_each_entry_safe(slave, next, head, list)
+// vrf_del_slave(dev, slave->dev);
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -914,6 +914,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct slave_queue *queue = &vrf->queue;
+ struct list_head *all_slaves = &queue->all_slaves;
+ struct slave *slave, *next;
+
+ list_for_each_entry_safe(slave, next, all_slaves, list)
+ vrf_del_slave(dev, slave->dev);
+
unregister_netdevice_queue(dev, head);
}
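The vrf change above moves slave teardown out of the (now commented-out) ndo_uninit path into vrf_dellink(), and the walk uses the _safe iterator because each entry is unlinked while the list is being traversed. A standalone sketch of that iterator idiom with invented demo_* names:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_slave {
    struct list_head list;
};

/* Remove every entry from a list that is being torn down.  The _safe
 * variant caches the next pointer, so deleting and freeing the current
 * entry inside the loop body is legal.
 */
static void demo_remove_all(struct list_head *head)
{
    struct demo_slave *slave, *next;

    list_for_each_entry_safe(slave, next, head, list) {
        list_del(&slave->list);
        kfree(slave);
    }
}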
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 10b33840e5e5..041d1d5eb718 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1924,6 +1924,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_pktlog_connect(ar);
+ if (status) {
+ ath10k_err(ar, "could not connect pktlog: %d\n", status);
+ goto err_hif_stop;
+ }
+
status = ath10k_htc_start(&ar->htc);
if (status) {
ath10k_err(ar, "failed to start htc: %d\n", status);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 8ddbae96794d..ffcf30756b9e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -467,6 +467,7 @@ struct ath10k_debug {
u64 fw_dbglog_mask;
u32 fw_dbglog_level;
u32 pktlog_filter;
+ enum ath10k_htc_ep_id eid;
u32 reg_addr;
u32 nf_cal_period;
void *cal_data;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index ec8063e7986a..42aab9b86af3 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -25,6 +25,7 @@
#include "core.h"
#include "debug.h"
#include "hif.h"
+#include "htt.h"
#include "wmi-ops.h"
/* ms */
@@ -2617,6 +2618,178 @@ static const struct file_operations fops_fw_checksums = {
.llseek = default_llseek,
};
+static struct txctl_frm_hdr frm_hdr;
+
+static void ath10k_extract_frame_header(u8 *addr1, u8 *addr2, u8 *addr3)
+{
+ frm_hdr.bssid_tail = (addr1[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE)
+ | (addr1[IEEE80211_ADDR_LEN - 1]);
+ frm_hdr.sa_tail = (addr2[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE)
+ | (addr2[IEEE80211_ADDR_LEN - 1]);
+ frm_hdr.da_tail = (addr3[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE)
+ | (addr3[IEEE80211_ADDR_LEN - 1]);
+}
+
+static void ath10k_process_ieee_hdr(void *data)
+{
+ u8 dir;
+ struct ieee80211_frame *wh;
+
+ if (!data)
+ return;
+
+ wh = (struct ieee80211_frame *)(data);
+ frm_hdr.framectrl = *(u_int16_t *)(wh->i_fc);
+ frm_hdr.seqctrl = *(u_int16_t *)(wh->i_seq);
+ dir = (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK);
+
+ if (dir == IEEE80211_FC1_DIR_TODS)
+ ath10k_extract_frame_header(wh->i_addr1, wh->i_addr2,
+ wh->i_addr3);
+ else if (dir == IEEE80211_FC1_DIR_FROMDS)
+ ath10k_extract_frame_header(wh->i_addr2, wh->i_addr3,
+ wh->i_addr1);
+ else
+ ath10k_extract_frame_header(wh->i_addr3, wh->i_addr2,
+ wh->i_addr1);
+}
+
+static void ath10k_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_pktlog_hdr *hdr = (void *)skb->data;
+ struct ath_pktlog_txctl pktlog_tx_ctrl;
+
+ switch (hdr->log_type) {
+ case ATH10K_PKTLOG_TYPE_TX_CTRL: {
+ spin_lock_bh(&ar->htt.tx_lock);
+
+ memcpy((void *)(&pktlog_tx_ctrl.hdr), (void *)hdr,
+ sizeof(pktlog_tx_ctrl.hdr));
+ pktlog_tx_ctrl.frm_hdr = frm_hdr;
+ memcpy((void *)pktlog_tx_ctrl.txdesc_ctl, (void *)hdr->payload,
+ __le16_to_cpu(hdr->size));
+ pktlog_tx_ctrl.hdr.size = sizeof(pktlog_tx_ctrl) -
+ sizeof(pktlog_tx_ctrl.hdr);
+
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ trace_ath10k_htt_pktlog(ar, (void *)&pktlog_tx_ctrl,
+ sizeof(pktlog_tx_ctrl));
+ break;
+ }
+ case ATH10K_PKTLOG_TYPE_TX_MSDU_ID:
+ break;
+ case ATH10K_PKTLOG_TYPE_TX_FRM_HDR: {
+ ath10k_process_ieee_hdr((void *)(hdr->payload));
+ trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) +
+ __le16_to_cpu(hdr->size));
+ break;
+ }
+ case ATH10K_PKTLOG_TYPE_RX_STAT:
+ case ATH10K_PKTLOG_TYPE_RC_FIND:
+ case ATH10K_PKTLOG_TYPE_RC_UPDATE:
+ case ATH10K_PKTLOG_TYPE_DBG_PRINT:
+ case ATH10K_PKTLOG_TYPE_TX_STAT:
+ case ATH10K_PKTLOG_TYPE_SW_EVENT:
+ trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) +
+ __le16_to_cpu(hdr->size));
+ break;
+ case ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR: {
+ u32 desc_id = (u32)*((u32 *)(hdr->payload));
+ struct sk_buff *msdu;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ msdu = ath10k_htt_tx_find_msdu_by_id(&ar->htt, desc_id);
+
+ if (!msdu) {
+ ath10k_info(ar,
+ "Failed to get msdu, id: %d\n",
+ desc_id);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ return;
+ }
+ ath10k_process_ieee_hdr((void *)msdu->data);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) +
+ __le16_to_cpu(hdr->size));
+ break;
+ }
+ }
+}
+
+int ath10k_rx_record_pktlog(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct sk_buff *pktlog_skb;
+ struct ath_pktlog_hdr *pl_hdr;
+ struct ath_pktlog_rx_info *pktlog_rx_info;
+ struct htt_rx_desc *rx_desc = (void *)skb->data - sizeof(*rx_desc);
+
+ if (!ar->debug.pktlog_filter)
+ return 0;
+
+ pktlog_skb = dev_alloc_skb(sizeof(struct ath_pktlog_hdr) +
+ sizeof(struct htt_rx_desc) -
+ sizeof(struct htt_host_fw_desc_base));
+ if (!pktlog_skb)
+ return -ENOMEM;
+
+ pktlog_rx_info = (struct ath_pktlog_rx_info *)pktlog_skb->data;
+ pl_hdr = &pktlog_rx_info->pl_hdr;
+
+ pl_hdr->flags = (1 << ATH10K_PKTLOG_FLG_FRM_TYPE_REMOTE_S);
+ pl_hdr->missed_cnt = 0;
+ pl_hdr->mac_id = 0;
+ pl_hdr->log_type = ATH10K_PKTLOG_TYPE_RX_STAT;
+ pl_hdr->flags |= ATH10K_PKTLOG_HDR_SIZE_16;
+ pl_hdr->size = sizeof(*rx_desc) -
+ sizeof(struct htt_host_fw_desc_base);
+
+ pl_hdr->timestamp =
+ cpu_to_le32(rx_desc->ppdu_end.wcn3990.rx_pkt_end.phy_timestamp_1);
+
+ pl_hdr->type_specific_data = 0xDEADAA;
+ memcpy((void *)pktlog_rx_info + sizeof(struct ath_pktlog_hdr),
+ (void *)rx_desc + sizeof(struct htt_host_fw_desc_base),
+ pl_hdr->size);
+
+ ath10k_pktlog_process_rx(ar, pktlog_skb);
+ dev_kfree_skb_any(pktlog_skb);
+ return 0;
+}
+
+static void ath10k_pktlog_htc_tx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_info(ar, "PKTLOG htc completed\n");
+}
+
+int ath10k_pktlog_connect(struct ath10k *ar)
+{
+ int status;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath10k_pktlog_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_pktlog_process_rx;
+ conn_req.ep_ops.ep_tx_credits = NULL;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
+ status);
+ return status;
+ }
+
+ ar->debug.eid = conn_resp.eid;
+
+ return 0;
+}
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
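The pktlog path added above frames every trace record as a fixed header followed by hdr->size payload bytes (see the trace_ath10k_htt_pktlog() calls). A hedged sketch of how a consumer could walk such a buffer; demo_* names are invented and the header only mirrors the ath10k_pktlog_hdr introduced in hw.h further down:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_pktlog_hdr {
    __le16 flags;
    __le16 missed_cnt;
    __le16 log_type;
    __le16 size;
    __le32 timestamp;
    u8 payload[0];
} __packed;

static void demo_walk_pktlog(const u8 *buf, size_t len)
{
    while (len >= sizeof(struct demo_pktlog_hdr)) {
        const struct demo_pktlog_hdr *hdr = (const void *)buf;
        size_t rec = sizeof(*hdr) + __le16_to_cpu(hdr->size);

        if (rec > len)
            break;              /* truncated record, stop */

        /* hdr->log_type selects one of the ATH10K_PKTLOG_TYPE_* values */

        buf += rec;
        len -= rec;
    }
}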
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index f963391e3544..eb47720bbf91 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -57,6 +57,84 @@ enum ath10k_dbg_aggr_mode {
ATH10K_DBG_AGGR_MODE_MAX,
};
+#define IEEE80211_FC1_DIR_MASK 0x03
+#define IEEE80211_FC1_DIR_NODS 0x00 /* STA->STA */
+#define IEEE80211_FC1_DIR_TODS 0x01 /* STA->AP */
+#define IEEE80211_FC1_DIR_FROMDS 0x02 /* AP ->STA */
+#define IEEE80211_FC1_DIR_DSTODS 0x03 /* AP ->AP */
+#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */
+
+#define MAX_PKT_INFO_MSDU_ID 192
+#define MSDU_ID_INFO_ID_OFFSET \
+ ((MAX_PKT_INFO_MSDU_ID >> 3) + 4)
+
+#define PKTLOG_MAX_TXCTL_WORDS 57 /* +2 words for bitmap */
+#define HTT_TX_MSDU_LEN_MASK 0xffff
+
+struct txctl_frm_hdr {
+ __le16 framectrl; /* frame control field from header */
+ __le16 seqctrl; /* sequence control field from header */
+ __le16 bssid_tail; /* last two octets of bssid */
+ __le16 sa_tail; /* last two octets of SA */
+ __le16 da_tail; /* last two octets of DA */
+ __le16 resvd;
+} __packed;
+
+struct ath_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ u8 log_type;
+ u8 mac_id;
+ __le16 size;
+ __le32 timestamp;
+ __le32 type_specific_data;
+} __packed;
+
+/* generic definitions for IEEE 802.11 frames */
+struct ieee80211_frame {
+ u8 i_fc[2];
+ u8 i_dur[2];
+ union {
+ struct {
+ u8 i_addr1[IEEE80211_ADDR_LEN];
+ u8 i_addr2[IEEE80211_ADDR_LEN];
+ u8 i_addr3[IEEE80211_ADDR_LEN];
+ };
+ u8 i_addr_all[3 * IEEE80211_ADDR_LEN];
+ };
+ u8 i_seq[2];
+} __packed;
+
+struct fw_pktlog_msdu_info {
+ __le32 num_msdu;
+ u8 bound_bmap[MAX_PKT_INFO_MSDU_ID >> 3];
+ __le16 id[MAX_PKT_INFO_MSDU_ID];
+} __packed;
+
+struct ath_pktlog_txctl {
+ struct ath_pktlog_hdr hdr;
+ struct txctl_frm_hdr frm_hdr;
+ __le32 txdesc_ctl[PKTLOG_MAX_TXCTL_WORDS];
+} __packed;
+
+struct ath_pktlog_msdu_id {
+ struct ath_pktlog_hdr hdr;
+ struct fw_pktlog_msdu_info msdu_info;
+} __packed;
+
+struct ath_pktlog_rx_info {
+ struct ath_pktlog_hdr pl_hdr;
+ struct rx_attention attention;
+ struct rx_frag_info frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start msdu_start;
+ struct rx_msdu_end msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end ppdu_end;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+} __packed;
+
/* FIXME: How to calculate the buffer size sanely? */
#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
#define ATH10K_DATAPATH_BUF_SIZE (1024 * 1024)
@@ -86,6 +164,7 @@ struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+int ath10k_rx_record_pktlog(struct ath10k *ar, struct sk_buff *skb);
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -98,6 +177,7 @@ void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
struct ethtool_stats *stats, u64 *data);
void fill_datapath_stats(struct ath10k *ar, struct ieee80211_rx_status *status);
size_t get_datapath_stat(char *buf, struct ath10k *ar);
+int ath10k_pktlog_connect(struct ath10k *ar);
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
@@ -142,6 +222,12 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
{
}
+static inline int ath10k_rx_record_pktlog(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
static inline struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
{
@@ -158,6 +244,10 @@ static inline size_t get_datapath_stat(char *buf, struct ath10k *ar)
return 0;
}
+static inline int ath10k_pktlog_connect(struct ath10k *ar)
+{
+ return 0;
+}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index b0d6c2614731..6377c4ef427c 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1745,16 +1745,21 @@ struct ath10k_htt {
#define RX_HTT_HDR_STATUS_LEN 64
-/* This structure layout is programmed via rx ring setup
- * so that FW knows how to transfer the rx descriptor to the host.
- * Buffers like this are placed on the rx ring. */
-struct htt_rx_desc {
+struct htt_host_fw_desc_base {
union {
/* This field is filled on the host using the msdu buffer
* from htt_rx_indication */
struct fw_rx_desc_base fw_desc;
u32 pad;
} __packed;
+};
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc {
+ struct htt_host_fw_desc_base fw_desc_base;
struct {
struct rx_attention attention;
struct rx_frag_info frag_info;
@@ -1847,5 +1852,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt,
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+struct sk_buff *ath10k_htt_tx_find_msdu_by_id(struct ath10k_htt *htt,
+ u16 msdu_id);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 437ea2c192b3..635b8281b055 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -969,6 +969,7 @@ static void ath10k_process_rx(struct ath10k *ar,
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
trace_ath10k_rx_payload(ar, skb->data, skb->len);
+ ath10k_rx_record_pktlog(ar, skb);
ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 1b59721a91ac..6579eb2b410c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -218,6 +218,27 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
return ret;
}
+struct sk_buff *ath10k_htt_tx_find_msdu_by_id(struct ath10k_htt *htt,
+ u16 msdu_id)
+{
+ struct ath10k *ar;
+ struct sk_buff *ret;
+
+ if (!htt)
+ return NULL;
+
+ ar = htt->ar;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ ret = (struct sk_buff *)idr_find(&htt->pending_tx, msdu_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx find msdu by msdu_id %s\n",
+ !ret ? "Failed" : "Success");
+
+ return ret;
+}
+
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
struct ath10k *ar = htt->ar;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 0f2422480c4e..37479589b8e1 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -984,4 +984,37 @@ struct ath10k_shadow_reg_address {
extern struct ath10k_shadow_reg_value wcn3990_shadow_reg_value;
extern struct ath10k_shadow_reg_address wcn3990_shadow_reg_address;
+#define ATH10K_PKTLOG_HDR_SIZE_16 0x8000
+
+enum {
+ ATH10k_PKTLOG_FLG_FRM_TYPE_LOCAL_S = 0,
+ ATH10K_PKTLOG_FLG_FRM_TYPE_REMOTE_S,
+ ATH10K_PKTLOG_FLG_FRM_TYPE_CLONE_S,
+ ATH10K_PKTLOG_FLG_FRM_TYPE_CBF_S,
+ ATH10K_PKTLOG_FLG_FRM_TYPE_UNKNOWN_S
+};
+
+enum ath10k_pktlog_type {
+ ATH10K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH10K_PKTLOG_TYPE_TX_STAT,
+ ATH10K_PKTLOG_TYPE_TX_MSDU_ID,
+ ATH10K_PKTLOG_TYPE_TX_FRM_HDR,
+ ATH10K_PKTLOG_TYPE_RX_STAT,
+ ATH10K_PKTLOG_TYPE_RC_FIND,
+ ATH10K_PKTLOG_TYPE_RC_UPDATE,
+ ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR,
+ ATH10K_PKTLOG_TYPE_DBG_PRINT,
+ ATH10K_PKTLOG_TYPE_SW_EVENT,
+ ATH10K_PKTLOG_TYPE_MAX,
+};
+
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ __le16 log_type;
+ __le16 size;
+ __le32 timestamp;
+ u8 payload[0];
+} __packed;
+
#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index cce931e51d7e..ec86c837e60a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -7822,10 +7822,6 @@ static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit[] = {
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
},
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
};
static const struct ieee80211_iface_limit ath10k_wcn3990_qcs_if_limit[] = {
@@ -7845,10 +7841,6 @@ static const struct ieee80211_iface_limit ath10k_wcn3990_qcs_if_limit[] = {
#endif
BIT(NL80211_IFTYPE_P2P_GO),
},
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
};
static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit_ibss[] = {
@@ -8022,6 +8014,10 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
+ if (QCA_REV_WCN3990(ar))
+ ar->hw->wiphy->interface_modes &=
+ ~BIT(NL80211_IFTYPE_P2P_DEVICE);
+
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9e607b2fa2d4..ca77861c4320 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3139,7 +3139,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
- if (QCA_REV_6174(ar))
+ if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
ath10k_pci_override_ce_config(ar);
ret = ath10k_pci_alloc_pipes(ar);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 75e81991913a..13736750e463 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -775,9 +775,6 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
}
}
- if (WARN_ON(!ul_set || !dl_set))
- return -ENOENT;
-
return 0;
}
@@ -1274,19 +1271,19 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ret = ath10k_snoc_claim(ar);
if (ret) {
ath10k_err(ar, "failed to claim device: %d\n", ret);
- goto err_core_destroy;
+ goto err_stop_qmi_service;
}
ret = ath10k_snoc_bus_configure(ar);
if (ret) {
ath10k_err(ar, "failed to configure bus: %d\n", ret);
- goto err_core_destroy;
+ goto err_stop_qmi_service;
}
ret = ath10k_snoc_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
- goto err_core_destroy;
+ goto err_stop_qmi_service;
}
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
@@ -1319,6 +1316,9 @@ err_free_irq:
err_free_pipes:
ath10k_snoc_free_pipes(ar);
+err_stop_qmi_service:
+ ath10k_snoc_stop_qmi_service(ar);
+
err_core_destroy:
ath10k_core_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c3100fcd80f2..8bb01c1a35f7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -681,6 +681,9 @@ ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
struct sk_buff *skb;
u32 cmd_id;
+ if (!ar->wmi.ops->gen_vdev_spectral_conf)
+ return -EOPNOTSUPP;
+
skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -696,6 +699,9 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
struct sk_buff *skb;
u32 cmd_id;
+ if (!ar->wmi.ops->gen_vdev_spectral_enable)
+ return -EOPNOTSUPP;
+
skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
enable);
if (IS_ERR(skb))
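The two wmi-ops hunks above add the usual guard for optional ops-table members, so firmware families without spectral-scan support see -EOPNOTSUPP instead of a NULL dereference. A generic sketch of the pattern with invented names:

#include <linux/errno.h>

struct demo_ops {
    int (*optional_op)(int arg);    /* may be NULL for some firmware families */
};

static int demo_call_optional(const struct demo_ops *ops, int arg)
{
    if (!ops->optional_op)
        return -EOPNOTSUPP;         /* report "not supported" instead of crashing */

    return ops->optional_op(arg);
}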
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 201425e7f9cb..fbc8c9a9014b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1815,8 +1815,6 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
{
REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
- REG_SET_BIT(ah, 0x9864, 0x7f000);
- REG_SET_BIT(ah, 0x9924, 0x7f00fe);
REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
REG_WRITE(ah, AR_CR, AR_CR_RXD);
REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index ac4781f37e78..b4e6304afd40 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -190,22 +190,27 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
if (strtobool(buf, &start))
return -EINVAL;
+ mutex_lock(&sc->mutex);
+
if (start == sc->tx99_state) {
if (!start)
- return count;
+ goto out;
ath_dbg(common, XMIT, "Resetting TX99\n");
ath9k_tx99_deinit(sc);
}
if (!start) {
ath9k_tx99_deinit(sc);
- return count;
+ goto out;
}
r = ath9k_tx99_init(sc);
- if (r)
+ if (r) {
+ mutex_unlock(&sc->mutex);
return r;
-
+ }
+out:
+ mutex_unlock(&sc->mutex);
return count;
}
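The tx99 fix serializes the whole write handler under sc->mutex; every exit now drops the lock, either through the shared out label or explicitly on the init-failure path. A reduced sketch of that locking shape; demo_* names and demo_do_init() are invented:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_lock);

static int demo_do_init(void);      /* hypothetical step that may fail */

static ssize_t demo_write(size_t count, bool start)
{
    int r;

    mutex_lock(&demo_lock);

    if (!start)
        goto out;                   /* early exits still unlock via 'out' */

    r = demo_do_init();
    if (r) {
        mutex_unlock(&demo_lock);   /* error path unlocks before returning */
        return r;
    }
out:
    mutex_unlock(&demo_lock);
    return count;
}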
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 63bb7686b811..94861020af12 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -960,7 +960,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
- if (len < sizeof(struct ieee80211_mgmt))
+ if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 70a6985334d5..da5826d788d6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -4472,6 +4472,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) {
+ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
+ brcmf_err("invalid action frame length\n");
+ err = -EINVAL;
+ goto exit;
+ }
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) {
brcmf_err("unable to allocate frame\n");
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 22c59d8c3c45..af92f00ca56e 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -83,6 +83,7 @@
#define QCA6180_DEVICE_ID (0x0041)
#define QCA6180_REV_ID_OFFSET (0x08)
+#define WLAN_EN_VREG_NAME "vdd-wlan-en"
#define WLAN_VREG_NAME "vdd-wlan"
#define WLAN_VREG_IO_NAME "vdd-wlan-io"
#define WLAN_VREG_XTAL_NAME "vdd-wlan-xtal"
@@ -114,6 +115,8 @@
#define WLAN_VREG_SP2T_MIN 2700000
#define POWER_ON_DELAY 2
+#define WLAN_VREG_IO_DELAY_MIN 100
+#define WLAN_VREG_IO_DELAY_MAX 1000
#define WLAN_ENABLE_DELAY 10
#define WLAN_RECOVERY_DELAY 1
#define PCIE_ENABLE_DELAY 100
@@ -134,7 +137,6 @@ static DEFINE_SPINLOCK(pci_link_down_lock);
#define FW_IMAGE_MISSION (0x02)
#define FW_IMAGE_BDATA (0x03)
#define FW_IMAGE_PRINT (0x04)
-#define FW_SETUP_DELAY 2000
#define SEG_METADATA (0x01)
#define SEG_NON_PAGED (0x02)
@@ -155,6 +157,7 @@ struct cnss_wlan_gpio_info {
};
struct cnss_wlan_vreg_info {
+ struct regulator *wlan_en_reg;
struct regulator *wlan_reg;
struct regulator *soc_swreg;
struct regulator *ant_switch;
@@ -237,6 +240,7 @@ static struct cnss_data {
dma_addr_t smmu_iova_start;
size_t smmu_iova_len;
struct cnss_wlan_vreg_info vreg_info;
+ bool wlan_en_vreg_support;
struct cnss_wlan_gpio_info gpio_info;
bool pcie_link_state;
bool pcie_link_down_ind;
@@ -274,10 +278,8 @@ static struct cnss_data {
u32 fw_dma_size;
u32 fw_seg_count;
struct segment_memory fw_seg_mem[MAX_NUM_OF_SEGMENTS];
- atomic_t fw_store_in_progress;
/* Firmware setup complete lock */
struct mutex fw_setup_stat_lock;
- struct completion fw_setup_complete;
void *bdata_cpu;
dma_addr_t bdata_dma;
u32 bdata_dma_size;
@@ -294,6 +296,17 @@ module_param(pcie_link_down_panic, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(pcie_link_down_panic,
"Trigger kernel panic when PCIe link down is detected");
+static void cnss_put_wlan_enable_gpio(void)
+{
+ struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info;
+ struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info;
+
+ if (penv->wlan_en_vreg_support)
+ regulator_put(vreg_info->wlan_en_reg);
+ else
+ gpio_free(gpio_info->num);
+}
+
static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info)
{
int ret;
@@ -307,13 +320,6 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info)
}
}
- ret = regulator_enable(vreg_info->wlan_reg);
- if (ret) {
- pr_err("%s: regulator enable failed for WLAN power\n",
- __func__);
- goto error_enable;
- }
-
if (vreg_info->wlan_reg_io) {
ret = regulator_enable(vreg_info->wlan_reg_io);
if (ret) {
@@ -321,6 +327,8 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info)
__func__);
goto error_enable_reg_io;
}
+
+ usleep_range(WLAN_VREG_IO_DELAY_MIN, WLAN_VREG_IO_DELAY_MAX);
}
if (vreg_info->wlan_reg_xtal_aon) {
@@ -341,6 +349,13 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info)
}
}
+ ret = regulator_enable(vreg_info->wlan_reg);
+ if (ret) {
+ pr_err("%s: regulator enable failed for WLAN power\n",
+ __func__);
+ goto error_enable;
+ }
+
if (vreg_info->wlan_reg_sp2t) {
ret = regulator_enable(vreg_info->wlan_reg_sp2t);
if (ret) {
@@ -377,6 +392,8 @@ error_enable_ant_switch:
if (vreg_info->wlan_reg_sp2t)
regulator_disable(vreg_info->wlan_reg_sp2t);
error_enable_reg_sp2t:
+ regulator_disable(vreg_info->wlan_reg);
+error_enable:
if (vreg_info->wlan_reg_xtal)
regulator_disable(vreg_info->wlan_reg_xtal);
error_enable_reg_xtal:
@@ -386,8 +403,6 @@ error_enable_reg_xtal_aon:
if (vreg_info->wlan_reg_io)
regulator_disable(vreg_info->wlan_reg_io);
error_enable_reg_io:
- regulator_disable(vreg_info->wlan_reg);
-error_enable:
if (vreg_info->wlan_reg_core)
regulator_disable(vreg_info->wlan_reg_core);
error_enable_reg_core:
@@ -425,6 +440,13 @@ static int cnss_wlan_vreg_off(struct cnss_wlan_vreg_info *vreg_info)
}
}
+ ret = regulator_disable(vreg_info->wlan_reg);
+ if (ret) {
+ pr_err("%s: regulator disable failed for WLAN power\n",
+ __func__);
+ goto error_disable;
+ }
+
if (vreg_info->wlan_reg_xtal) {
ret = regulator_disable(vreg_info->wlan_reg_xtal);
if (ret) {
@@ -452,13 +474,6 @@ static int cnss_wlan_vreg_off(struct cnss_wlan_vreg_info *vreg_info)
}
}
- ret = regulator_disable(vreg_info->wlan_reg);
- if (ret) {
- pr_err("%s: regulator disable failed for WLAN power\n",
- __func__);
- goto error_disable;
- }
-
if (vreg_info->wlan_reg_core) {
ret = regulator_disable(vreg_info->wlan_reg_core);
if (ret) {
@@ -575,6 +590,25 @@ static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state)
info->name, info->state ? "enabled" : "disabled");
}
+static int cnss_configure_wlan_en_gpio(bool state)
+{
+ int ret = 0;
+ struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info;
+ struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info;
+
+ if (penv->wlan_en_vreg_support) {
+ if (state)
+ ret = regulator_enable(vreg_info->wlan_en_reg);
+ else
+ ret = regulator_disable(vreg_info->wlan_en_reg);
+ } else {
+ cnss_wlan_gpio_set(gpio_info, state);
+ }
+
+ msleep(WLAN_ENABLE_DELAY);
+ return ret;
+}
+
static int cnss_pinctrl_init(struct cnss_wlan_gpio_info *gpio_info,
struct platform_device *pdev)
{
@@ -681,14 +715,71 @@ end:
return ret;
}
+static int cnss_get_wlan_enable_gpio(
+ struct cnss_wlan_gpio_info *gpio_info,
+ struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+
+ if (!of_find_property(dev->of_node, gpio_info->name, NULL)) {
+ gpio_info->prop = false;
+ return -ENODEV;
+ }
+
+ gpio_info->prop = true;
+ ret = of_get_named_gpio(dev->of_node, gpio_info->name, 0);
+ if (ret >= 0) {
+ gpio_info->num = ret;
+ } else {
+ if (ret == -EPROBE_DEFER)
+ pr_debug("get WLAN_EN GPIO probe defer\n");
+ else
+ pr_err(
+ "can't get gpio %s ret %d", gpio_info->name, ret);
+ }
+
+ ret = cnss_pinctrl_init(gpio_info, pdev);
+ if (ret)
+ pr_debug("%s: pinctrl init failed!\n", __func__);
+
+ ret = cnss_wlan_gpio_init(gpio_info);
+ if (ret)
+ pr_err("gpio init failed\n");
+
+ return ret;
+}
+
+static int cnss_get_wlan_bootstrap_gpio(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device_node *node = (&pdev->dev)->of_node;
+
+ if (!of_find_property(node, WLAN_BOOTSTRAP_GPIO_NAME, NULL))
+ return ret;
+
+ penv->wlan_bootstrap_gpio =
+ of_get_named_gpio(node, WLAN_BOOTSTRAP_GPIO_NAME, 0);
+ if (penv->wlan_bootstrap_gpio > 0) {
+ ret = cnss_wlan_bootstrap_gpio_init();
+ } else {
+ ret = penv->wlan_bootstrap_gpio;
+ pr_err(
+ "%s: Can't get GPIO %s, ret = %d",
+ __func__, WLAN_BOOTSTRAP_GPIO_NAME, ret);
+ }
+
+ return ret;
+}
+
static int cnss_wlan_get_resources(struct platform_device *pdev)
{
int ret = 0;
struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info;
struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info;
+ struct device_node *node = pdev->dev.of_node;
- if (of_get_property(pdev->dev.of_node,
- WLAN_VREG_CORE_NAME"-supply", NULL)) {
+ if (of_get_property(node, WLAN_VREG_CORE_NAME "-supply", NULL)) {
vreg_info->wlan_reg_core = regulator_get(&pdev->dev,
WLAN_VREG_CORE_NAME);
if (IS_ERR(vreg_info->wlan_reg_core)) {
@@ -718,26 +809,7 @@ static int cnss_wlan_get_resources(struct platform_device *pdev)
}
}
- vreg_info->wlan_reg = regulator_get(&pdev->dev, WLAN_VREG_NAME);
-
- if (IS_ERR(vreg_info->wlan_reg)) {
- if (PTR_ERR(vreg_info->wlan_reg) == -EPROBE_DEFER)
- pr_err("%s: vreg probe defer\n", __func__);
- else
- pr_err("%s: vreg regulator get failed\n", __func__);
- ret = PTR_ERR(vreg_info->wlan_reg);
- goto err_reg_get;
- }
-
- ret = regulator_enable(vreg_info->wlan_reg);
-
- if (ret) {
- pr_err("%s: vreg initial vote failed\n", __func__);
- goto err_reg_enable;
- }
-
- if (of_get_property(pdev->dev.of_node,
- WLAN_VREG_IO_NAME"-supply", NULL)) {
+ if (of_get_property(node, WLAN_VREG_IO_NAME "-supply", NULL)) {
vreg_info->wlan_reg_io = regulator_get(&pdev->dev,
WLAN_VREG_IO_NAME);
if (!IS_ERR(vreg_info->wlan_reg_io)) {
@@ -755,14 +827,34 @@ static int cnss_wlan_get_resources(struct platform_device *pdev)
__func__);
goto err_reg_io_enable;
}
+
+ usleep_range(WLAN_VREG_IO_DELAY_MIN,
+ WLAN_VREG_IO_DELAY_MAX);
}
}
if (cnss_enable_xtal_ldo(pdev))
goto err_reg_xtal_enable;
- if (of_get_property(pdev->dev.of_node,
- WLAN_VREG_SP2T_NAME"-supply", NULL)) {
+ vreg_info->wlan_reg = regulator_get(&pdev->dev, WLAN_VREG_NAME);
+
+ if (IS_ERR(vreg_info->wlan_reg)) {
+ if (PTR_ERR(vreg_info->wlan_reg) == -EPROBE_DEFER)
+ pr_err("%s: vreg probe defer\n", __func__);
+ else
+ pr_err("%s: vreg regulator get failed\n", __func__);
+ ret = PTR_ERR(vreg_info->wlan_reg);
+ goto err_reg_get;
+ }
+
+ ret = regulator_enable(vreg_info->wlan_reg);
+
+ if (ret) {
+ pr_err("%s: vreg initial vote failed\n", __func__);
+ goto err_reg_enable;
+ }
+
+ if (of_get_property(node, WLAN_VREG_SP2T_NAME "-supply", NULL)) {
vreg_info->wlan_reg_sp2t =
regulator_get(&pdev->dev, WLAN_VREG_SP2T_NAME);
if (!IS_ERR(vreg_info->wlan_reg_sp2t)) {
@@ -783,8 +875,7 @@ static int cnss_wlan_get_resources(struct platform_device *pdev)
}
}
- if (of_get_property(pdev->dev.of_node,
- WLAN_ANT_SWITCH_NAME "-supply", NULL)) {
+ if (of_get_property(node, WLAN_ANT_SWITCH_NAME "-supply", NULL)) {
vreg_info->ant_switch =
regulator_get(&pdev->dev, WLAN_ANT_SWITCH_NAME);
if (!IS_ERR(vreg_info->ant_switch)) {
@@ -814,13 +905,10 @@ static int cnss_wlan_get_resources(struct platform_device *pdev)
}
}
- if (of_find_property((&pdev->dev)->of_node,
- "qcom,wlan-uart-access", NULL))
+ if (of_find_property(node, "qcom,wlan-uart-access", NULL))
penv->cap.cap_flag |= CNSS_HAS_UART_ACCESS;
- if (of_get_property(pdev->dev.of_node,
- WLAN_SWREG_NAME"-supply", NULL)) {
-
+ if (of_get_property(node, WLAN_SWREG_NAME "-supply", NULL)) {
vreg_info->soc_swreg = regulator_get(&pdev->dev,
WLAN_SWREG_NAME);
if (IS_ERR(vreg_info->soc_swreg)) {
@@ -843,68 +931,41 @@ static int cnss_wlan_get_resources(struct platform_device *pdev)
penv->cap.cap_flag |= CNSS_HAS_EXTERNAL_SWREG;
}
- vreg_info->state = VREG_ON;
-
- if (!of_find_property((&pdev->dev)->of_node, gpio_info->name, NULL)) {
- gpio_info->prop = false;
- goto end;
+ penv->wlan_en_vreg_support =
+ of_property_read_bool(node, "qcom,wlan-en-vreg-support");
+ if (penv->wlan_en_vreg_support) {
+ vreg_info->wlan_en_reg =
+ regulator_get(&pdev->dev, WLAN_EN_VREG_NAME);
+ if (IS_ERR(vreg_info->wlan_en_reg)) {
+ pr_err("%s:wlan_en vreg get failed\n", __func__);
+ ret = PTR_ERR(vreg_info->wlan_en_reg);
+ goto err_wlan_en_reg_get;
+ }
}
- gpio_info->prop = true;
- ret = of_get_named_gpio((&pdev->dev)->of_node,
- gpio_info->name, 0);
-
- if (ret >= 0) {
- gpio_info->num = ret;
- ret = 0;
- } else {
- if (ret == -EPROBE_DEFER)
- pr_debug("get WLAN_EN GPIO probe defer\n");
- else
- pr_err("can't get gpio %s ret %d",
- gpio_info->name, ret);
- goto err_get_gpio;
+ if (!penv->wlan_en_vreg_support) {
+ ret = cnss_get_wlan_enable_gpio(gpio_info, pdev);
+ if (ret) {
+ pr_err(
+ "%s:Failed to config the WLAN_EN gpio\n", __func__);
+ goto err_gpio_wlan_en;
+ }
}
+ vreg_info->state = VREG_ON;
- ret = cnss_pinctrl_init(gpio_info, pdev);
+ ret = cnss_get_wlan_bootstrap_gpio(pdev);
if (ret) {
- pr_err("%s: pinctrl init failed!\n", __func__);
- goto err_pinctrl_init;
+ pr_err("%s: Failed to enable wlan bootstrap gpio\n", __func__);
+ goto err_gpio_wlan_bootstrap;
}
- ret = cnss_wlan_gpio_init(gpio_info);
- if (ret) {
- pr_err("gpio init failed\n");
- goto err_gpio_init;
- }
-
- if (of_find_property((&pdev->dev)->of_node,
- WLAN_BOOTSTRAP_GPIO_NAME, NULL)) {
- penv->wlan_bootstrap_gpio =
- of_get_named_gpio((&pdev->dev)->of_node,
- WLAN_BOOTSTRAP_GPIO_NAME, 0);
- if (penv->wlan_bootstrap_gpio > 0) {
- ret = cnss_wlan_bootstrap_gpio_init();
- if (ret)
- goto err_gpio_init;
- } else {
- if (ret == -EPROBE_DEFER) {
- pr_debug("%s: Get GPIO %s probe defer\n",
- __func__, WLAN_BOOTSTRAP_GPIO_NAME);
- } else {
- pr_err("%s: Can't get GPIO %s, ret = %d",
- __func__, WLAN_BOOTSTRAP_GPIO_NAME,
- ret);
- }
- goto err_gpio_init;
- }
- }
-end:
return ret;
-err_gpio_init:
-err_pinctrl_init:
-err_get_gpio:
+err_gpio_wlan_bootstrap:
+ cnss_put_wlan_enable_gpio();
+err_gpio_wlan_en:
+err_wlan_en_reg_get:
+ vreg_info->wlan_en_reg = NULL;
if (vreg_info->soc_swreg)
regulator_disable(vreg_info->soc_swreg);
vreg_info->state = VREG_OFF;
@@ -929,7 +990,11 @@ err_reg_sp2t_enable:
err_reg_sp2t_set:
if (vreg_info->wlan_reg_sp2t)
regulator_put(vreg_info->wlan_reg_sp2t);
+ regulator_disable(vreg_info->wlan_reg);
+err_reg_enable:
+ regulator_put(vreg_info->wlan_reg);
+err_reg_get:
cnss_disable_xtal_ldo(pdev);
err_reg_xtal_enable:
@@ -940,12 +1005,6 @@ err_reg_io_enable:
err_reg_io_set:
if (vreg_info->wlan_reg_io)
regulator_put(vreg_info->wlan_reg_io);
- regulator_disable(vreg_info->wlan_reg);
-
-err_reg_enable:
- regulator_put(vreg_info->wlan_reg);
-
-err_reg_get:
if (vreg_info->wlan_reg_core)
regulator_disable(vreg_info->wlan_reg_core);
@@ -965,7 +1024,7 @@ static void cnss_wlan_release_resources(void)
if (penv->wlan_bootstrap_gpio > 0)
gpio_free(penv->wlan_bootstrap_gpio);
- gpio_free(gpio_info->num);
+ cnss_put_wlan_enable_gpio();
gpio_info->state = WLAN_EN_LOW;
gpio_info->prop = false;
cnss_wlan_vreg_set(vreg_info, VREG_OFF);
@@ -975,13 +1034,13 @@ static void cnss_wlan_release_resources(void)
regulator_put(vreg_info->ant_switch);
if (vreg_info->wlan_reg_sp2t)
regulator_put(vreg_info->wlan_reg_sp2t);
+ regulator_put(vreg_info->wlan_reg);
if (vreg_info->wlan_reg_xtal)
regulator_put(vreg_info->wlan_reg_xtal);
if (vreg_info->wlan_reg_xtal_aon)
regulator_put(vreg_info->wlan_reg_xtal_aon);
if (vreg_info->wlan_reg_io)
regulator_put(vreg_info->wlan_reg_io);
- regulator_put(vreg_info->wlan_reg);
if (vreg_info->wlan_reg_core)
regulator_put(vreg_info->wlan_reg_core);
vreg_info->state = VREG_OFF;
@@ -1374,15 +1433,6 @@ int cnss_get_fw_image(struct image_desc_info *image_desc_info)
!penv->fw_seg_count || !penv->bdata_seg_count)
return -EINVAL;
- /* Check for firmware setup trigger by usersapce is in progress
- * and wait for complition of firmware setup.
- */
-
- if (atomic_read(&penv->fw_store_in_progress)) {
- wait_for_completion_timeout(&penv->fw_setup_complete,
- msecs_to_jiffies(FW_SETUP_DELAY));
- }
-
mutex_lock(&penv->fw_setup_stat_lock);
image_desc_info->fw_addr = penv->fw_dma;
image_desc_info->fw_size = penv->fw_dma_size;
@@ -1560,7 +1610,6 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev,
{
int ret = 0;
struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info;
- struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info;
void *cpu_addr;
dma_addr_t dma_handle;
struct codeswap_codeseg_info *cnss_seg_info = NULL;
@@ -1619,7 +1668,7 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev,
penv->pcie_link_state = PCIE_LINK_DOWN;
}
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
ret = cnss_wlan_vreg_set(vreg_info, VREG_OFF);
if (ret) {
@@ -1627,7 +1676,9 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev,
goto err_pcie_suspend;
}
+ mutex_lock(&penv->fw_setup_stat_lock);
cnss_wlan_fw_mem_alloc(pdev);
+ mutex_unlock(&penv->fw_setup_stat_lock);
ret = device_create_file(&penv->pldev->dev, &dev_attr_wlan_setup);
@@ -1874,17 +1925,11 @@ static ssize_t fw_image_setup_store(struct device *dev,
if (!penv)
return -ENODEV;
- if (atomic_read(&penv->fw_store_in_progress)) {
- pr_info("%s: Firmware setup in progress\n", __func__);
- return 0;
- }
-
- atomic_set(&penv->fw_store_in_progress, 1);
- init_completion(&penv->fw_setup_complete);
+ mutex_lock(&penv->fw_setup_stat_lock);
+ pr_info("%s: Firmware setup in progress\n", __func__);
if (kstrtoint(buf, 0, &val)) {
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -EINVAL;
}
@@ -1895,8 +1940,7 @@ static ssize_t fw_image_setup_store(struct device *dev,
if (ret != 0) {
pr_err("%s: Invalid parsing of FW image files %d",
__func__, ret);
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -EINVAL;
}
penv->fw_image_setup = val;
@@ -1906,9 +1950,8 @@ static ssize_t fw_image_setup_store(struct device *dev,
penv->bmi_test = val;
}
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
-
+ pr_info("%s: Firmware setup completed\n", __func__);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return count;
}
@@ -2007,16 +2050,21 @@ int cnss_get_codeswap_struct(struct codeswap_codeseg_info *swap_seg)
{
struct codeswap_codeseg_info *cnss_seg_info = penv->cnss_seg_info;
+ mutex_lock(&penv->fw_setup_stat_lock);
if (!cnss_seg_info) {
swap_seg = NULL;
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -ENOENT;
}
+
if (!atomic_read(&penv->fw_available)) {
pr_debug("%s: fw is not available\n", __func__);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -ENOENT;
}
*swap_seg = *cnss_seg_info;
+ mutex_unlock(&penv->fw_setup_stat_lock);
return 0;
}
@@ -2035,15 +2083,6 @@ static void cnss_wlan_memory_expansion(void)
u_int32_t total_length = 0;
struct pci_dev *pdev;
- /* Check for firmware setup trigger by usersapce is in progress
- * and wait for complition of firmware setup.
- */
-
- if (atomic_read(&penv->fw_store_in_progress)) {
- wait_for_completion_timeout(&penv->fw_setup_complete,
- msecs_to_jiffies(FW_SETUP_DELAY));
- }
-
mutex_lock(&penv->fw_setup_stat_lock);
filename = cnss_wlan_get_evicted_data_file();
pdev = penv->pdev;
@@ -2276,8 +2315,7 @@ again:
msleep(WLAN_BOOTSTRAP_DELAY);
}
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_HIGH);
- msleep(WLAN_ENABLE_DELAY);
+ cnss_configure_wlan_en_gpio(WLAN_EN_HIGH);
if (!pdev) {
pr_debug("%s: invalid pdev. register pci device\n", __func__);
@@ -2360,8 +2398,7 @@ again:
cnss_get_pci_dev_bus_number(pdev),
pdev, PM_OPTIONS);
penv->pcie_link_state = PCIE_LINK_DOWN;
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
- msleep(WLAN_ENABLE_DELAY);
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
cnss_wlan_vreg_set(vreg_info, VREG_OFF);
msleep(POWER_ON_DELAY);
probe_again++;
@@ -2388,7 +2425,7 @@ err_pcie_link_up:
}
err_pcie_reg:
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
cnss_wlan_vreg_set(vreg_info, VREG_OFF);
if (penv->pdev) {
pr_err("%d: Unregistering PCI device\n", __LINE__);
@@ -2469,8 +2506,7 @@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver)
cut_power:
penv->driver = NULL;
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
-
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
if (cnss_wlan_vreg_set(vreg_info, VREG_OFF))
pr_err("wlan vreg OFF failed\n");
}
@@ -2582,8 +2618,7 @@ static int cnss_shutdown(const struct subsys_desc *subsys, bool force_stop)
}
cut_power:
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
-
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
if (cnss_wlan_vreg_set(vreg_info, VREG_OFF))
pr_err("cnss: Failed to set WLAN VREG_OFF!\n");
@@ -2616,8 +2651,7 @@ static int cnss_powerup(const struct subsys_desc *subsys)
}
msleep(POWER_ON_DELAY);
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_HIGH);
- msleep(WLAN_ENABLE_DELAY);
+ cnss_configure_wlan_en_gpio(WLAN_EN_HIGH);
if (!pdev) {
pr_err("%d: invalid pdev\n", __LINE__);
@@ -2677,7 +2711,7 @@ err_wlan_reinit:
penv->pcie_link_state = PCIE_LINK_DOWN;
err_pcie_link_up:
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
cnss_wlan_vreg_set(vreg_info, VREG_OFF);
if (penv->pdev) {
pr_err("%d: Unregistering pci device\n", __LINE__);
@@ -2859,13 +2893,17 @@ static int cnss_probe(struct platform_device *pdev)
penv->vreg_info.wlan_reg = NULL;
penv->vreg_info.state = VREG_OFF;
penv->pci_register_again = false;
+ mutex_init(&penv->fw_setup_stat_lock);
ret = cnss_wlan_get_resources(pdev);
if (ret)
goto err_get_wlan_res;
- cnss_wlan_gpio_set(&penv->gpio_info, WLAN_EN_HIGH);
- msleep(WLAN_ENABLE_DELAY);
+ ret = cnss_configure_wlan_en_gpio(WLAN_EN_HIGH);
+ if (ret) {
+ pr_err("%s: Failed to enable WLAN enable gpio\n", __func__);
+ goto err_get_rc;
+ }
ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num);
if (ret) {
@@ -3016,8 +3054,6 @@ skip_ramdump:
memset(phys_to_virt(0), 0, SZ_4K);
#endif
- atomic_set(&penv->fw_store_in_progress, 0);
- mutex_init(&penv->fw_setup_stat_lock);
ret = device_create_file(dev, &dev_attr_fw_image_setup);
if (ret) {
pr_err("cnss: fw_image_setup sys file creation failed\n");
@@ -3062,7 +3098,7 @@ err_subsys_reg:
err_esoc_reg:
err_pcie_enumerate:
err_get_rc:
- cnss_wlan_gpio_set(&penv->gpio_info, WLAN_EN_LOW);
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
cnss_wlan_release_resources();
err_get_wlan_res:
@@ -3073,8 +3109,6 @@ err_get_wlan_res:
static int cnss_remove(struct platform_device *pdev)
{
- struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info;
-
unregister_pm_notifier(&cnss_pm_notifier);
device_remove_file(&pdev->dev, &dev_attr_fw_image_setup);
@@ -3095,7 +3129,7 @@ static int cnss_remove(struct platform_device *pdev)
}
}
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
if (penv->wlan_bootstrap_gpio > 0)
gpio_set_value(penv->wlan_bootstrap_gpio, WLAN_BOOTSTRAP_LOW);
cnss_wlan_release_resources();
@@ -3596,8 +3630,7 @@ static int __cnss_pcie_power_up(struct device *dev)
msleep(WLAN_BOOTSTRAP_DELAY);
}
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_HIGH);
- msleep(WLAN_ENABLE_DELAY);
+ cnss_configure_wlan_en_gpio(WLAN_EN_HIGH);
return 0;
}
@@ -3610,8 +3643,7 @@ static int __cnss_pcie_power_down(struct device *dev)
vreg_info = &penv->vreg_info;
gpio_info = &penv->gpio_info;
- cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW);
-
+ cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
if (penv->wlan_bootstrap_gpio > 0)
gpio_set_value(penv->wlan_bootstrap_gpio, WLAN_BOOTSTRAP_LOW);
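The cnss changes above funnel every WLAN_EN toggle through cnss_configure_wlan_en_gpio(), which drives either a dedicated regulator or the legacy GPIO depending on the qcom,wlan-en-vreg-support device-tree flag. A condensed sketch of that either/or switch; demo_set_wlan_en() is illustrative and omits the WLAN_ENABLE_DELAY sleep and the error logging the driver performs:

#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int demo_set_wlan_en(struct device *dev, bool state,
                struct regulator *wlan_en_reg, int gpio)
{
    /* in the driver this flag is read once at probe time and cached */
    bool use_vreg = of_property_read_bool(dev->of_node,
                          "qcom,wlan-en-vreg-support");

    if (use_vreg)
        return state ? regulator_enable(wlan_en_reg)
                 : regulator_disable(wlan_en_reg);

    gpio_set_value(gpio, state ? 1 : 0);
    return 0;
}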
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 360ab31c61dd..35d7fe1c318c 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include "main.h"
#include "debug.h"
+#include "pci.h"
#define CNSS_IPC_LOG_PAGES 32
@@ -97,6 +98,10 @@ static int cnss_stats_show_state(struct seq_file *s,
continue;
case CNSS_DEV_ERR_NOTIFY:
seq_puts(s, "DEV_ERR");
+ continue;
+ case CNSS_DRIVER_DEBUG:
+ seq_puts(s, "DRIVER_DEBUG");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
@@ -121,13 +126,323 @@ static int cnss_stats_open(struct inode *inode, struct file *file)
}
static const struct file_operations cnss_stats_fops = {
- .read = seq_read,
- .release = single_release,
- .open = cnss_stats_open,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_stats_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static ssize_t cnss_dev_boot_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *cmd;
+ unsigned int len = 0;
+ int ret = 0;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ cmd = buf;
+
+ if (sysfs_streq(cmd, "on")) {
+ ret = cnss_power_on_device(plat_priv);
+ } else if (sysfs_streq(cmd, "off")) {
+ cnss_power_off_device(plat_priv);
+ } else if (sysfs_streq(cmd, "enumerate")) {
+ ret = cnss_pci_init(plat_priv);
+ } else if (sysfs_streq(cmd, "download")) {
+ ret = cnss_pci_start_mhi(plat_priv->bus_priv);
+ } else if (sysfs_streq(cmd, "linkup")) {
+ ret = cnss_resume_pci_link(plat_priv->bus_priv);
+ } else if (sysfs_streq(cmd, "linkdown")) {
+ ret = cnss_suspend_pci_link(plat_priv->bus_priv);
+ } else if (sysfs_streq(cmd, "powerup")) {
+ set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ true, NULL);
+ } else if (sysfs_streq(cmd, "shutdown")) {
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
+ true, NULL);
+ clear_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+ } else {
+ cnss_pr_err("Device boot debugfs command is invalid\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int cnss_dev_boot_debug_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "\nUsage: echo <action> > <debugfs_path>/cnss/dev_boot\n");
+ seq_puts(s, "<action> can be one of below:\n");
+ seq_puts(s, "on: turn on device power, assert WLAN_EN\n");
+ seq_puts(s, "off: de-assert WLAN_EN, turn off device power\n");
+ seq_puts(s, "enumerate: de-assert PERST, enumerate PCIe\n");
+ seq_puts(s, "download: download FW and do QMI handshake with FW\n");
+ seq_puts(s, "linkup: bring up PCIe link\n");
+ seq_puts(s, "linkdown: bring down PCIe link\n");
+ seq_puts(s, "powerup: full power on sequence to boot device, download FW and do QMI handshake with FW\n");
+ seq_puts(s, "shutdown: full power off sequence to shutdown device\n");
+
+ return 0;
+}
+
+static int cnss_dev_boot_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_dev_boot_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_dev_boot_debug_fops = {
+ .read = seq_read,
+ .write = cnss_dev_boot_debug_write,
+ .release = single_release,
+ .open = cnss_dev_boot_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_reg_read_debug_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *plat_priv = s->private;
+
+ mutex_lock(&plat_priv->dev_lock);
+ if (!plat_priv->diag_reg_read_buf) {
+ seq_puts(s, "\nUsage: echo <mem_type> <offset> <data_len> > <debugfs_path>/cnss/reg_read\n");
+ mutex_unlock(&plat_priv->dev_lock);
+ return 0;
+ }
+
+ seq_printf(s, "\nRegister read, address: 0x%x memory type: 0x%x length: 0x%x\n\n",
+ plat_priv->diag_reg_read_addr,
+ plat_priv->diag_reg_read_mem_type,
+ plat_priv->diag_reg_read_len);
+
+ seq_hex_dump(s, "", DUMP_PREFIX_OFFSET, 32, 4,
+ plat_priv->diag_reg_read_buf,
+ plat_priv->diag_reg_read_len, false);
+
+ plat_priv->diag_reg_read_len = 0;
+ kfree(plat_priv->diag_reg_read_buf);
+ plat_priv->diag_reg_read_buf = NULL;
+ mutex_unlock(&plat_priv->dev_lock);
+
+ return 0;
+}
+
+static ssize_t cnss_reg_read_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_offset, mem_type;
+ u32 data_len = 0;
+ u8 *reg_buf = NULL;
+ const char *delim = " ";
+ int ret = 0;
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Firmware is not ready yet\n");
+ return -EINVAL;
+ }
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &mem_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_offset))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &data_len))
+ return -EINVAL;
+
+ if (data_len == 0 ||
+ data_len > QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01)
+ return -EINVAL;
+
+ mutex_lock(&plat_priv->dev_lock);
+ kfree(plat_priv->diag_reg_read_buf);
+ plat_priv->diag_reg_read_buf = NULL;
+
+ reg_buf = kzalloc(data_len, GFP_KERNEL);
+ if (!reg_buf) {
+ mutex_unlock(&plat_priv->dev_lock);
+ return -ENOMEM;
+ }
+
+ ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, reg_offset,
+ mem_type, data_len,
+ reg_buf);
+ if (ret) {
+ kfree(reg_buf);
+ mutex_unlock(&plat_priv->dev_lock);
+ return ret;
+ }
+
+ plat_priv->diag_reg_read_addr = reg_offset;
+ plat_priv->diag_reg_read_mem_type = mem_type;
+ plat_priv->diag_reg_read_len = data_len;
+ plat_priv->diag_reg_read_buf = reg_buf;
+ mutex_unlock(&plat_priv->dev_lock);
+
+ return count;
+}
+
+static int cnss_reg_read_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_reg_read_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_reg_read_debug_fops = {
+ .read = seq_read,
+ .write = cnss_reg_read_debug_write,
+ .open = cnss_reg_read_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_reg_write_debug_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "\nUsage: echo <mem_type> <offset> <reg_val> > <debugfs_path>/cnss/reg_write\n");
+
+ return 0;
+}
+
+static ssize_t cnss_reg_write_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_offset, mem_type, reg_val;
+ const char *delim = " ";
+ int ret = 0;
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Firmware is not ready yet\n");
+ return -EINVAL;
+ }
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &mem_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_offset))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_val))
+ return -EINVAL;
+
+ ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, reg_offset, mem_type,
+ sizeof(u32),
+ (u8 *)&reg_val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int cnss_reg_write_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_reg_write_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_reg_write_debug_fops = {
+ .read = seq_read,
+ .write = cnss_reg_write_debug_write,
+ .open = cnss_reg_write_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
};
+#ifdef CONFIG_CNSS2_DEBUG
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+ struct dentry *root_dentry = plat_priv->root_dentry;
+
+ debugfs_create_file("dev_boot", 0600, root_dentry, plat_priv,
+ &cnss_dev_boot_debug_fops);
+ debugfs_create_file("reg_read", 0600, root_dentry, plat_priv,
+ &cnss_reg_read_debug_fops);
+ debugfs_create_file("reg_write", 0600, root_dentry, plat_priv,
+ &cnss_reg_write_debug_fops);
+
+ return 0;
+}
+#else
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+#endif
+
int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
{
int ret = 0;
@@ -139,11 +454,16 @@ int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
cnss_pr_err("Unable to create debugfs %d\n", ret);
goto out;
}
+
plat_priv->root_dentry = root_dentry;
+
debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv,
&cnss_pin_connect_fops);
debugfs_create_file("stats", 0644, root_dentry, plat_priv,
&cnss_stats_fops);
+
+ cnss_create_debug_only_node(plat_priv);
+
out:
return ret;
}
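The new dev_boot, reg_read and reg_write nodes above all follow the same debugfs shape: seq_file output via single_open() plus a raw write handler that reaches its private data through the seq_file. A self-contained sketch of that shape with invented demo_* names:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

struct demo_priv {
    u32 value;
};

static int demo_show(struct seq_file *s, void *data)
{
    struct demo_priv *priv = s->private;

    seq_printf(s, "value: 0x%x\n", priv->value);
    return 0;
}

static ssize_t demo_write(struct file *fp, const char __user *user_buf,
              size_t count, loff_t *off)
{
    struct demo_priv *priv = ((struct seq_file *)fp->private_data)->private;
    char buf[16];
    unsigned int len;

    len = min(count, sizeof(buf) - 1);
    if (copy_from_user(buf, user_buf, len))
        return -EFAULT;
    buf[len] = '\0';

    if (kstrtou32(buf, 0, &priv->value))
        return -EINVAL;

    return count;
}

static int demo_open(struct inode *inode, struct file *file)
{
    return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
    .read = seq_read,
    .write = demo_write,
    .open = demo_open,
    .release = single_release,
    .owner = THIS_MODULE,
    .llseek = seq_lseek,
};

/* registration: debugfs_create_file("demo", 0600, parent, priv, &demo_fops); */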
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index e114d0c51a07..23a81ff071ee 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
enum cnss_debug_quirks {
LINK_DOWN_SELF_RECOVERY,
+ SKIP_DEVICE_BOOT,
};
unsigned long quirks;
@@ -1059,11 +1060,15 @@ static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
if (!pci_priv)
return -ENODEV;
+ if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state))
+ goto skip_driver_remove;
+
if (!plat_priv->driver_ops)
return -EINVAL;
cnss_driver_call_remove(plat_priv);
+skip_driver_remove:
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
cnss_pci_set_monitor_wake_intr(pci_priv, false);
cnss_pci_set_auto_suspended(pci_priv, 0);
@@ -1161,7 +1166,8 @@ static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
return -ENODEV;
if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
- test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state))
+ test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state))
goto skip_driver_remove;
if (!plat_priv->driver_ops)
@@ -2232,13 +2238,15 @@ static int cnss_probe(struct platform_device *plat_dev)
if (ret)
goto reset_ctx;
- ret = cnss_power_on_device(plat_priv);
- if (ret)
- goto free_res;
+ if (!test_bit(SKIP_DEVICE_BOOT, &quirks)) {
+ ret = cnss_power_on_device(plat_priv);
+ if (ret)
+ goto free_res;
- ret = cnss_pci_init(plat_priv);
- if (ret)
- goto power_off;
+ ret = cnss_pci_init(plat_priv);
+ if (ret)
+ goto power_off;
+ }
ret = cnss_register_esoc(plat_priv);
if (ret)
@@ -2275,6 +2283,7 @@ static int cnss_probe(struct platform_device *plat_dev)
ret);
init_completion(&plat_priv->power_up_complete);
+ mutex_init(&plat_priv->dev_lock);
cnss_pr_info("Platform driver probed successfully.\n");
@@ -2291,9 +2300,11 @@ unreg_bus_scale:
unreg_esoc:
cnss_unregister_esoc(plat_priv);
deinit_pci:
- cnss_pci_deinit(plat_priv);
+ if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
+ cnss_pci_deinit(plat_priv);
power_off:
- cnss_power_off_device(plat_priv);
+ if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
+ cnss_power_off_device(plat_priv);
free_res:
cnss_put_resources(plat_priv);
reset_ctx:
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index a2d9a02bde20..a5f9ce37b0ea 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -142,6 +142,7 @@ enum cnss_driver_state {
CNSS_DRIVER_RECOVERY,
CNSS_FW_BOOT_RECOVERY,
CNSS_DEV_ERR_NOTIFY,
+ CNSS_DRIVER_DEBUG,
};
struct cnss_recovery_data {
@@ -204,6 +205,11 @@ struct cnss_plat_data {
atomic_t pm_count;
struct timer_list fw_boot_timer;
struct completion power_up_complete;
+ struct mutex dev_lock; /* mutex for register access through debugfs */
+ u32 diag_reg_read_addr;
+ u32 diag_reg_read_mem_type;
+ u32 diag_reg_read_len;
+ u8 *diag_reg_read_buf;
};
void *cnss_bus_dev_to_bus_priv(struct device *dev);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index edc39af2d361..2c297fba5c34 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -787,36 +787,6 @@ int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
return 0;
}
-#ifdef CONFIG_CNSS_QCA6290
-#define PCI_MAX_BAR_SIZE 0xD00000
-
-static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = PCI_MAX_BAR_SIZE;
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (!len || !start)
- return NULL;
-
- if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
- if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO))
- return ioremap(start, len);
- else
- return ioremap_nocache(start, len);
- }
-
- return NULL;
-}
-#else
-static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
- unsigned long maxlen)
-{
- return pci_iomap(dev, bar, maxlen);
-}
-#endif
-
static struct cnss_msi_config msi_config = {
.total_vectors = 32,
.total_users = 4,
@@ -1003,7 +973,7 @@ static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
pci_set_master(pci_dev);
- pci_priv->bar = cnss_pci_iomap(pci_dev, PCI_BAR_NUM, 0);
+ pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
if (!pci_priv->bar) {
cnss_pr_err("Failed to do PCI IO map!\n");
ret = -EIO;
@@ -1493,6 +1463,11 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
cnss_pci_disable_msi(pci_priv);
goto disable_bus;
}
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n",
+ ret);
+ cnss_power_off_device(plat_priv);
break;
default:
cnss_pr_err("Unknown PCI device found: 0x%x\n",
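Dropping the QCA6290-specific cnss_pci_iomap() leaves the generic helper: pci_iomap() with maxlen 0 maps the whole BAR, so no hard-coded size constant is needed. A small sketch of that usage (the bar number and function names are placeholders):

#include <linux/pci.h>

#define DEMO_PCI_BAR_NUM 0

static int demo_map_bar(struct pci_dev *pdev, void __iomem **bar)
{
	*bar = pci_iomap(pdev, DEMO_PCI_BAR_NUM, 0);	/* 0 => whole BAR */
	if (!*bar)
		return -EIO;
	return 0;
}

static void demo_unmap_bar(struct pci_dev *pdev, void __iomem *bar)
{
	pci_iounmap(pdev, bar);
}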
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index d1c0423b4517..e010e2c39f02 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -39,7 +39,7 @@ static bool daemon_support;
module_param(daemon_support, bool, 0600);
MODULE_PARM_DESC(daemon_support, "User space has cnss-daemon support or not");
-static bool bdf_bypass = true;
+static bool bdf_bypass;
#ifdef CONFIG_CNSS2_DEBUG
module_param(bdf_bypass, bool, 0600);
MODULE_PARM_DESC(bdf_bypass, "If BDF is not found, send dummy BDF to FW");
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d59769e858f4..019d7165a045 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2539,7 +2539,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
tasklet_hrtimer_init(&data->beacon_timer,
mac80211_hwsim_beacon,
- CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock_bh(&hwsim_radio_lock);
list_add_tail(&data->list, &hwsim_radios);
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 44f059f7f34e..9ebe00ea8f81 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -71,7 +71,7 @@
* only support SPI for 12xx - this code should be reworked when 18xx
* support is introduced
*/
-#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+#define SPI_AGGR_BUFFER_SIZE (4 * SZ_4K)
/* Maximum number of SPI write chunks */
#define WSPI_MAX_NUM_OF_CHUNKS \
diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
index 82b90ad00f8b..d94bd90f64da 100644
--- a/drivers/net/wireless/wcnss/wcnss_vreg.c
+++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -71,23 +71,33 @@ static int is_power_on;
struct vregs_info {
const char * const name;
+ const char * const curr;
+ const char * const volt;
int state;
+ bool required;
struct regulator *regulator;
};
/* IRIS regulators for Pronto hardware */
-static struct vregs_info iris_vregs_pronto[] = {
- {"qcom,iris-vddxo", VREG_NULL_CONFIG, NULL},
- {"qcom,iris-vddrfa", VREG_NULL_CONFIG, NULL},
- {"qcom,iris-vddpa", VREG_NULL_CONFIG, NULL},
- {"qcom,iris-vdddig", VREG_NULL_CONFIG, NULL},
+static struct vregs_info iris_vregs[] = {
+ {"qcom,iris-vddxo", "qcom,iris-vddxo-current",
+ "qcom,iris-vddxo-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,iris-vddrfa", "qcom,iris-vddrfa-current",
+ "qcom,iris-vddrfa-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,iris-vddpa", "qcom,iris-vddpa-current",
+ "qcom,iris-vddpa-voltage-level", VREG_NULL_CONFIG, false, NULL},
+ {"qcom,iris-vdddig", "qcom,iris-vdddig-current",
+ "qcom,iris-vdddig-voltage-level", VREG_NULL_CONFIG, true, NULL},
};
/* WCNSS regulators for Pronto hardware */
static struct vregs_info pronto_vregs[] = {
- {"qcom,pronto-vddmx", VREG_NULL_CONFIG, NULL},
- {"qcom,pronto-vddcx", VREG_NULL_CONFIG, NULL},
- {"qcom,pronto-vddpx", VREG_NULL_CONFIG, NULL},
+ {"qcom,pronto-vddmx", "qcom,pronto-vddmx-current",
+ "qcom,vddmx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,pronto-vddcx", "qcom,pronto-vddcx-current",
+ "qcom,vddcx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,pronto-vddpx", "qcom,pronto-vddpx-current",
+ "qcom,vddpx-voltage-level", VREG_NULL_CONFIG, true, NULL},
};
struct host_driver {
@@ -184,6 +194,129 @@ int validate_iris_chip_id(u32 reg)
}
}
+static void wcnss_free_regulator(void)
+{
+ int vreg_i;
+
+ /* Free pronto voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < PRONTO_REGULATORS; vreg_i++) {
+ if (pronto_vregs[vreg_i].state) {
+ regulator_put(pronto_vregs[vreg_i].regulator);
+ pronto_vregs[vreg_i].state = VREG_NULL_CONFIG;
+ }
+ }
+
+ /* Free IRIS voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < IRIS_REGULATORS; vreg_i++) {
+ if (iris_vregs[vreg_i].state) {
+ regulator_put(iris_vregs[vreg_i].regulator);
+ iris_vregs[vreg_i].state = VREG_NULL_CONFIG;
+ }
+ }
+}
+
+static int
+wcnss_dt_parse_vreg_level(struct device *dev, int index,
+ const char *current_vreg_name, const char *vreg_name,
+ struct vregs_level *vlevel)
+{
+ int ret = 0;
+ /* array used to store nominal, low and high voltage values */
+ u32 voltage_levels[3], current_vreg;
+
+ ret = of_property_read_u32_array(dev->of_node, vreg_name,
+ voltage_levels,
+ ARRAY_SIZE(voltage_levels));
+ if (ret) {
+ dev_err(dev, "error reading %s property\n", vreg_name);
+ return ret;
+ }
+
+ vlevel[index].nominal_min = voltage_levels[0];
+ vlevel[index].low_power_min = voltage_levels[1];
+ vlevel[index].max_voltage = voltage_levels[2];
+
+ ret = of_property_read_u32(dev->of_node, current_vreg_name,
+ &current_vreg);
+ if (ret) {
+ dev_err(dev, "error reading %s property\n", current_vreg_name);
+ return ret;
+ }
+
+ vlevel[index].uA_load = current_vreg;
+
+ return ret;
+}
+
+int
+wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
+ struct device *dev)
+{
+ int rc, vreg_i;
+
+ /* Parse pronto voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < PRONTO_REGULATORS; vreg_i++) {
+ pronto_vregs[vreg_i].regulator =
+ regulator_get(dev, pronto_vregs[vreg_i].name);
+ if (IS_ERR(pronto_vregs[vreg_i].regulator)) {
+ if (pronto_vregs[vreg_i].required) {
+ rc = PTR_ERR(pronto_vregs[vreg_i].regulator);
+ dev_err(dev, "regulator get of %s failed (%d)\n",
+ pronto_vregs[vreg_i].name, rc);
+ goto wcnss_vreg_get_err;
+ } else {
+ dev_dbg(dev, "Skip optional regulator configuration: %s\n",
+ pronto_vregs[vreg_i].name);
+ continue;
+ }
+ }
+
+ pronto_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
+ rc = wcnss_dt_parse_vreg_level(dev, vreg_i,
+ pronto_vregs[vreg_i].curr,
+ pronto_vregs[vreg_i].volt,
+ wlan_config->pronto_vlevel);
+ if (rc) {
+ dev_err(dev, "error reading voltage-level property\n");
+ goto wcnss_vreg_get_err;
+ }
+ }
+
+ /* Parse iris voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < IRIS_REGULATORS; vreg_i++) {
+ iris_vregs[vreg_i].regulator =
+ regulator_get(dev, iris_vregs[vreg_i].name);
+ if (IS_ERR(iris_vregs[vreg_i].regulator)) {
+ if (iris_vregs[vreg_i].required) {
+ rc = PTR_ERR(iris_vregs[vreg_i].regulator);
+ dev_err(dev, "regulator get of %s failed (%d)\n",
+ iris_vregs[vreg_i].name, rc);
+ goto wcnss_vreg_get_err;
+ } else {
+ dev_dbg(dev, "Skip optional regulator configuration: %s\n",
+ iris_vregs[vreg_i].name);
+ continue;
+ }
+ }
+
+ iris_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
+ rc = wcnss_dt_parse_vreg_level(dev, vreg_i,
+ iris_vregs[vreg_i].curr,
+ iris_vregs[vreg_i].volt,
+ wlan_config->iris_vlevel);
+ if (rc) {
+ dev_err(dev, "error reading voltage-level property\n");
+ goto wcnss_vreg_get_err;
+ }
+ }
+
+ return 0;
+
+wcnss_vreg_get_err:
+ wcnss_free_regulator();
+ return rc;
+}
+
void wcnss_iris_reset(u32 reg, void __iomem *pmu_conf_reg)
{
/* Reset IRIS */
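wcnss_dt_parse_vreg_level() above fills nominal/low-power/max voltages plus a load current per supply. The sketch below shows how such a parsed level is typically applied through the regulator API; units are assumed to be microvolts and microamps, and the demo_* names are illustrative:

#include <linux/regulator/consumer.h>
#include <linux/types.h>

struct demo_vlevel {
	u32 nominal_min;
	u32 low_power_min;
	u32 max_voltage;
	u32 uA_load;
};

static int demo_apply_vlevel(struct regulator *reg,
			     const struct demo_vlevel *lvl)
{
	int rc;

	rc = regulator_set_voltage(reg, lvl->nominal_min, lvl->max_voltage);
	if (rc)
		return rc;

	/* Vote for the expected load so the framework can pick PWM/PFM. */
	rc = regulator_set_load(reg, lvl->uA_load);
	return rc < 0 ? rc : 0;
}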
@@ -419,11 +552,11 @@ static void wcnss_vregs_off(struct vregs_info regulators[], uint size,
/* Remove PWM mode */
if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
- rc = regulator_set_optimum_mode(
- regulators[i].regulator, 0);
- if (rc < 0)
- pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
- regulators[i].name, rc);
+ rc = regulator_set_load(regulators[i].regulator, 0);
+ if (rc < 0) {
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
+ }
}
/* Set voltage to lowest level */
@@ -478,18 +611,10 @@ static int wcnss_vregs_on(struct device *dev,
}
for (i = 0; i < size; i++) {
- /* Get regulator source */
- regulators[i].regulator =
- regulator_get(dev, regulators[i].name);
- if (IS_ERR(regulators[i].regulator)) {
- rc = PTR_ERR(regulators[i].regulator);
- pr_err("regulator get of %s failed (%d)\n",
- regulators[i].name, rc);
- goto fail;
- }
- regulators[i].state |= VREG_GET_REGULATOR_MASK;
- reg_cnt = regulator_count_voltages(regulators[i].regulator);
+ if (regulators[i].state == VREG_NULL_CONFIG)
+ continue;
+ reg_cnt = regulator_count_voltages(regulators[i].regulator);
/* Set voltage to nominal. Exclude swtiches e.g. LVS */
if ((voltage_level[i].nominal_min ||
voltage_level[i].max_voltage) && (reg_cnt > 0)) {
@@ -518,11 +643,11 @@ static int wcnss_vregs_on(struct device *dev,
/* Vote for PWM/PFM mode if needed */
if (voltage_level[i].uA_load && (reg_cnt > 0)) {
- rc = regulator_set_optimum_mode(regulators[i].regulator,
- voltage_level[i].uA_load);
+ rc = regulator_set_load(regulators[i].regulator,
+ voltage_level[i].uA_load);
if (rc < 0) {
- pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
- regulators[i].name, rc);
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
goto fail;
}
regulators[i].state |= VREG_OPTIMUM_MODE_MASK;
@@ -551,8 +676,7 @@ static void wcnss_iris_vregs_off(enum wcnss_hw_type hw_type,
{
switch (hw_type) {
case WCNSS_PRONTO_HW:
- wcnss_vregs_off(iris_vregs_pronto,
- ARRAY_SIZE(iris_vregs_pronto),
+ wcnss_vregs_off(iris_vregs, ARRAY_SIZE(iris_vregs),
cfg->iris_vlevel);
break;
default:
@@ -569,8 +693,7 @@ static int wcnss_iris_vregs_on(struct device *dev,
switch (hw_type) {
case WCNSS_PRONTO_HW:
- ret = wcnss_vregs_on(dev, iris_vregs_pronto,
- ARRAY_SIZE(iris_vregs_pronto),
+ ret = wcnss_vregs_on(dev, iris_vregs, ARRAY_SIZE(iris_vregs),
cfg->iris_vlevel);
break;
default:
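The regulator tables above now carry a required flag so optional supplies (e.g. vddpa) can be absent on some boards. A condensed sketch of that required-vs-optional acquisition pattern, with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

struct demo_supply {
	const char *name;
	bool required;
	struct regulator *reg;
};

static int demo_get_supplies(struct device *dev,
			     struct demo_supply *sup, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		sup[i].reg = regulator_get(dev, sup[i].name);
		if (IS_ERR(sup[i].reg)) {
			int rc = PTR_ERR(sup[i].reg);

			sup[i].reg = NULL;
			if (!sup[i].required)
				continue;	/* optional supply: skip it */

			/* required supply: release what was already taken */
			while (--i >= 0)
				if (sup[i].reg)
					regulator_put(sup[i].reg);
			return rc;
		}
	}
	return 0;
}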
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 9347882fba92..b604f61e1a05 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -36,6 +36,8 @@
#include <linux/qpnp/qpnp-adc.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_qos.h>
+#include <linux/bitops.h>
+#include <soc/qcom/socinfo.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
@@ -56,11 +58,27 @@
#define WCNSS_PM_QOS_TIMEOUT 15000
#define IS_CAL_DATA_PRESENT 0
#define WAIT_FOR_CBC_IND 2
+#define WCNSS_DUAL_BAND_CAPABILITY_OFFSET BIT(8)
/* module params */
#define WCNSS_CONFIG_UNSPECIFIED (-1)
#define UINT32_MAX (0xFFFFFFFFU)
+#define SUBSYS_NOTIF_MIN_INDEX 0
+#define SUBSYS_NOTIF_MAX_INDEX 9
+char *wcnss_subsys_notif_type[] = {
+ "SUBSYS_BEFORE_SHUTDOWN",
+ "SUBSYS_AFTER_SHUTDOWN",
+ "SUBSYS_BEFORE_POWERUP",
+ "SUBSYS_AFTER_POWERUP",
+ "SUBSYS_RAMDUMP_NOTIFICATION",
+ "SUBSYS_POWERUP_FAILURE",
+ "SUBSYS_PROXY_VOTE",
+ "SUBSYS_PROXY_UNVOTE",
+ "SUBSYS_SOC_RESET",
+ "SUBSYS_NOTIF_TYPE_COUNT"
+};
+
static int has_48mhz_xo = WCNSS_CONFIG_UNSPECIFIED;
module_param(has_48mhz_xo, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(has_48mhz_xo, "Is an external 48 MHz XO present");
@@ -119,6 +137,8 @@ static DEFINE_SPINLOCK(reg_spinlock);
#define PRONTO_PMU_COM_CSR_OFFSET 0x1040
#define PRONTO_PMU_SOFT_RESET_OFFSET 0x104C
+#define PRONTO_QFUSE_DUAL_BAND_OFFSET 0x0018
+
#define A2XB_CFG_OFFSET 0x00
#define A2XB_INT_SRC_OFFSET 0x0c
#define A2XB_TSTBUS_CTRL_OFFSET 0x14
@@ -183,11 +203,9 @@ static DEFINE_SPINLOCK(reg_spinlock);
#define WCNSS_MAX_BUILD_VER_LEN 256
#define WCNSS_MAX_CMD_LEN (128)
#define WCNSS_MIN_CMD_LEN (3)
-#define WCNSS_MIN_SERIAL_LEN (6)
/* control messages from userspace */
#define WCNSS_USR_CTRL_MSG_START 0x00000000
-#define WCNSS_USR_SERIAL_NUM (WCNSS_USR_CTRL_MSG_START + 1)
#define WCNSS_USR_HAS_CAL_DATA (WCNSS_USR_CTRL_MSG_START + 2)
#define WCNSS_USR_WLAN_MAC_ADDR (WCNSS_USR_CTRL_MSG_START + 3)
@@ -381,6 +399,7 @@ static struct {
void __iomem *pronto_saw2_base;
void __iomem *pronto_pll_base;
void __iomem *pronto_mcu_base;
+ void __iomem *pronto_qfuse;
void __iomem *wlan_tx_status;
void __iomem *wlan_tx_phy_aborts;
void __iomem *wlan_brdg_err_source;
@@ -423,6 +442,9 @@ static struct {
int pc_disabled;
struct delayed_work wcnss_pm_qos_del_req;
struct mutex pm_qos_mutex;
+ struct clk *snoc_wcnss;
+ unsigned int snoc_wcnss_clock_freq;
+ bool is_dual_band_disabled;
} *penv = NULL;
static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
@@ -474,34 +496,6 @@ static ssize_t wcnss_wlan_macaddr_show(struct device *dev,
static DEVICE_ATTR(wcnss_mac_addr, S_IRUSR | S_IWUSR,
wcnss_wlan_macaddr_show, wcnss_wlan_macaddr_store);
-static ssize_t wcnss_serial_number_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (!penv)
- return -ENODEV;
-
- return scnprintf(buf, PAGE_SIZE, "%08X\n", penv->serial_number);
-}
-
-static ssize_t wcnss_serial_number_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned int value;
-
- if (!penv)
- return -ENODEV;
-
- if (sscanf(buf, "%08X", &value) != 1)
- return -EINVAL;
-
- penv->serial_number = value;
- return count;
-}
-
-static DEVICE_ATTR(serial_number, S_IRUSR | S_IWUSR,
- wcnss_serial_number_show, wcnss_serial_number_store);
-
-
static ssize_t wcnss_thermal_mitigation_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -595,7 +589,31 @@ void wcnss_pronto_is_a2xb_bus_stall(void *tst_addr, u32 fifo_mask, char *type)
}
}
-/* Log pronto debug registers before sending reset interrupt */
+int wcnss_get_dual_band_capability_info(struct platform_device *pdev)
+{
+ u32 reg = 0;
+ struct resource *res;
+
+ res = platform_get_resource_byname(
+ pdev, IORESOURCE_MEM, "pronto_qfuse");
+ if (!res)
+ return -EINVAL;
+
+ penv->pronto_qfuse = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(penv->pronto_qfuse))
+ return -ENOMEM;
+
+ reg = readl_relaxed(penv->pronto_qfuse +
+ PRONTO_QFUSE_DUAL_BAND_OFFSET);
+ if (reg & WCNSS_DUAL_BAND_CAPABILITY_OFFSET)
+ penv->is_dual_band_disabled = true;
+ else
+ penv->is_dual_band_disabled = false;
+
+ return 0;
+}
+
+/* Log pronto debug registers during the SSR timeout callback */
void wcnss_pronto_log_debug_regs(void)
{
void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr;
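wcnss_get_dual_band_capability_info() maps a named fuse region and tests a capability bit. The same pattern in isolation, with a placeholder resource name and bit (the driver uses "pronto_qfuse" and BIT(8)); propagating PTR_ERR() from devm_ioremap_resource() is shown here as the more conventional choice than a fixed -ENOMEM:

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_read_fuse_bit(struct platform_device *pdev, bool *disabled)
{
	struct resource *res;
	void __iomem *base;
	u32 val;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "demo_fuse");
	if (!res)
		return -EINVAL;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	val = readl_relaxed(base);
	*disabled = !!(val & BIT(8));
	return 0;
}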
@@ -1146,13 +1164,9 @@ static int wcnss_create_sysfs(struct device *dev)
if (!dev)
return -ENODEV;
- ret = device_create_file(dev, &dev_attr_serial_number);
- if (ret)
- return ret;
-
ret = device_create_file(dev, &dev_attr_thermal_mitigation);
if (ret)
- goto remove_serial;
+ return ret;
ret = device_create_file(dev, &dev_attr_wcnss_version);
if (ret)
@@ -1168,8 +1182,6 @@ remove_version:
device_remove_file(dev, &dev_attr_wcnss_version);
remove_thermal:
device_remove_file(dev, &dev_attr_thermal_mitigation);
-remove_serial:
- device_remove_file(dev, &dev_attr_serial_number);
return ret;
}
@@ -1177,7 +1189,6 @@ remove_serial:
static void wcnss_remove_sysfs(struct device *dev)
{
if (dev) {
- device_remove_file(dev, &dev_attr_serial_number);
device_remove_file(dev, &dev_attr_thermal_mitigation);
device_remove_file(dev, &dev_attr_wcnss_version);
device_remove_file(dev, &dev_attr_wcnss_mac_addr);
@@ -1625,8 +1636,13 @@ EXPORT_SYMBOL(wcnss_unregister_thermal_mitigation);
unsigned int wcnss_get_serial_number(void)
{
- if (penv)
+ if (penv) {
+ penv->serial_number = socinfo_get_serial_number();
+ pr_info("%s: Device serial number: %u\n",
+ __func__, penv->serial_number);
return penv->serial_number;
+ }
+
return 0;
}
EXPORT_SYMBOL(wcnss_get_serial_number);
@@ -1683,6 +1699,14 @@ int wcnss_wlan_iris_xo_mode(void)
}
EXPORT_SYMBOL(wcnss_wlan_iris_xo_mode);
+int wcnss_wlan_dual_band_disabled(void)
+{
+ if (penv && penv->pdev)
+ return penv->is_dual_band_disabled;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(wcnss_wlan_dual_band_disabled);
void wcnss_suspend_notify(void)
{
@@ -2608,15 +2632,6 @@ void process_usr_ctrl_cmd(u8 *buf, size_t len)
switch (cmd) {
- case WCNSS_USR_SERIAL_NUM:
- if (WCNSS_MIN_SERIAL_LEN > len) {
- pr_err("%s: Invalid serial number\n", __func__);
- return;
- }
- penv->serial_number = buf[2] << 24 | buf[3] << 16
- | buf[4] << 8 | buf[5];
- break;
-
case WCNSS_USR_HAS_CAL_DATA:
if (1 < buf[2])
pr_err("%s: Invalid data for cal %d\n", __func__,
@@ -2674,145 +2689,34 @@ static struct miscdevice wcnss_usr_ctrl = {
};
static int
-wcnss_dt_parse_vreg_level(struct device *dev, int index,
- const char *current_vreg_name, const char *vreg_name,
- struct vregs_level *vlevel)
-{
- int ret = 0;
- /* array used to store nominal, low and high voltage values
- */
- u32 voltage_levels[3], current_vreg;
-
- ret = of_property_read_u32_array(dev->of_node, vreg_name,
- voltage_levels,
- ARRAY_SIZE(voltage_levels));
- if (ret) {
- dev_err(dev, "error reading %s property\n", vreg_name);
- return ret;
- }
-
- vlevel[index].nominal_min = voltage_levels[0];
- vlevel[index].low_power_min = voltage_levels[1];
- vlevel[index].max_voltage = voltage_levels[2];
-
- ret = of_property_read_u32(dev->of_node, current_vreg_name,
- &current_vreg);
- if (ret) {
- dev_err(dev, "error reading %s property\n", current_vreg_name);
- return ret;
- }
-
- vlevel[index].uA_load = current_vreg;
-
- return ret;
-}
-
-static int
wcnss_trigger_config(struct platform_device *pdev)
{
int ret;
- int rc, index = 0;
+ int rc;
struct qcom_wcnss_opts *pdata;
struct resource *res;
int is_pronto_vadc;
int is_pronto_v3;
int pil_retry = 0;
- int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-pronto-hw");
+ struct device_node *node = (&pdev->dev)->of_node;
+ int has_pronto_hw = of_property_read_bool(node, "qcom,has-pronto-hw");
- is_pronto_vadc = of_property_read_bool(pdev->dev.of_node,
- "qcom,is-pronto-vadc");
+ is_pronto_vadc = of_property_read_bool(node, "qcom,is-pronto-vadc");
+ is_pronto_v3 = of_property_read_bool(node, "qcom,is-pronto-v3");
- is_pronto_v3 = of_property_read_bool(pdev->dev.of_node,
- "qcom,is-pronto-v3");
+ penv->is_vsys_adc_channel =
+ of_property_read_bool(node, "qcom,has-vsys-adc-channel");
+ penv->is_a2xb_split_reg =
+ of_property_read_bool(node, "qcom,has-a2xb-split-reg");
- penv->is_vsys_adc_channel = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-vsys-adc-channel");
-
- penv->is_a2xb_split_reg = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-a2xb-split-reg");
-
- if (of_property_read_u32(pdev->dev.of_node,
- "qcom,wlan-rx-buff-count", &penv->wlan_rx_buff_count)) {
+ if (of_property_read_u32(node, "qcom,wlan-rx-buff-count",
+ &penv->wlan_rx_buff_count)) {
penv->wlan_rx_buff_count = WCNSS_DEF_WLAN_RX_BUFF_COUNT;
}
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,pronto-vddmx-current",
- "qcom,vddmx-voltage-level",
- penv->wlan_config.pronto_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
- }
-
- index++;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,pronto-vddcx-current",
- "qcom,vddcx-voltage-level",
- penv->wlan_config.pronto_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
- }
-
- index++;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,pronto-vddpx-current",
- "qcom,vddpx-voltage-level",
- penv->wlan_config.pronto_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
- }
-
- /* assign 0 to index now onwards, index variable re used to
- * represent iris regulator index
- */
- index = 0;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,iris-vddxo-current",
- "qcom,iris-vddxo-voltage-level",
- penv->wlan_config.iris_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
- }
-
- index++;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,iris-vddrfa-current",
- "qcom,iris-vddrfa-voltage-level",
- penv->wlan_config.iris_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
- }
-
- index++;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,iris-vddpa-current",
- "qcom,iris-vddpa-voltage-level",
- penv->wlan_config.iris_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
- }
-
- index++;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,iris-vdddig-current",
- "qcom,iris-vdddig-voltage-level",
- penv->wlan_config.iris_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
+ rc = wcnss_parse_voltage_regulator(&penv->wlan_config, &pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to parse voltage regulators\n");
goto fail;
}
@@ -2825,8 +2729,8 @@ wcnss_trigger_config(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (WCNSS_CONFIG_UNSPECIFIED == has_48mhz_xo) {
if (has_pronto_hw) {
- has_48mhz_xo = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-48mhz-xo");
+ has_48mhz_xo =
+ of_property_read_bool(node, "qcom,has-48mhz-xo");
} else {
has_48mhz_xo = pdata->has_48mhz_xo;
}
@@ -2837,8 +2741,8 @@ wcnss_trigger_config(struct platform_device *pdev)
penv->wlan_config.is_pronto_v3 = is_pronto_v3;
if (WCNSS_CONFIG_UNSPECIFIED == has_autodetect_xo && has_pronto_hw) {
- has_autodetect_xo = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-autodetect-xo");
+ has_autodetect_xo =
+ of_property_read_bool(node, "qcom,has-autodetect-xo");
}
penv->thermal_mitigation = 0;
@@ -3118,6 +3022,16 @@ wcnss_trigger_config(struct platform_device *pdev)
__func__);
goto fail_ioremap2;
}
+
+ if (of_property_read_bool(node,
+ "qcom,is-dual-band-disabled")) {
+ ret = wcnss_get_dual_band_capability_info(pdev);
+ if (ret) {
+ pr_err(
+ "%s: failed to get dual band info\n", __func__);
+ goto fail_ioremap2;
+ }
+ }
}
penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
@@ -3129,11 +3043,26 @@ wcnss_trigger_config(struct platform_device *pdev)
penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
}
+ penv->snoc_wcnss = devm_clk_get(&penv->pdev->dev, "snoc_wcnss");
+ if (IS_ERR(penv->snoc_wcnss)) {
+ pr_err("%s: couldn't get snoc_wcnss\n", __func__);
+ penv->snoc_wcnss = NULL;
+ } else {
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,snoc-wcnss-clock-freq",
+ &penv->snoc_wcnss_clock_freq)) {
+ pr_debug("%s: wcnss snoc clock frequency is not defined\n",
+ __func__);
+ devm_clk_put(&penv->pdev->dev, penv->snoc_wcnss);
+ penv->snoc_wcnss = NULL;
+ }
+ }
+
if (penv->wlan_config.is_pronto_vadc) {
penv->vadc_dev = qpnp_get_vadc(&penv->pdev->dev, "wcnss");
if (IS_ERR(penv->vadc_dev)) {
- pr_err("%s: vadc get failed\n", __func__);
+ pr_debug("%s: vadc get failed\n", __func__);
penv->vadc_dev = NULL;
} else {
rc = wcnss_get_battery_volt(&penv->wlan_config.vbatt);
@@ -3191,6 +3120,38 @@ fail:
return ret;
}
+/* The driver needs to vote for the SNOC clock directly.
+ * wcnss_snoc_vote() is called to enable or disable
+ * that vote.
+ */
+void wcnss_snoc_vote(bool clk_chk_en)
+{
+ int rc;
+
+ if (!penv->snoc_wcnss) {
+ pr_err("%s: couldn't get clk snoc_wcnss\n", __func__);
+ return;
+ }
+
+ if (clk_chk_en) {
+ rc = clk_set_rate(penv->snoc_wcnss,
+ penv->snoc_wcnss_clock_freq);
+ if (rc) {
+ pr_err("%s: snoc_wcnss clk_set_rate failed, ret=%d\n",
+ __func__, rc);
+ return;
+ }
+
+ if (clk_prepare_enable(penv->snoc_wcnss)) {
+ pr_err("%s: snoc_wcnss clk enable failed\n", __func__);
+ return;
+ }
+ } else {
+ clk_disable_unprepare(penv->snoc_wcnss);
+ }
+}
+EXPORT_SYMBOL(wcnss_snoc_vote);
+
/* wlan prop driver cannot invoke cancel_work_sync
* function directly, so to invoke this function it
* call wcnss_flush_work function
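wcnss_snoc_vote() follows the usual clock vote sequence: set the rate, then prepare and enable; disable and unprepare to drop the vote. A compact sketch of that sequence (demo_clk_vote is hypothetical, the clock handle and rate are assumed to come from devm_clk_get() and DT as shown above):

#include <linux/clk.h>

static int demo_clk_vote(struct clk *clk, unsigned long rate, bool enable)
{
	int rc;

	if (!enable) {
		clk_disable_unprepare(clk);
		return 0;
	}

	rc = clk_set_rate(clk, rate);
	if (rc)
		return rc;

	return clk_prepare_enable(clk);
}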
@@ -3377,7 +3338,15 @@ static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
struct notif_data *data = (struct notif_data *)ss_handle;
int ret, xo_mode;
- pr_info("%s: wcnss notification event: %lu\n", __func__, code);
+ if (!(code >= SUBSYS_NOTIF_MIN_INDEX &&
+ code <= SUBSYS_NOTIF_MAX_INDEX)) {
+ pr_debug("%s: Invalid subsystem notification code: %lu\n",
+ __func__, code);
+ return NOTIFY_DONE;
+ }
+
+ pr_debug("%s: wcnss notification event: %lu : %s\n",
+ __func__, code, wcnss_subsys_notif_type[code]);
if (code == SUBSYS_PROXY_VOTE) {
if (pdev && pwlanconfig) {
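The notifier callback now translates the notification code through wcnss_subsys_notif_type[], so the code must be range-checked before it is used as an index. A self-contained sketch of that bounds-checked lookup (names are illustrative):

#include <linux/kernel.h>

static const char * const demo_notif_name[] = {
	"BEFORE_SHUTDOWN", "AFTER_SHUTDOWN", "BEFORE_POWERUP",
	"AFTER_POWERUP", "RAMDUMP", "POWERUP_FAILURE",
	"PROXY_VOTE", "PROXY_UNVOTE", "SOC_RESET",
};

static const char *demo_notif_to_name(unsigned long code)
{
	/* Validate before indexing; the code comes from the notifier chain. */
	if (code >= ARRAY_SIZE(demo_notif_name))
		return "UNKNOWN";
	return demo_notif_name[code];
}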
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 532db28145c7..a5d7332dfce5 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -210,14 +210,14 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
struct sk_buff *skb;
int r;
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "%s\n", __func__);
-
if (!phy || irq != phy->i2c_dev->irq) {
WARN_ON_ONCE(1);
return IRQ_NONE;
}
+ client = phy->i2c_dev;
+ dev_dbg(&client->dev, "%s\n", __func__);
+
r = fdp_nci_i2c_read(phy, &skb);
if (r == -EREMOTEIO)
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index f8dcdf4b24f6..af62c4c854f3 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -459,7 +459,7 @@ int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work);
snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq",
- dev_name(priv->dev));
+ dev_name(&priv->ndev->nfc_dev->dev));
priv->fw_dnld.rx_wq = create_singlethread_workqueue(name);
if (!priv->fw_dnld.rx_wq)
return -ENOMEM;
@@ -496,6 +496,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
{
struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld;
+ int res;
if (!priv->support_fw_dnld)
return -ENOTSUPP;
@@ -511,7 +512,9 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
*/
/* Retrieve FW binary */
- if (request_firmware(&fw_dnld->fw, firmware_name, priv->dev) < 0) {
+ res = request_firmware(&fw_dnld->fw, firmware_name,
+ &ndev->nfc_dev->dev);
+ if (res < 0) {
nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name);
return -ENOENT;
}
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 51c8240a1672..a446590a71ca 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -124,12 +124,13 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
memcpy(&priv->config, pdata, sizeof(*pdata));
if (priv->config.reset_n_io) {
- rc = devm_gpio_request_one(dev,
- priv->config.reset_n_io,
- GPIOF_OUT_INIT_LOW,
- "nfcmrvl_reset_n");
- if (rc < 0)
+ rc = gpio_request_one(priv->config.reset_n_io,
+ GPIOF_OUT_INIT_LOW,
+ "nfcmrvl_reset_n");
+ if (rc < 0) {
+ priv->config.reset_n_io = 0;
nfc_err(dev, "failed to request reset_n io\n");
+ }
}
if (phy == NFCMRVL_PHY_SPI) {
@@ -154,7 +155,13 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
if (!priv->ndev) {
nfc_err(dev, "nci_allocate_device failed\n");
rc = -ENOMEM;
- goto error;
+ goto error_free_gpio;
+ }
+
+ rc = nfcmrvl_fw_dnld_init(priv);
+ if (rc) {
+ nfc_err(dev, "failed to initialize FW download %d\n", rc);
+ goto error_free_dev;
}
nci_set_drvdata(priv->ndev, priv);
@@ -162,24 +169,22 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
rc = nci_register_device(priv->ndev);
if (rc) {
nfc_err(dev, "nci_register_device failed %d\n", rc);
- goto error_free_dev;
+ goto error_fw_dnld_deinit;
}
/* Ensure that controller is powered off */
nfcmrvl_chip_halt(priv);
- rc = nfcmrvl_fw_dnld_init(priv);
- if (rc) {
- nfc_err(dev, "failed to initialize FW download %d\n", rc);
- goto error_free_dev;
- }
-
nfc_info(dev, "registered with nci successfully\n");
return priv;
+error_fw_dnld_deinit:
+ nfcmrvl_fw_dnld_deinit(priv);
error_free_dev:
nci_free_device(priv->ndev);
-error:
+error_free_gpio:
+ if (priv->config.reset_n_io)
+ gpio_free(priv->config.reset_n_io);
kfree(priv);
return ERR_PTR(rc);
}
@@ -195,7 +200,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
nfcmrvl_fw_dnld_deinit(priv);
if (priv->config.reset_n_io)
- devm_gpio_free(priv->dev, priv->config.reset_n_io);
+ gpio_free(priv->config.reset_n_io);
nci_unregister_device(ndev);
nci_free_device(ndev);
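The reordered probe path initializes firmware-download support before nci_register_device() and unwinds in exactly the reverse order on failure. A skeleton of that goto-unwind shape, with trivial demo_* stubs standing in for the real gpio/NCI/firmware steps (all names here are hypothetical):

static int demo_request_gpio(void) { return 0; }
static void demo_free_gpio(void) { }
static int demo_alloc_dev(void) { return 0; }
static void demo_free_dev(void) { }
static int demo_fw_dnld_init(void) { return 0; }
static void demo_fw_dnld_deinit(void) { }
static int demo_register_dev(void) { return 0; }

static int demo_register(void)
{
	int rc;

	rc = demo_request_gpio();
	if (rc)
		return rc;

	rc = demo_alloc_dev();
	if (rc)
		goto err_free_gpio;

	/* FW download support must exist before the device is registered. */
	rc = demo_fw_dnld_init();
	if (rc)
		goto err_free_dev;

	rc = demo_register_dev();
	if (rc)
		goto err_fw_dnld_deinit;

	return 0;

err_fw_dnld_deinit:
	demo_fw_dnld_deinit();
err_free_dev:
	demo_free_dev();
err_free_gpio:
	demo_free_gpio();
	return rc;
}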
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 83a99e38e7bd..6c0c301611c4 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -109,6 +109,7 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
struct nfcmrvl_private *priv;
struct nfcmrvl_platform_data *pdata = NULL;
struct nfcmrvl_platform_data config;
+ struct device *dev = nu->tty->dev;
/*
* Platform data cannot be used here since usually it is already used
@@ -116,9 +117,8 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
* and check if DT entries were added.
*/
- if (nu->tty->dev->parent && nu->tty->dev->parent->of_node)
- if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node,
- &config) == 0)
+ if (dev && dev->parent && dev->parent->of_node)
+ if (nfcmrvl_uart_parse_dt(dev->parent->of_node, &config) == 0)
pdata = &config;
if (!pdata) {
@@ -131,7 +131,7 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
}
priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops,
- nu->tty->dev, pdata);
+ dev, pdata);
if (IS_ERR(priv))
return PTR_ERR(priv);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index efb2c1ceef98..957234272ef7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1205,10 +1205,13 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
struct btt *btt = bdev->bd_disk->private_data;
+ int rc;
- btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, 0);
- return 0;
+ rc = btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ if (rc == 0)
+ page_endio(page, rw & WRITE, 0);
+
+ return rc;
}
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index b7971d410b60..74e5360c53f0 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -88,7 +88,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", (void *)128 },
- { .compatible = "fsl,imx6sl-ocotp", (void *)32 },
+ { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
{ },
};
diff --git a/drivers/of/device.c b/drivers/of/device.c
index e5f47cec75f3..97a280d50d6d 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -225,6 +225,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
return tsize;
}
+EXPORT_SYMBOL_GPL(of_device_get_modalias);
/**
* of_device_uevent - Display OF related uevent information
@@ -287,3 +288,4 @@ int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 8e11fb2831cd..34f1d6b41fb9 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;
BUG_ON(size <= 0);
@@ -805,6 +807,10 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
DBG_RUN("%s() iovp 0x%lx/%x\n",
__func__, (long)iova, size);
@@ -908,6 +914,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
@@ -980,6 +988,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index a0580afe1713..7b0ca1551d7b 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -154,7 +154,10 @@ struct dino_device
};
/* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct dino_device *)__pdata; })
/*
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 42844c2bc065..d0c2759076a2 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -111,8 +111,10 @@ static u32 lba_t32;
/* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
-
+#define LBA_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct lba_device *)__pdata; })
/*
** Only allow 8 subsidiary busses per LBA
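DINO_DEV()/LBA_DEV() are rewritten as GNU C statement expressions so the argument is evaluated once and a NULL pointer traps early via BUG_ON() instead of being silently cast. The pattern in isolation (demo names are placeholders):

#include <linux/bug.h>

struct demo_device {
	int id;
};

/* Evaluate the argument once, trap NULL early, and yield the cast pointer
 * as the value of the whole expression.
 */
#define DEMO_DEV(d) ({				\
	void *__pdata = (d);			\
	BUG_ON(!__pdata);			\
	(struct demo_device *)__pdata; })

static int demo_get_id(void *pdata)
{
	return DEMO_DEV(pdata)->id;
}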
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 225049b492e5..d6326144ce01 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return 0;
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
/*
* check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
int pide;
ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -803,6 +807,10 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
@@ -942,6 +950,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
/* Fast path single entry scatterlists. */
if (nents == 1) {
@@ -1027,6 +1037,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
__func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;
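The parisc IOMMU changes add the same guard to every map/unmap entry point: a missing IOC fails the map with an error cookie and turns the unmap into a warn-and-return. A sketch of that split, where GET_DEMO_IOC() merely stands in for GET_IOC() (the real lookup goes through the HBA data) and the error cookie is a local define:

#include <linux/device.h>
#include <linux/types.h>

struct demo_ioc;

#define GET_DEMO_IOC(dev)	((struct demo_ioc *)dev_get_drvdata(dev))
#define DEMO_DMA_ERROR		((dma_addr_t)~0)

static dma_addr_t demo_map_single(struct device *dev, void *addr, size_t size)
{
	struct demo_ioc *ioc = GET_DEMO_IOC(dev);

	if (!ioc)
		return DEMO_DMA_ERROR;	/* caller sees a mapping failure */

	/* ... set up the IOMMU mapping and return the iova ... */
	return 0;
}

static void demo_unmap_single(struct device *dev, dma_addr_t iova, size_t size)
{
	struct demo_ioc *ioc = GET_DEMO_IOC(dev);

	if (!ioc) {
		WARN_ON(!ioc);	/* unmap has no error path, so just warn */
		return;
	}

	/* ... tear down the mapping ... */
}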
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 217c7ce3f57b..a741c9c7d115 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -278,6 +278,7 @@
#define PERST_PROPAGATION_DELAY_US_MIN 1000
#define PERST_PROPAGATION_DELAY_US_MAX 1005
+#define SWITCH_DELAY_MAX 20
#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
#define LINK_UP_TIMEOUT_US_MIN 5000
@@ -595,7 +596,6 @@ struct msm_pcie_dev_t {
bool cfg_access;
spinlock_t cfg_lock;
unsigned long irqsave_flags;
- struct mutex enumerate_lock;
struct mutex setup_lock;
struct irq_domain *irq_domain;
@@ -626,6 +626,7 @@ struct msm_pcie_dev_t {
bool ext_ref_clk;
bool common_phy;
uint32_t ep_latency;
+ uint32_t switch_latency;
uint32_t wr_halt_size;
uint32_t cpl_timeout;
uint32_t current_bdf;
@@ -702,6 +703,9 @@ static u32 num_rc_on;
/* global lock for PCIe common PHY */
static struct mutex com_phy_lock;
+/* global lock for PCIe enumeration */
+static struct mutex enumerate_lock;
+
/* Table to track info of PCIe devices */
static struct msm_pcie_device_info
msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
@@ -1735,7 +1739,8 @@ static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
{
- int ret, scm_ret;
+ int ret;
+ u64 scm_ret;
if (!dev) {
pr_err("PCIe: the input pcie dev is NULL.\n");
@@ -1745,7 +1750,7 @@ static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
if (ret || scm_ret) {
PCIE_ERR(dev,
- "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
+ "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%llu\n",
dev->rc_idx, ret, scm_ret);
return ret ? ret : -EINVAL;
}
@@ -1984,6 +1989,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->common_phy);
PCIE_DBG_FS(dev, "ep_latency: %dms\n",
dev->ep_latency);
+ PCIE_DBG_FS(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
dev->wr_halt_size);
PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
@@ -4675,7 +4682,15 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
goto link_fail;
}
- msleep(500);
+ if (dev->switch_latency) {
+ PCIE_DBG(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
+ if (dev->switch_latency <= SWITCH_DELAY_MAX)
+ usleep_range(dev->switch_latency * 1000,
+ dev->switch_latency * 1000);
+ else
+ msleep(dev->switch_latency);
+ }
msm_pcie_config_controller(dev);
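The switch-latency delay uses usleep_range() for short waits (up to SWITCH_DELAY_MAX, i.e. 20 ms) and msleep() beyond that, since msleep() is too coarse for millisecond-scale waits. A sketch of that selection; unlike the hunk above, the sketch gives usleep_range() a little slack between min and max, which is what the kernel timer documentation recommends:

#include <linux/delay.h>

static void demo_settle_delay(unsigned int delay_ms)
{
	if (!delay_ms)
		return;

	if (delay_ms <= 20)
		usleep_range(delay_ms * 1000, delay_ms * 1000 + 100);
	else
		msleep(delay_ms);
}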
@@ -5037,7 +5052,7 @@ int msm_pcie_enumerate(u32 rc_idx)
int ret = 0, bus_ret = 0, scan_ret = 0;
struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
- mutex_lock(&dev->enumerate_lock);
+ mutex_lock(&enumerate_lock);
PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
@@ -5156,7 +5171,7 @@ int msm_pcie_enumerate(u32 rc_idx)
}
out:
- mutex_unlock(&dev->enumerate_lock);
+ mutex_unlock(&enumerate_lock);
return ret;
}
@@ -6279,6 +6294,20 @@ static int msm_pcie_probe(struct platform_device *pdev)
PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
rc_idx, msm_pcie_dev[rc_idx].ep_latency);
+ msm_pcie_dev[rc_idx].switch_latency = 0;
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,switch-latency",
+ &msm_pcie_dev[rc_idx].switch_latency);
+
+ if (ret)
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: switch-latency does not exist.\n",
+ rc_idx);
+ else
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: switch-latency: 0x%x.\n",
+ rc_idx, msm_pcie_dev[rc_idx].switch_latency);
+
msm_pcie_dev[rc_idx].wr_halt_size = 0;
ret = of_property_read_u32(pdev->dev.of_node,
"qcom,wr-halt-size",
@@ -6604,6 +6633,7 @@ int __init pcie_init(void)
pcie_drv.rc_num = 0;
mutex_init(&pcie_drv.drv_lock);
mutex_init(&com_phy_lock);
+ mutex_init(&enumerate_lock);
for (i = 0; i < MAX_RC_NUM; i++) {
snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
@@ -6638,7 +6668,6 @@ int __init pcie_init(void)
rc_name, i);
spin_lock_init(&msm_pcie_dev[i].cfg_lock);
msm_pcie_dev[i].cfg_access = true;
- mutex_init(&msm_pcie_dev[i].enumerate_lock);
mutex_init(&msm_pcie_dev[i].setup_lock);
mutex_init(&msm_pcie_dev[i].recovery_lock);
spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d7ffd66814bb..fca925543fae 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -945,6 +945,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
return pci_legacy_resume_early(dev);
pci_update_current_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
if (drv && drv->pm && drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 63ec68e6ac2a..39400dda27c2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -552,6 +552,7 @@ static void armpmu_init(struct arm_pmu *armpmu)
.stop = armpmu_stop,
.read = armpmu_read,
.filter_match = armpmu_filter_match,
+ .events_across_hotplug = 1,
};
}
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 6bbda6b4ab50..5da9c95dccb7 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -195,6 +195,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~(mask << shift);
+ tmp |= value << shift;
+ writel(tmp, reg);
+}
+
static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
@@ -212,8 +222,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
reg += bank * 0x20 + pin / 16 * 0x10;
shift = pin % 16 * 2;
- writel(0x3 << shift, reg + CLR);
- writel(g->muxsel[i] << shift, reg + SET);
+ mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
}
return 0;
@@ -280,8 +289,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
/* mA */
if (config & MA_PRESENT) {
shift = pin % 8 * 4;
- writel(0x3 << shift, reg + CLR);
- writel(ma << shift, reg + SET);
+ mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
}
/* vol */
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 9677807db364..b505b87661f8 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -732,8 +732,8 @@ static const char * const sdxc_c_groups[] = {
static const char * const nand_groups[] = {
"nand_io", "nand_io_ce0", "nand_io_ce1",
"nand_io_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_clk", "nand_dqs0",
- "nand_dqs1"
+ "nand_wen_clk", "nand_ren_clk", "nand_dqs_0",
+ "nand_dqs_1"
};
static const char * const nor_groups[] = {
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 2b0d70217bbd..699efb1a8c45 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -543,6 +543,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
ret = info->ops->init(pfc);
if (ret < 0)
return ret;
+
+ /* .init() may have overridden pfc->info */
+ info = pfc->info;
}
/* Enable dummy states for those platforms without pinctrl support */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 87a4f44147c1..42ffa8708abc 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1102,7 +1102,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
- PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MSEL(IP6_7_6, TX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 90b973e15982..a7c81e988656 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "owa")), /* DOUT */
+ SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out")),
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index b111a5904952..2ae2438032b7 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -1901,6 +1901,20 @@ int gsi_stop_channel(unsigned long chan_hdl)
res = wait_for_completion_timeout(&ctx->compl,
msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
if (res == 0) {
+ /*
+ * check channel state here in case the channel is stopped but
+ * the interrupt was not handled yet.
+ */
+ val = gsi_readl(gsi_ctx->base +
+ GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
+ gsi_ctx->per.ee));
+ ctx->state = (val &
+ GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ if (ctx->state == GSI_CHAN_STATE_STOPPED) {
+ res = GSI_STATUS_SUCCESS;
+ goto free_lock;
+ }
GSIDBG("chan_hdl=%lu timed out\n", chan_hdl);
res = -GSI_STATUS_TIMED_OUT;
goto free_lock;
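The stop path now re-reads the channel state when the completion times out, because the channel may already have stopped with its interrupt not yet serviced. A stripped-down sketch of that timeout/re-check flow, with placeholder types and register access:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define DEMO_STOP_TIMEOUT_MS	10
#define DEMO_CHAN_STATE_STOPPED	1

struct demo_chan {
	struct completion compl;
	int state;
};

/* Placeholder for reading the channel-state field from hardware. */
static int demo_read_chan_state(struct demo_chan *ctx)
{
	return DEMO_CHAN_STATE_STOPPED;
}

static int demo_stop_channel(struct demo_chan *ctx)
{
	/* ... issue the stop command to hardware here ... */

	if (!wait_for_completion_timeout(&ctx->compl,
				msecs_to_jiffies(DEMO_STOP_TIMEOUT_MS))) {
		/* The IRQ may be late or lost: check the hardware state. */
		ctx->state = demo_read_chan_state(ctx);
		if (ctx->state == DEMO_CHAN_STATE_STOPPED)
			return 0;
		return -ETIMEDOUT;
	}

	return 0;
}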
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 51806cec1e4d..49aa7f25347d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -649,8 +649,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
ipa_insert_failed:
- if (offset)
- list_move(&offset->link,
+ list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
list_del(&entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
index dd591407d10f..5228b2db1410 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1436,6 +1436,66 @@ struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = {
start_ipv6_filter_idx),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
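Each optional variable-length TLV added to the elem_info table is described by three entries sharing one tlv_type: a QMI_OPT_FLAG, a QMI_DATA_LEN and the VAR_LEN_ARRAY payload, which the QMI encoder pairs with _valid, _len and array members in the message struct. An illustrative layout for the dst_pipe_id TLV; this is not the real ipa_fltr_installed_notif_req_msg_v01 definition, and the field widths are shown only as an assumption:

#include <linux/types.h>

#define DEMO_MAX_DST_PIPES 8	/* stands in for QMI_IPA_MAX_CLIENT_DST_PIPES_V01 */

struct demo_notif_req {
	/* ... mandatory TLVs omitted ... */
	uint8_t  dst_pipe_id_valid;	/* QMI_OPT_FLAG,  tlv_type 0x18 */
	uint32_t dst_pipe_id_len;	/* QMI_DATA_LEN,  tlv_type 0x18 (one byte on the wire) */
	uint32_t dst_pipe_id[DEMO_MAX_DST_PIPES]; /* VAR_LEN_ARRAY, tlv_type 0x18 */
};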
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 011ca300cc09..0531919487d7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1399,7 +1399,7 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
{
struct ipa_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa_ctx->lock);
entry = ipa_id_find(rt_tbl_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 834712a71ac6..5dbd43b44540 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1878,7 +1878,9 @@ void q6_deinitialize_rm(void)
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(ipa_rm_q6_workqueue);
+
+ if (ipa_rm_q6_workqueue)
+ destroy_workqueue(ipa_rm_q6_workqueue);
}
static void wake_tx_queue(struct work_struct *work)
@@ -2187,7 +2189,10 @@ timer_init_err:
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- q6_deinitialize_rm();
+
+ if (!atomic_read(&is_ssr))
+ q6_deinitialize_rm();
+
q6_init_err:
free_netdev(ipa_netdevs[0]);
ipa_netdevs[0] = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 62e263a139cc..52f99c830b47 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2248,7 +2248,6 @@ static int ipa3_q6_set_ex_path_to_apps(void)
struct ipahal_imm_cmd_register_write reg_write;
struct ipahal_imm_cmd_pyld *cmd_pyld;
int retval;
- struct ipahal_reg_valmask valmask;
desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
GFP_KERNEL);
@@ -2263,40 +2262,10 @@ static int ipa3_q6_set_ex_path_to_apps(void)
if (ep_idx == -1)
continue;
- if (ipa3_ctx->ep[ep_idx].valid &&
- ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
- BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
-
- reg_write.skip_pipeline_clear = false;
- reg_write.pipeline_clear_options =
- IPAHAL_HPS_CLEAR;
- reg_write.offset =
- ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
- ep_idx);
- ipahal_get_status_ep_valmask(
- ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
- &valmask);
- reg_write.value = valmask.val;
- reg_write.value_mask = valmask.mask;
- cmd_pyld = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
- if (!cmd_pyld) {
- IPAERR("fail construct register_write cmd\n");
- BUG();
- }
-
- desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_REGISTER_WRITE);
- desc[num_descs].type = IPA_IMM_CMD_DESC;
- desc[num_descs].callback = ipa3_destroy_imm;
- desc[num_descs].user1 = cmd_pyld;
- desc[num_descs].pyld = cmd_pyld->data;
- desc[num_descs].len = cmd_pyld->len;
- num_descs++;
- }
-
- /* disable statuses for modem producers */
- if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+ /* disable statuses for all modem controlled prod pipes */
+ if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
+ (ipa3_ctx->ep[ep_idx].valid &&
+ ipa3_ctx->ep[ep_idx].skip_ep_cfg)) {
ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
reg_write.skip_pipeline_clear = false;
@@ -5473,6 +5442,10 @@ static int ipa3_smp2p_probe(struct device *dev)
struct device_node *node = dev->of_node;
int res;
+ if (ipa3_ctx == NULL) {
+ IPAERR("ipa3_ctx was not initialized\n");
+ return -ENXIO;
+ }
IPADBG("node->name=%s\n", node->name);
if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
res = of_get_gpio(node, 0);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 7c3b5838242e..ce35ba02154d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -426,8 +426,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
ipa_insert_failed:
- if (offset)
- list_move(&offset->link,
+ list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
list_del(&entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 690a9db67ff0..571852c076ea 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -807,6 +807,11 @@ int ipa3_qmi_filter_notify_send(
return -EINVAL;
}
+ if (req->source_pipe_index == -1) {
+ IPAWANERR("Source pipe index invalid\n");
+ return -EINVAL;
+ }
+
mutex_lock(&ipa3_qmi_lock);
if (ipa3_qmi_ctx != NULL) {
/* cache the qmi_filter_request */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index d5d850309696..e6f1e2ce0b75 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -121,6 +121,31 @@ extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_ul_firewall_rule_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_ul_firewall_config_result_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_per_client_stats_info_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
+
/**
* struct ipa3_rmnet_context - IPA rmnet context
* @ipa_rmnet_ssr: support modem SSR
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index 6a5cb4891c02..746863732dc5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,8 @@
#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_qmi_service.h"
+
/* Type Definitions */
static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
{
@@ -1756,6 +1758,36 @@ struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
rule_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -2923,3 +2955,435 @@ struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+
+struct elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_rule_type_v01,
+ ip_type),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_filter_rule_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ul_firewall_rule_type_v01,
+ filter_rule),
+ .ei_array = ipa3_filter_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ is_success),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ ipa_enable_per_client_stats_req_msg_v01,
+ enable_per_client_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_per_client_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PER_CLIENTS_V01,
+ .elem_size =
+ sizeof(struct ipa_per_client_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list),
+ .ei_array =
+ ipa3_per_client_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_UL_FIREWALL_RULES_V01,
+ .elem_size = sizeof(struct ipa_ul_firewall_rule_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list),
+ .ei_array =
+ ipa3_ul_firewall_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x2,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(
+ struct ipa_ul_firewall_config_result_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01,
+ result),
+ .ei_array =
+ ipa3_ul_firewall_config_result_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
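
Note: the filter-installed-notification and per-client-stats tables above all follow the same QMI elem_info convention — an optional TLV is described by a QMI_OPT_FLAG entry for its *_valid flag, a QMI_DATA_LEN entry for its length field, and a VAR_LEN_ARRAY entry for the payload, all sharing one tlv_type, with QMI_EOTI terminating the table. The stand-alone sketch below (not part of the patch; the demo_* names are invented and it does not use the kernel's msm_qmi_interface types) only illustrates the offsetof-driven descriptor idea in isolation.

/*
 * Illustrative sketch only: a minimal user-space analogue of the
 * elem_info pattern used above. All names here are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum demo_data_type { DEMO_OPT_FLAG, DEMO_DATA_LEN, DEMO_U32, DEMO_EOTI };

struct demo_elem {
	enum demo_data_type data_type;
	uint32_t elem_len;	/* max element count for arrays */
	size_t elem_size;	/* size of one element */
	uint8_t tlv_type;	/* TLV tag on the wire */
	size_t offset;		/* offsetof() into the message struct */
};

struct demo_msg {
	uint8_t dst_pipe_id_valid;	/* optional-TLV presence flag */
	uint8_t dst_pipe_id_len;	/* number of valid array entries */
	uint32_t dst_pipe_id[4];	/* variable-length payload */
};

/* One optional TLV (tag 0x18) is described by three consecutive entries,
 * mirroring the dst_pipe_id TLV added to the filter-installed request. */
static const struct demo_elem demo_ei[] = {
	{ DEMO_OPT_FLAG, 1, sizeof(uint8_t),  0x18,
		offsetof(struct demo_msg, dst_pipe_id_valid) },
	{ DEMO_DATA_LEN, 1, sizeof(uint8_t),  0x18,
		offsetof(struct demo_msg, dst_pipe_id_len) },
	{ DEMO_U32,      4, sizeof(uint32_t), 0x18,
		offsetof(struct demo_msg, dst_pipe_id) },
	{ DEMO_EOTI, 0, 0, 0, 0 },
};

int main(void)
{
	const struct demo_elem *e;

	for (e = demo_ei; e->data_type != DEMO_EOTI; e++)
		printf("tlv 0x%02x at offset %zu, %u x %zu bytes\n",
		       (unsigned)e->tlv_type, e->offset,
		       (unsigned)e->elem_len, e->elem_size);
	return 0;
}
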
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index bc7cc7060545..8e790c89ed13 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1479,7 +1479,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
{
struct ipa3_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa3_ctx->lock);
entry = ipa3_id_find(rt_tbl_hdl);
@@ -1501,6 +1501,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
ip = IPA_IP_v6;
else {
WARN_ON(1);
+ result = -EINVAL;
goto ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index e8bd0cd2ffb5..545c2b599a6f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2861,7 +2861,7 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
IPAHAL_FULL_PIPELINE_CLEAR;
reg_write_agg_close.offset =
ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
- ipahal_get_aggr_force_close_valmask(1<<i, &valmask);
+ ipahal_get_aggr_force_close_valmask(i, &valmask);
reg_write_agg_close.value = valmask.val;
reg_write_agg_close.value_mask = valmask.mask;
cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 0db9f30181a7..d0aa42c81750 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1576,6 +1576,11 @@ void ipahal_get_aggr_force_close_valmask(int ep_idx,
IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
}
+ if (ep_idx > (sizeof(valmask->val) * 8 - 1)) {
+ IPAHAL_ERR("too big ep_idx %d\n", ep_idx);
+ ipa_assert();
+ return;
+ }
IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
valmask->mask = bmsk << shft;
}
@@ -1607,20 +1612,3 @@ void ipahal_get_fltrt_hash_flush_valmask(
valmask->mask = valmask->val;
}
-
-void ipahal_get_status_ep_valmask(int pipe_num,
- struct ipahal_reg_valmask *valmask)
-{
- if (!valmask) {
- IPAHAL_ERR("Input error\n");
- return;
- }
-
- valmask->val =
- (pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
- IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
-
- valmask->mask =
- IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
- IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
-}
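
Note: the ipahal_reg.c hunk above changes ipahal_get_aggr_force_close_valmask() to take a raw endpoint index (the ipa_utils.c caller now passes i instead of 1<<i) and rejects indices that would shift past the 32-bit register before computing 1 << ep_idx. Below is a minimal user-space sketch of that guard, for illustration only; the names (struct valmask, set_pipe_bit) are hypothetical.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's valmask structure. */
struct valmask { uint32_t val; uint32_t mask; };

/* Refuse shifts that would exceed the register width, as the new
 * bounds check in the patch does, instead of shifting out of range. */
static int set_pipe_bit(struct valmask *vm, int ep_idx)
{
	if (ep_idx < 0 || ep_idx > (int)(sizeof(vm->val) * 8 - 1))
		return -1;	/* too big: 1 << ep_idx would be undefined */
	vm->val = 1u << ep_idx;
	vm->mask = vm->val;
	return 0;
}

int main(void)
{
	struct valmask vm = { 0, 0 };

	assert(set_pipe_bit(&vm, 5) == 0 && vm.val == 0x20);
	assert(set_pipe_bit(&vm, 40) == -1);	/* rejected, no bad shift */
	printf("val=0x%x mask=0x%x\n", (unsigned)vm.val, (unsigned)vm.mask);
	return 0;
}
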
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 4db09475d5e2..9f32c071cb68 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -467,8 +467,6 @@ void ipahal_get_aggr_force_close_valmask(int ep_idx,
void ipahal_get_fltrt_hash_flush_valmask(
struct ipahal_reg_fltrt_hash_flush *flush,
struct ipahal_reg_valmask *valmask);
-void ipahal_get_status_ep_valmask(int pipe_num,
- struct ipahal_reg_valmask *valmask);
#endif /* _IPAHAL_REG_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 039bc7da5153..8fbde6675070 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -702,6 +702,11 @@ static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
req->source_pipe_index =
ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD);
+ if (req->source_pipe_index == IPA_EP_NOT_ALLOCATED) {
+ IPAWANERR("ep mapping failed\n");
+ retval = -EFAULT;
+ }
+
req->install_status = QMI_RESULT_SUCCESS_V01;
req->rule_id_valid = 1;
req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
@@ -1947,7 +1952,9 @@ void ipa3_q6_deinitialize_rm(void)
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+
+ if (rmnet_ipa3_ctx->rm_q6_wq)
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
}
static void ipa3_wake_tx_queue(struct work_struct *work)
@@ -2287,7 +2294,10 @@ timer_init_err:
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- ipa3_q6_deinitialize_rm();
+
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ ipa3_q6_deinitialize_rm();
+
q6_init_err:
free_netdev(dev);
rmnet_ipa3_ctx->wwan_priv = NULL;
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index a5936ea5a6aa..ce6d1257cfbb 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -509,7 +509,7 @@ static int __exit mhi_plat_remove(struct platform_device *pdev)
static int __init mhi_init(void)
{
- int r;
+ int r = -EAGAIN;
struct mhi_device_driver *mhi_dev_drv;
mhi_dev_drv = kmalloc(sizeof(*mhi_dev_drv), GFP_KERNEL);
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index b6edf707798b..a95579241524 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -141,7 +141,7 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
size_t mhi_mem_index = 0, ring_len;
void *dev_mem_start;
dma_addr_t dma_dev_mem_start;
- int i, r;
+ int i;
mhi_dev_ctxt->dev_space.dev_mem_len =
calculate_mhi_space(mhi_dev_ctxt);
@@ -244,7 +244,7 @@ err_ev_alloc:
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
- return r;
+ return -EFAULT;
}
static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index a991a2e68b34..18d0334ce1ec 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -109,7 +109,6 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
u64 pcie_dword_val = 0;
u32 pcie_word_val = 0;
u32 i = 0;
- int ret_val;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"~~~ Initializing MMIO ~~~\n");
@@ -131,7 +130,7 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
if (mhi_dev_ctxt->core.mhi_ver != MHI_VERSION) {
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Bad MMIO version, 0x%x\n", mhi_dev_ctxt->core.mhi_ver);
- return ret_val;
+ return -ENXIO;
}
/* Enable the channels */
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index 1d9282627d4e..b1434daf1f60 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -329,7 +329,7 @@ uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_RING_TYPE type,
u32 chan, uintptr_t phy_ptr)
{
- uintptr_t virtual_ptr;
+ uintptr_t virtual_ptr = 0;
struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt;
switch (type) {
@@ -358,7 +358,7 @@ dma_addr_t mhi_v2p_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_RING_TYPE type,
u32 chan, uintptr_t va_ptr)
{
- dma_addr_t phy_ptr;
+ dma_addr_t phy_ptr = 0;
struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt;
switch (type) {
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index 5b50666d30a2..9c35eeb177d9 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -1030,7 +1030,7 @@ error_dts:
static void process_rs232_state(struct uci_client *ctrl_client,
struct mhi_result *result)
{
- struct rs232_ctrl_msg *rs232_pkt;
+ struct rs232_ctrl_msg *rs232_pkt = result->buf_addr;
struct uci_client *client = NULL;
struct mhi_uci_ctxt_t *uci_ctxt = ctrl_client->uci_ctxt;
u32 msg_id;
@@ -1051,7 +1051,6 @@ static void process_rs232_state(struct uci_client *ctrl_client,
sizeof(struct rs232_ctrl_msg));
goto error_size;
}
- rs232_pkt = result->buf_addr;
MHI_GET_CTRL_DEST_ID(CTRL_DEST_ID, rs232_pkt, chan);
for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++)
if (chan == uci_ctxt->client_handles[i].out_attr.chan_id ||
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 9ea0b40304eb..0fec8acde96c 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -27,6 +27,7 @@
#define REVID_SUBTYPE 0x5
#define REVID_STATUS1 0x8
#define REVID_SPARE_0 0x60
+#define REVID_TP_REV 0xf1
#define REVID_FAB_ID 0xf2
#define QPNP_REVID_DEV_NAME "qcom,qpnp-revid"
@@ -157,9 +158,9 @@ static size_t build_pmic_string(char *buf, size_t n, int sid,
static int qpnp_revid_probe(struct platform_device *pdev)
{
u8 rev1, rev2, rev3, rev4, pmic_type, pmic_subtype, pmic_status;
- u8 option1, option2, option3, option4, spare0, fab_id;
+ u8 option1, option2, option3, option4, spare0;
unsigned int base;
- int rc;
+ int rc, fab_id, tp_rev;
char pmic_string[PMIC_STRING_MAXLENGTH] = {'\0'};
struct revid_chip *revid_chip;
struct regmap *regmap;
@@ -207,6 +208,11 @@ static int qpnp_revid_probe(struct platform_device *pdev)
else
fab_id = -EINVAL;
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,tp-rev-valid"))
+ tp_rev = qpnp_read_byte(regmap, base + REVID_TP_REV);
+ else
+ tp_rev = -EINVAL;
+
revid_chip = devm_kzalloc(&pdev->dev, sizeof(struct revid_chip),
GFP_KERNEL);
if (!revid_chip)
@@ -220,6 +226,7 @@ static int qpnp_revid_probe(struct platform_device *pdev)
revid_chip->data.pmic_subtype = pmic_subtype;
revid_chip->data.pmic_type = pmic_type;
revid_chip->data.fab_id = fab_id;
+ revid_chip->data.tp_rev = tp_rev;
if (pmic_subtype < ARRAY_SIZE(pmic_names))
revid_chip->data.pmic_name = pmic_names[pmic_subtype];
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index a151d0c7a770..49e0666d0c23 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -103,7 +103,6 @@ struct usb_bam_sps_type {
struct usb_bam_ctx_type {
struct usb_bam_sps_type usb_bam_sps;
struct resource *io_res;
- void __iomem *regs;
int irq;
struct platform_device *usb_bam_pdev;
struct workqueue_struct *usb_bam_wq;
@@ -113,6 +112,7 @@ struct usb_bam_ctx_type {
u32 inactivity_timer_ms;
bool is_bam_inactivity;
struct usb_bam_pipe_connect *usb_bam_connections;
+ struct msm_usb_bam_data *usb_bam_data;
spinlock_t usb_bam_lock;
};
@@ -235,10 +235,10 @@ void msm_bam_set_hsic_host_dev(struct device *dev)
if (dev) {
/* Hold the device until allowing lpm */
info[HSIC_CTRL].in_lpm = false;
- log_event_dbg("%s: Getting hsic device %p\n", __func__, dev);
+ log_event_dbg("%s: Getting hsic device %pK\n", __func__, dev);
pm_runtime_get(dev);
} else if (host_info[HSIC_CTRL].dev) {
- log_event_dbg("%s: Try Putting hsic device %p, lpm:%d\n",
+ log_event_dbg("%s: Try Putting hsic device %pK, lpm:%d\n",
__func__, host_info[HSIC_CTRL].dev,
info[HSIC_CTRL].in_lpm);
/* Just release previous device if not already done */
@@ -320,8 +320,6 @@ static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect)
{
int ret = 0;
struct usb_bam_ctx_type *ctx = &msm_usb_bam[pipe_connect->bam_type];
- struct msm_usb_bam_platform_data *pdata =
- ctx->usb_bam_pdev->dev.platform_data;
struct sps_mem_buffer *data_buf = &(pipe_connect->data_mem_buf);
struct sps_mem_buffer *desc_buf = &(pipe_connect->desc_mem_buf);
@@ -360,7 +358,7 @@ static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect)
}
data_buf->phys_base = pipe_connect->data_fifo_base_offset +
- pdata->usb_bam_fifo_baseaddr;
+ ctx->usb_bam_data->usb_bam_fifo_baseaddr;
data_buf->size = pipe_connect->data_fifo_size;
data_buf->base = ioremap(data_buf->phys_base, data_buf->size);
if (!data_buf->base) {
@@ -372,7 +370,7 @@ static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect)
memset_io(data_buf->base, 0, data_buf->size);
desc_buf->phys_base = pipe_connect->desc_fifo_base_offset +
- pdata->usb_bam_fifo_baseaddr;
+ ctx->usb_bam_data->usb_bam_fifo_baseaddr;
desc_buf->size = pipe_connect->desc_fifo_size;
desc_buf->base = ioremap(desc_buf->phys_base, desc_buf->size);
if (!desc_buf->base) {
@@ -825,7 +823,7 @@ static bool _hsic_host_bam_resume_core(void)
/* Exit from "full suspend" in case of hsic host */
if (host_info[HSIC_CTRL].dev && info[HSIC_CTRL].in_lpm) {
- log_event_dbg("%s: Getting hsic device %p\n", __func__,
+ log_event_dbg("%s: Getting hsic device %pK\n", __func__,
host_info[HSIC_CTRL].dev);
pm_runtime_get(host_info[HSIC_CTRL].dev);
info[HSIC_CTRL].in_lpm = false;
@@ -839,7 +837,7 @@ static void _hsic_host_bam_suspend_core(void)
log_event_dbg("%s: enter\n", __func__);
if (host_info[HSIC_CTRL].dev && !info[HSIC_CTRL].in_lpm) {
- log_event_dbg("%s: Putting hsic host device %p\n", __func__,
+ log_event_dbg("%s: Putting hsic host device %pK\n", __func__,
host_info[HSIC_CTRL].dev);
pm_runtime_put(host_info[HSIC_CTRL].dev);
info[HSIC_CTRL].in_lpm = true;
@@ -1070,7 +1068,6 @@ int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx)
struct usb_bam_pipe_connect *pipe_connect =
&ctx->usb_bam_connections[idx];
struct device *bam_dev = &ctx->usb_bam_pdev->dev;
- struct msm_usb_bam_platform_data *pdata = bam_dev->platform_data;
enum usb_bam_mode cur_mode;
if (pipe_connect->enabled) {
@@ -1096,7 +1093,7 @@ int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx)
spin_lock(&ctx->usb_bam_lock);
/* Check if BAM requires RESET before connect and reset of first pipe */
- if ((pdata->reset_on_connect == true) &&
+ if ((ctx->usb_bam_data->reset_on_connect == true) &&
(ctx->pipes_enabled_per_bam == 0)) {
spin_unlock(&ctx->usb_bam_lock);
@@ -1596,8 +1593,6 @@ static int ss_usb_cons_release_resource(void)
static void usb_bam_ipa_create_resources(enum usb_ctrl cur_bam)
{
struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
- struct msm_usb_bam_platform_data *pdata =
- ctx->usb_bam_pdev->dev.platform_data;
struct ipa_rm_create_params usb_prod_create_params;
struct ipa_rm_create_params usb_cons_create_params;
int ret;
@@ -1606,7 +1601,8 @@ static void usb_bam_ipa_create_resources(enum usb_ctrl cur_bam)
memset(&usb_prod_create_params, 0, sizeof(usb_prod_create_params));
usb_prod_create_params.name = ipa_rm_resource_prod[cur_bam];
usb_prod_create_params.reg_params.notify_cb = usb_prod_notify_cb;
- usb_prod_create_params.reg_params.user_data = &pdata->bam_type;
+ usb_prod_create_params.reg_params.user_data
+ = &ctx->usb_bam_data->bam_type;
usb_prod_create_params.floor_voltage = IPA_VOLTAGE_SVS;
ret = ipa_rm_create_resource(&usb_prod_create_params);
if (ret) {
@@ -1724,7 +1720,7 @@ static bool check_pipes_empty(enum usb_ctrl bam_type, u8 src_idx, u8 dst_idx)
/* If we have any remnants in the pipes we don't go to sleep */
prod_pipe = ctx->usb_bam_sps.sps_pipes[src_idx];
cons_pipe = ctx->usb_bam_sps.sps_pipes[dst_idx];
- log_event_dbg("prod_pipe=%p, cons_pipe=%p\n", prod_pipe, cons_pipe);
+ log_event_dbg("prod_pipe=%pK, cons_pipe=%pK\n", prod_pipe, cons_pipe);
if (!cons_pipe || (!prod_pipe &&
prod_pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM)) {
@@ -2091,7 +2087,7 @@ static bool msm_bam_host_lpm_ok(enum usb_ctrl bam_type)
}
/* HSIC host will go now to lpm */
- log_event_dbg("%s: vote for suspend hsic %p\n",
+ log_event_dbg("%s: vote for suspend hsic %pK\n",
__func__, host_info[bam_type].dev);
for (i = 0; i < ctx->max_connections; i++) {
@@ -2136,16 +2132,14 @@ static int usb_bam_set_ipa_perf(enum usb_ctrl cur_bam,
int ret;
struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
struct ipa_rm_perf_profile ipa_rm_perf_prof;
- struct msm_usb_bam_platform_data *pdata =
- ctx->usb_bam_pdev->dev.platform_data;
if (usb_connection_speed == USB_SPEED_SUPER)
ipa_rm_perf_prof.max_supported_bandwidth_mbps =
- pdata->max_mbps_superspeed;
+ ctx->usb_bam_data->max_mbps_superspeed;
else
/* Bam2Bam is supported only for SS and HS (HW limitation) */
ipa_rm_perf_prof.max_supported_bandwidth_mbps =
- pdata->max_mbps_highspeed;
+ ctx->usb_bam_data->max_mbps_highspeed;
/*
* Having a max mbps property in dtsi file is a must
@@ -2182,7 +2176,6 @@ int usb_bam_connect_ipa(enum usb_ctrl cur_bam,
struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
struct usb_bam_pipe_connect *pipe_connect;
struct device *bam_dev = &ctx->usb_bam_pdev->dev;
- struct msm_usb_bam_platform_data *pdata = bam_dev->platform_data;
int ret;
bool bam2bam, is_dpl;
@@ -2260,7 +2253,8 @@ int usb_bam_connect_ipa(enum usb_ctrl cur_bam,
/* Check if BAM requires RESET before connect and reset first pipe */
spin_lock(&ctx->usb_bam_lock);
- if (pdata->reset_on_connect && !ctx->pipes_enabled_per_bam) {
+ if (ctx->usb_bam_data->reset_on_connect &&
+ !ctx->pipes_enabled_per_bam) {
spin_unlock(&ctx->usb_bam_lock);
if (cur_bam == CI_CTRL)
@@ -2454,7 +2448,7 @@ static void usb_bam_work(struct work_struct *w)
if (pipe_iter->bam_type == pipe_connect->bam_type &&
pipe_iter->dir == PEER_PERIPHERAL_TO_USB &&
pipe_iter->enabled) {
- log_event_dbg("%s: Register wakeup on pipe %p\n",
+ log_event_dbg("%s: Register wakeup on pipe %pK\n",
__func__, pipe_iter);
__usb_bam_register_wake_cb(
pipe_connect->bam_type, i,
@@ -2610,8 +2604,6 @@ int usb_bam_disconnect_pipe(enum usb_ctrl bam_type, u8 idx)
struct usb_bam_pipe_connect *pipe_connect;
struct device *bam_dev = &ctx->usb_bam_pdev->dev;
int ret;
- struct msm_usb_bam_platform_data *pdata =
- ctx->usb_bam_pdev->dev.platform_data;
pipe_connect = &ctx->usb_bam_connections[idx];
@@ -2639,7 +2631,8 @@ int usb_bam_disconnect_pipe(enum usb_ctrl bam_type, u8 idx)
log_event_dbg("%s: success disconnecting pipe %d\n", __func__, idx);
- if (pdata->reset_on_disconnect && !ctx->pipes_enabled_per_bam) {
+ if (ctx->usb_bam_data->reset_on_disconnect
+ && !ctx->pipes_enabled_per_bam) {
if (bam_type == CI_CTRL)
msm_hw_bam_disable(1);
@@ -2807,10 +2800,10 @@ static void usb_bam_sps_events(enum sps_callback_case sps_cb_case, void *user)
}
}
-static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
+static struct msm_usb_bam_data *usb_bam_dt_to_data(
struct platform_device *pdev, u32 usb_addr)
{
- struct msm_usb_bam_platform_data *pdata;
+ struct msm_usb_bam_data *usb_bam_data;
struct device_node *node = pdev->dev.of_node;
int rc = 0;
u8 i = 0;
@@ -2819,10 +2812,10 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
u32 threshold, max_connections = 0;
static struct usb_bam_pipe_connect *usb_bam_connections;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
+ usb_bam_data = devm_kzalloc(&pdev->dev, sizeof(*usb_bam_data),
+ GFP_KERNEL);
+ if (!usb_bam_data)
return NULL;
- }
rc = of_property_read_u32(node, "qcom,bam-type", &bam);
if (rc) {
@@ -2835,7 +2828,7 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
__func__, bam);
return NULL;
}
- pdata->bam_type = bam;
+ usb_bam_data->bam_type = bam;
rc = of_property_read_u32(node, "qcom,bam-mode", &bam_mode);
if (rc) {
@@ -2845,49 +2838,49 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
bam_mode = USB_BAM_DEVICE;
}
- pdata->reset_on_connect = of_property_read_bool(node,
+ usb_bam_data->reset_on_connect = of_property_read_bool(node,
"qcom,reset-bam-on-connect");
- pdata->reset_on_disconnect = of_property_read_bool(node,
+ usb_bam_data->reset_on_disconnect = of_property_read_bool(node,
"qcom,reset-bam-on-disconnect");
rc = of_property_read_u32(node, "qcom,usb-bam-num-pipes",
- &pdata->usb_bam_num_pipes);
+ &usb_bam_data->usb_bam_num_pipes);
if (rc) {
log_event_err("Invalid usb bam num pipes property\n");
return NULL;
}
rc = of_property_read_u32(node, "qcom,usb-bam-max-mbps-highspeed",
- &pdata->max_mbps_highspeed);
+ &usb_bam_data->max_mbps_highspeed);
if (rc)
- pdata->max_mbps_highspeed = 0;
+ usb_bam_data->max_mbps_highspeed = 0;
rc = of_property_read_u32(node, "qcom,usb-bam-max-mbps-superspeed",
- &pdata->max_mbps_superspeed);
+ &usb_bam_data->max_mbps_superspeed);
if (rc)
- pdata->max_mbps_superspeed = 0;
+ usb_bam_data->max_mbps_superspeed = 0;
rc = of_property_read_u32(node, "qcom,usb-bam-fifo-baseaddr", &addr);
if (rc)
pr_debug("%s: Invalid usb base address property\n", __func__);
else
- pdata->usb_bam_fifo_baseaddr = addr;
+ usb_bam_data->usb_bam_fifo_baseaddr = addr;
- pdata->ignore_core_reset_ack = of_property_read_bool(node,
+ usb_bam_data->ignore_core_reset_ack = of_property_read_bool(node,
"qcom,ignore-core-reset-ack");
- pdata->disable_clk_gating = of_property_read_bool(node,
+ usb_bam_data->disable_clk_gating = of_property_read_bool(node,
"qcom,disable-clk-gating");
rc = of_property_read_u32(node, "qcom,usb-bam-override-threshold",
&threshold);
if (rc)
- pdata->override_threshold = USB_THRESHOLD;
+ usb_bam_data->override_threshold = USB_THRESHOLD;
else
- pdata->override_threshold = threshold;
+ usb_bam_data->override_threshold = threshold;
- pdata->enable_hsusb_bam_on_boot = of_property_read_bool(node,
+ usb_bam_data->enable_hsusb_bam_on_boot = of_property_read_bool(node,
"qcom,enable-hsusb-bam-on-boot");
for_each_child_of_node(pdev->dev.of_node, node)
@@ -2923,7 +2916,7 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
goto err;
if (usb_bam_connections[i].mem_type == OCI_MEM) {
- if (!pdata->usb_bam_fifo_baseaddr) {
+ if (!usb_bam_data->usb_bam_fifo_baseaddr) {
log_event_err("%s: base address is missing\n",
__func__);
goto err;
@@ -3007,7 +3000,7 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
msm_usb_bam[bam].usb_bam_connections = usb_bam_connections;
msm_usb_bam[bam].max_connections = max_connections;
- return pdata;
+ return usb_bam_data;
err:
log_event_err("%s: failed\n", __func__);
return NULL;
@@ -3016,9 +3009,8 @@ err:
static int usb_bam_init(struct platform_device *pdev)
{
int ret;
- struct msm_usb_bam_platform_data *pdata = pdev->dev.platform_data;
- enum usb_ctrl bam_type = pdata->bam_type;
- struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type];
+ struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
+ enum usb_ctrl bam_type = ctx->usb_bam_data->bam_type;
struct sps_bam_props props;
memset(&props, 0, sizeof(props));
@@ -3027,12 +3019,11 @@ static int usb_bam_init(struct platform_device *pdev)
bam_enable_strings[bam_type]);
props.phys_addr = ctx->io_res->start;
- props.virt_addr = ctx->regs;
props.virt_size = resource_size(ctx->io_res);
props.irq = ctx->irq;
- props.summing_threshold = pdata->override_threshold;
- props.event_threshold = pdata->override_threshold;
- props.num_pipes = pdata->usb_bam_num_pipes;
+ props.summing_threshold = ctx->usb_bam_data->override_threshold;
+ props.event_threshold = ctx->usb_bam_data->override_threshold;
+ props.num_pipes = ctx->usb_bam_data->usb_bam_num_pipes;
props.callback = usb_bam_sps_events;
props.user = bam_enable_strings[bam_type];
@@ -3040,10 +3031,10 @@ static int usb_bam_init(struct platform_device *pdev)
* HSUSB and HSIC Cores don't support RESET ACK signal to BAMs
* Hence, let BAM to ignore acknowledge from USB while resetting PIPE
*/
- if (pdata->ignore_core_reset_ack && bam_type != DWC3_CTRL)
+ if (ctx->usb_bam_data->ignore_core_reset_ack && bam_type != DWC3_CTRL)
props.options = SPS_BAM_NO_EXT_P_RST;
- if (pdata->disable_clk_gating)
+ if (ctx->usb_bam_data->disable_clk_gating)
props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
/*
@@ -3051,7 +3042,8 @@ static int usb_bam_init(struct platform_device *pdev)
* starting peripheral controller to avoid switching USB core mode
* from legacy to BAM with ongoing data transfers.
*/
- if (pdata->enable_hsusb_bam_on_boot && bam_type == CI_CTRL) {
+ if (ctx->usb_bam_data->enable_hsusb_bam_on_boot
+ && bam_type == CI_CTRL) {
pr_debug("Register and enable HSUSB BAM\n");
props.options |= SPS_BAM_OPT_ENABLE_AT_BOOT;
}
@@ -3068,9 +3060,8 @@ static int usb_bam_init(struct platform_device *pdev)
static int enable_usb_bam(struct platform_device *pdev)
{
int ret;
- struct msm_usb_bam_platform_data *pdata = pdev->dev.platform_data;
- enum usb_ctrl bam_type = pdata->bam_type;
- struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type];
+ struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
+ enum usb_ctrl bam_type = ctx->usb_bam_data->bam_type;
ret = usb_bam_init(pdev);
if (ret) {
@@ -3137,14 +3128,19 @@ void usb_bam_register_panic_hdlr(void)
&usb_bam_panic_blk);
}
+static void usb_bam_unregister_panic_hdlr(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &usb_bam_panic_blk);
+}
+
static int usb_bam_probe(struct platform_device *pdev)
{
int ret, i, irq;
struct resource *io_res;
- void __iomem *regs;
enum usb_ctrl bam_type;
struct usb_bam_ctx_type *ctx;
- struct msm_usb_bam_platform_data *pdata;
+ struct msm_usb_bam_data *usb_bam_data;
dev_dbg(&pdev->dev, "usb_bam_probe\n");
@@ -3160,25 +3156,19 @@ static int usb_bam_probe(struct platform_device *pdev)
return irq;
}
- regs = devm_ioremap(&pdev->dev, io_res->start, resource_size(io_res));
- if (!regs) {
- log_event_err("%s: ioremap failed\n", __func__);
- return -ENOMEM;
- }
-
/* specify BAM physical address to be filled in BAM connections */
- pdata = usb_bam_dt_to_pdata(pdev, io_res->start);
- if (!pdata)
+ usb_bam_data = usb_bam_dt_to_data(pdev, io_res->start);
+ if (!usb_bam_data)
return -EINVAL;
- pdev->dev.platform_data = pdata;
- bam_type = pdata->bam_type;
+ bam_type = usb_bam_data->bam_type;
ctx = &msm_usb_bam[bam_type];
+ dev_set_drvdata(&pdev->dev, ctx);
ctx->usb_bam_pdev = pdev;
ctx->irq = irq;
- ctx->regs = regs;
ctx->io_res = io_res;
+ ctx->usb_bam_data = usb_bam_data;
for (i = 0; i < ctx->max_connections; i++) {
ctx->usb_bam_connections[i].enabled = 0;
@@ -3319,15 +3309,15 @@ EXPORT_SYMBOL(usb_bam_get_bam_type);
bool msm_usb_bam_enable(enum usb_ctrl bam, bool bam_enable)
{
- struct msm_usb_bam_platform_data *pdata;
+ struct msm_usb_bam_data *usb_bam_data;
struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam];
if (!ctx->usb_bam_pdev)
return 0;
- pdata = ctx->usb_bam_pdev->dev.platform_data;
+ usb_bam_data = ctx->usb_bam_data;
if ((bam != CI_CTRL) || !(bam_enable ||
- pdata->enable_hsusb_bam_on_boot))
+ usb_bam_data->enable_hsusb_bam_on_boot))
return 0;
msm_hw_bam_disable(1);
@@ -3409,10 +3399,10 @@ EXPORT_SYMBOL(msm_bam_hsic_lpm_ok);
static int usb_bam_remove(struct platform_device *pdev)
{
- struct msm_usb_bam_platform_data *pdata = pdev->dev.platform_data;
- enum usb_ctrl bam_type = pdata->bam_type;
- struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type];
+ struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
+ usb_bam_unregister_panic_hdlr();
+ sps_deregister_bam_device(ctx->h_bam);
destroy_workqueue(ctx->usb_bam_wq);
return 0;
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 0dd6017245fa..a45a51490817 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -297,6 +297,12 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(hw_current_max),
POWER_SUPPLY_ATTR(real_type),
POWER_SUPPLY_ATTR(pr_swap),
+ POWER_SUPPLY_ATTR(cc_step),
+ POWER_SUPPLY_ATTR(cc_step_sel),
+ POWER_SUPPLY_ATTR(sw_jeita_enabled),
+ POWER_SUPPLY_ATTR(pd_voltage_max),
+ POWER_SUPPLY_ATTR(pd_voltage_min),
+ POWER_SUPPLY_ATTR(sdp_current_max),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
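
Note: the new POWER_SUPPLY_ATTR() entries are appended in the same position as their enum power_supply_property counterparts, since the sysfs show/store handlers derive the property index from the attribute's position in this array. The toy illustration below shows that enum-indexed table pattern; the demo_* names are invented.

#include <stdio.h>

/* Hypothetical miniature of an enum-indexed attribute table: the array
 * index is the enum value, so new entries must be added in the same
 * position as their enum counterparts. */
enum demo_psy_property {
	DEMO_PROP_PR_SWAP,
	DEMO_PROP_CC_STEP,
	DEMO_PROP_CC_STEP_SEL,
	DEMO_NUM_PROPS,
};

static const char * const demo_attr_names[DEMO_NUM_PROPS] = {
	[DEMO_PROP_PR_SWAP]     = "pr_swap",
	[DEMO_PROP_CC_STEP]     = "cc_step",
	[DEMO_PROP_CC_STEP_SEL] = "cc_step_sel",
};

int main(void)
{
	int off;

	/* A show() handler recovers the property from the array offset. */
	for (off = 0; off < DEMO_NUM_PROPS; off++)
		printf("offset %d -> %s\n", off, demo_attr_names[off]);
	return 0;
}
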
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 947108c1410e..88dcdd8fd7be 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -49,7 +49,7 @@
#define SRAM_READ "fg_sram_read"
#define SRAM_WRITE "fg_sram_write"
#define PROFILE_LOAD "fg_profile_load"
-#define DELTA_SOC "fg_delta_soc"
+#define TTF_PRIMING "fg_ttf_priming"
/* Delta BSOC irq votable reasons */
#define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq"
@@ -81,6 +81,8 @@
#define BATT_THERM_NUM_COEFFS 3
+#define MAX_CC_STEPS 20
+
/* Debug flag definitions */
enum fg_debug_flag {
FG_IRQ = BIT(0), /* Show interrupts */
@@ -228,6 +230,11 @@ enum esr_timer_config {
NUM_ESR_TIMERS,
};
+enum ttf_mode {
+ TTF_MODE_NORMAL = 0,
+ TTF_MODE_QNOVO,
+};
+
/* DT parameters for FG device */
struct fg_dt_props {
bool force_load_profile;
@@ -309,16 +316,31 @@ struct fg_irq_info {
};
struct fg_circ_buf {
- int arr[20];
+ int arr[10];
int size;
int head;
};
+struct fg_cc_step_data {
+ int arr[MAX_CC_STEPS];
+ int sel;
+};
+
struct fg_pt {
s32 x;
s32 y;
};
+struct ttf {
+ struct fg_circ_buf ibatt;
+ struct fg_circ_buf vbatt;
+ struct fg_cc_step_data cc_step;
+ struct mutex lock;
+ int mode;
+ int last_ttf;
+ s64 last_ms;
+};
+
static const struct fg_pt fg_ln_table[] = {
{ 1000, 0 },
{ 2000, 693 },
@@ -358,6 +380,7 @@ struct fg_chip {
struct power_supply *usb_psy;
struct power_supply *dc_psy;
struct power_supply *parallel_psy;
+ struct power_supply *pc_port_psy;
struct iio_channel *batt_id_chan;
struct iio_channel *die_temp_chan;
struct fg_memif *sram;
@@ -374,9 +397,9 @@ struct fg_chip {
struct fg_cyc_ctr_data cyc_ctr;
struct notifier_block nb;
struct fg_cap_learning cl;
+ struct ttf ttf;
struct mutex bus_lock;
struct mutex sram_rw_lock;
- struct mutex batt_avg_lock;
struct mutex charge_full_lock;
u32 batt_soc_base;
u32 batt_info_base;
@@ -389,12 +412,14 @@ struct fg_chip {
int prev_charge_status;
int charge_done;
int charge_type;
+ int online_status;
int last_soc;
int last_batt_temp;
int health;
int maint_soc;
int delta_soc;
int last_msoc;
+ int last_recharge_volt_mv;
int esr_timer_charging_default[NUM_ESR_TIMERS];
enum slope_limit_status slope_limit_sts;
bool profile_available;
@@ -413,11 +438,8 @@ struct fg_chip {
struct completion soc_ready;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
- struct work_struct cycle_count_work;
- struct delayed_work batt_avg_work;
+ struct delayed_work ttf_work;
struct delayed_work sram_dump_work;
- struct fg_circ_buf ibatt_circ_buf;
- struct fg_circ_buf vbatt_circ_buf;
};
/* Debugfs data structures are below */
@@ -475,5 +497,6 @@ extern bool is_qnovo_en(struct fg_chip *chip);
extern void fg_circ_buf_add(struct fg_circ_buf *, int);
extern void fg_circ_buf_clr(struct fg_circ_buf *);
extern int fg_circ_buf_avg(struct fg_circ_buf *, int *);
+extern int fg_circ_buf_median(struct fg_circ_buf *, int *);
extern int fg_lerp(const struct fg_pt *, size_t, s32, s32 *);
#endif
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 9635044e02a5..0cb1dea7113b 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/sort.h>
#include "fg-core.h"
void fg_circ_buf_add(struct fg_circ_buf *buf, int val)
@@ -39,6 +40,39 @@ int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg)
return 0;
}
+static int cmp_int(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+int fg_circ_buf_median(struct fg_circ_buf *buf, int *median)
+{
+ int *temp;
+
+ if (buf->size == 0)
+ return -ENODATA;
+
+ if (buf->size == 1) {
+ *median = buf->arr[0];
+ return 0;
+ }
+
+ temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(temp, buf->arr, buf->size * sizeof(*temp));
+ sort(temp, buf->size, sizeof(*temp), cmp_int, NULL);
+
+ if (buf->size % 2)
+ *median = temp[buf->size / 2];
+ else
+ *median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2;
+
+ kfree(temp);
+ return 0;
+}
+
int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output)
{
int i;
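
Note: fg_circ_buf_median() added above sorts a copy of the circular buffer and, for an even sample count, averages the two middle values; the fuel-gauge TTF path later uses it in place of a running average so a single outlier current or voltage sample does not skew the inputs. The stand-alone user-space analogue below illustrates the same computation; demo_buf_median is an invented name.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

/* Copy the samples, sort the copy, and take the middle value (or the
 * average of the two middle values when the count is even). */
static int demo_buf_median(const int *arr, int size, int *median)
{
	int *tmp;

	if (size == 0)
		return -1;
	tmp = malloc(size * sizeof(*tmp));
	if (!tmp)
		return -1;
	memcpy(tmp, arr, size * sizeof(*tmp));
	qsort(tmp, size, sizeof(*tmp), cmp_int);
	if (size % 2)
		*median = tmp[size / 2];
	else
		*median = (tmp[size / 2 - 1] + tmp[size / 2]) / 2;
	free(tmp);
	return 0;
}

int main(void)
{
	int samples[] = { 410, 395, 2000, 402, 398, 405 };	/* one outlier */
	int median;

	if (!demo_buf_median(samples, 6, &median))
		printf("median = %d\n", median);	/* 403, outlier ignored */
	return 0;
}
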
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index b99558ed2100..3d0a71844608 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -438,12 +438,14 @@ out:
int rerun_election(struct votable *votable)
{
int rc = 0;
+ int effective_result;
lock_votable(votable);
+ effective_result = get_effective_result_locked(votable);
if (votable->callback)
rc = votable->callback(votable,
- votable->data,
- votable->effective_result,
+ votable->data,
+ effective_result,
get_client_str(votable, votable->effective_client_id));
unlock_votable(votable);
return rc;
@@ -519,11 +521,10 @@ static int show_votable_clients(struct seq_file *m, void *data)
lock_votable(votable);
- seq_printf(m, "Votable %s:\n", votable->name);
- seq_puts(m, "clients:\n");
for (i = 0; i < votable->num_clients; i++) {
if (votable->client_strs[i]) {
- seq_printf(m, "%-15s:\t\ten=%d\t\tv=%d\n",
+ seq_printf(m, "%s: %s:\t\t\ten=%d v=%d\n",
+ votable->name,
votable->client_strs[i],
votable->votes[i].enabled,
votable->votes[i].value);
@@ -542,11 +543,11 @@ static int show_votable_clients(struct seq_file *m, void *data)
break;
}
- seq_printf(m, "type: %s\n", type_str);
- seq_puts(m, "Effective:\n");
effective_client_str = get_effective_client_locked(votable);
- seq_printf(m, "%-15s:\t\tv=%d\n",
+ seq_printf(m, "%s: effective=%s type=%s v=%d\n",
+ votable->name,
effective_client_str ? effective_client_str : "none",
+ type_str,
get_effective_result_locked(votable));
unlock_votable(votable);
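
Note: the rerun_election() change obtains the effective result through the locked accessor before invoking the callback, instead of using the cached votable->effective_result field. The simplified sketch below shows the read-under-lock pattern only; the demo_* names are hypothetical and the real code recomputes the result rather than copying a field.

#include <pthread.h>
#include <stdio.h>

struct demo_votable {
	pthread_mutex_t lock;
	int effective_result;
};

static int demo_callback(int effective_result)
{
	printf("re-voting with effective result %d\n", effective_result);
	return 0;
}

/* Read the shared state while holding the lock, then hand the local
 * snapshot to the callback. */
static int demo_rerun_election(struct demo_votable *v)
{
	int effective_result;
	int rc;

	pthread_mutex_lock(&v->lock);
	effective_result = v->effective_result;
	rc = demo_callback(effective_result);
	pthread_mutex_unlock(&v->lock);
	return rc;
}

int main(void)
{
	struct demo_votable v = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.effective_result = 1800000,
	};

	return demo_rerun_election(&v);
}
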
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e03681ec13cc..361efd4fbbbd 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -75,6 +75,8 @@
#define ESR_TIMER_CHG_MAX_OFFSET 0
#define ESR_TIMER_CHG_INIT_WORD 18
#define ESR_TIMER_CHG_INIT_OFFSET 2
+#define ESR_EXTRACTION_ENABLE_WORD 19
+#define ESR_EXTRACTION_ENABLE_OFFSET 0
#define PROFILE_LOAD_WORD 24
#define PROFILE_LOAD_OFFSET 0
#define ESR_RSLOW_DISCHG_WORD 34
@@ -1171,6 +1173,42 @@ static bool batt_psy_initialized(struct fg_chip *chip)
return true;
}
+static bool usb_psy_initialized(struct fg_chip *chip)
+{
+ if (chip->usb_psy)
+ return true;
+
+ chip->usb_psy = power_supply_get_by_name("usb");
+ if (!chip->usb_psy)
+ return false;
+
+ return true;
+}
+
+static bool pc_port_psy_initialized(struct fg_chip *chip)
+{
+ if (chip->pc_port_psy)
+ return true;
+
+ chip->pc_port_psy = power_supply_get_by_name("pc_port");
+ if (!chip->pc_port_psy)
+ return false;
+
+ return true;
+}
+
+static bool dc_psy_initialized(struct fg_chip *chip)
+{
+ if (chip->dc_psy)
+ return true;
+
+ chip->dc_psy = power_supply_get_by_name("dc");
+ if (!chip->dc_psy)
+ return false;
+
+ return true;
+}
+
static bool is_parallel_charger_available(struct fg_chip *chip)
{
if (!chip->parallel_psy)
@@ -1277,11 +1315,20 @@ static bool is_temp_valid_cap_learning(struct fg_chip *chip)
return true;
}
+#define QNOVO_CL_SKEW_DECIPCT -30
static void fg_cap_learning_post_process(struct fg_chip *chip)
{
int64_t max_inc_val, min_dec_val, old_cap;
int rc;
+ if (is_qnovo_en(chip)) {
+ fg_dbg(chip, FG_CAP_LEARN, "applying skew %d on current learnt capacity %lld\n",
+ QNOVO_CL_SKEW_DECIPCT, chip->cl.final_cc_uah);
+ chip->cl.final_cc_uah = chip->cl.final_cc_uah *
+ (1000 + QNOVO_CL_SKEW_DECIPCT);
+ do_div(chip->cl.final_cc_uah, 1000);
+ }
+
max_inc_val = chip->cl.learned_cc_uah
* (1000 + chip->dt.cl_max_cap_inc);
do_div(max_inc_val, 1000);
@@ -1590,6 +1637,9 @@ static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
if (chip->wa_flags & PMI8998_V1_REV_WA)
return 0;
+ if (voltage_mv == chip->last_recharge_volt_mv)
+ return 0;
+
fg_dbg(chip, FG_STATUS, "Setting recharge voltage to %dmV\n",
voltage_mv);
fg_encode(chip->sp, FG_SRAM_RECHARGE_VBATT_THR, voltage_mv, &buf);
@@ -1604,6 +1654,7 @@ static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
return rc;
}
+ chip->last_recharge_volt_mv = voltage_mv;
return 0;
}
@@ -1611,7 +1662,7 @@ static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
static int fg_charge_full_update(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
- int rc, msoc, bsoc, recharge_soc;
+ int rc, msoc, bsoc, recharge_soc, msoc_raw;
u8 full_soc[2] = {0xFF, 0xFF};
if (!chip->dt.hold_soc_while_full)
@@ -1647,6 +1698,7 @@ static int fg_charge_full_update(struct fg_chip *chip)
pr_err("Error in getting msoc, rc=%d\n", rc);
goto out;
}
+ msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
msoc, bsoc, chip->health, chip->charge_status,
@@ -1670,7 +1722,7 @@ static int fg_charge_full_update(struct fg_chip *chip)
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
}
- } else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+ } else if (msoc_raw < recharge_soc && chip->charge_full) {
chip->delta_soc = FULL_CAPACITY - msoc;
/*
@@ -1700,8 +1752,8 @@ static int fg_charge_full_update(struct fg_chip *chip)
rc);
goto out;
}
- fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
- bsoc >> 8, recharge_soc, chip->delta_soc);
+ fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
+ msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
} else {
goto out;
}
@@ -1913,6 +1965,33 @@ static int fg_adjust_recharge_soc(struct fg_chip *chip)
return 0;
}
+static int fg_adjust_recharge_voltage(struct fg_chip *chip)
+{
+ int rc, recharge_volt_mv;
+
+ if (chip->dt.auto_recharge_soc)
+ return 0;
+
+ fg_dbg(chip, FG_STATUS, "health: %d chg_status: %d chg_done: %d\n",
+ chip->health, chip->charge_status, chip->charge_done);
+
+ recharge_volt_mv = chip->dt.recharge_volt_thr_mv;
+
+ /* Lower the recharge voltage in soft JEITA */
+ if (chip->health == POWER_SUPPLY_HEALTH_WARM ||
+ chip->health == POWER_SUPPLY_HEALTH_COOL)
+ recharge_volt_mv -= 200;
+
+ rc = fg_set_recharge_voltage(chip, recharge_volt_mv);
+ if (rc < 0) {
+ pr_err("Error in setting recharge_voltage, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static int fg_slope_limit_config(struct fg_chip *chip, int batt_temp)
{
enum slope_limit_status status;
@@ -2130,102 +2209,67 @@ static int fg_esr_timer_config(struct fg_chip *chip, bool sleep)
return 0;
}
-static void fg_batt_avg_update(struct fg_chip *chip)
+static void fg_ttf_update(struct fg_chip *chip)
{
- if (chip->charge_status == chip->prev_charge_status)
- return;
-
- cancel_delayed_work_sync(&chip->batt_avg_work);
- fg_circ_buf_clr(&chip->ibatt_circ_buf);
- fg_circ_buf_clr(&chip->vbatt_circ_buf);
-
- if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
- chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
- schedule_delayed_work(&chip->batt_avg_work,
- msecs_to_jiffies(2000));
-}
-
-static void status_change_work(struct work_struct *work)
-{
- struct fg_chip *chip = container_of(work,
- struct fg_chip, status_change_work);
+ int rc;
+ int delay_ms;
union power_supply_propval prop = {0, };
- int rc, batt_temp;
+ int online = 0;
- if (!batt_psy_initialized(chip)) {
- fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
- goto out;
- }
+ if (usb_psy_initialized(chip)) {
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc < 0) {
+ pr_err("Couldn't read usb ONLINE prop rc=%d\n", rc);
+ return;
+ }
- rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
- &prop);
- if (rc < 0) {
- pr_err("Error in getting charging status, rc=%d\n", rc);
- goto out;
+ online = online || prop.intval;
}
- chip->prev_charge_status = chip->charge_status;
- chip->charge_status = prop.intval;
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
- if (rc < 0) {
- pr_err("Error in getting charge type, rc=%d\n", rc);
- goto out;
- }
+ if (pc_port_psy_initialized(chip)) {
+ rc = power_supply_get_property(chip->pc_port_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc < 0) {
+ pr_err("Couldn't read pc_port ONLINE prop rc=%d\n", rc);
+ return;
+ }
- chip->charge_type = prop.intval;
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
- if (rc < 0) {
- pr_err("Error in getting charge_done, rc=%d\n", rc);
- goto out;
+ online = online || prop.intval;
}
- chip->charge_done = prop.intval;
- if (chip->cyc_ctr.en)
- schedule_work(&chip->cycle_count_work);
-
- fg_cap_learning_update(chip);
-
- rc = fg_charge_full_update(chip);
- if (rc < 0)
- pr_err("Error in charge_full_update, rc=%d\n", rc);
-
- rc = fg_adjust_recharge_soc(chip);
- if (rc < 0)
- pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
-
- rc = fg_adjust_ki_coeff_dischg(chip);
- if (rc < 0)
- pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
-
- rc = fg_esr_fcc_config(chip);
- if (rc < 0)
- pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
-
- rc = fg_esr_timer_config(chip, false);
- if (rc < 0)
- pr_err("Error in configuring ESR timer, rc=%d\n", rc);
-
- rc = fg_get_battery_temp(chip, &batt_temp);
- if (!rc) {
- rc = fg_slope_limit_config(chip, batt_temp);
- if (rc < 0)
- pr_err("Error in configuring slope limiter rc:%d\n",
- rc);
+ if (dc_psy_initialized(chip)) {
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc < 0) {
+ pr_err("Couldn't read dc ONLINE prop rc=%d\n", rc);
+ return;
+ }
- rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
- if (rc < 0)
- pr_err("Error in configuring ki_coeff_full_soc rc:%d\n",
- rc);
+ online = online || prop.intval;
}
- fg_batt_avg_update(chip);
-out:
- fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
- chip->charge_status, chip->charge_type, chip->charge_done);
- pm_relax(chip->dev);
+ if (chip->online_status == online)
+ return;
+
+ chip->online_status = online;
+ if (online)
+ /* wait 35 seconds for the input to settle */
+ delay_ms = 35000;
+ else
+ /* wait 5 seconds for current to settle during discharge */
+ delay_ms = 5000;
+
+ vote(chip->awake_votable, TTF_PRIMING, true, 0);
+ cancel_delayed_work_sync(&chip->ttf_work);
+ mutex_lock(&chip->ttf.lock);
+ fg_circ_buf_clr(&chip->ttf.ibatt);
+ fg_circ_buf_clr(&chip->ttf.vbatt);
+ chip->ttf.last_ttf = 0;
+ chip->ttf.last_ms = 0;
+ mutex_unlock(&chip->ttf.lock);
+ schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(delay_ms));
}
static void restore_cycle_counter(struct fg_chip *chip)
@@ -2233,6 +2277,9 @@ static void restore_cycle_counter(struct fg_chip *chip)
int rc = 0, i;
u8 data[2];
+ if (!chip->cyc_ctr.en)
+ return;
+
mutex_lock(&chip->cyc_ctr.lock);
for (i = 0; i < BUCKET_COUNT; i++) {
rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2),
@@ -2286,20 +2333,25 @@ static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
rc = fg_sram_write(chip, CYCLE_COUNT_WORD + (bucket / 2),
CYCLE_COUNT_OFFSET + (bucket % 2) * 2, data, 2,
FG_IMA_DEFAULT);
- if (rc < 0)
+ if (rc < 0) {
pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
bucket, rc);
- else
- chip->cyc_ctr.count[bucket] = cyc_count;
+ return rc;
+ }
+
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ fg_dbg(chip, FG_STATUS, "Stored count %d in bucket %d\n", cyc_count,
+ bucket);
+
return rc;
}
-static void cycle_count_work(struct work_struct *work)
+static void fg_cycle_counter_update(struct fg_chip *chip)
{
int rc = 0, bucket, i, batt_soc;
- struct fg_chip *chip = container_of(work,
- struct fg_chip,
- cycle_count_work);
+
+ if (!chip->cyc_ctr.en)
+ return;
mutex_lock(&chip->cyc_ctr.lock);
rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
@@ -2311,45 +2363,30 @@ static void cycle_count_work(struct work_struct *work)
/* We need only the most significant byte here */
batt_soc = (u32)batt_soc >> 24;
- if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
- /* Find out which bucket the SOC falls in */
- bucket = batt_soc / BUCKET_SOC_PCT;
- pr_debug("batt_soc: %d bucket: %d\n", batt_soc, bucket);
+ /* Find out which bucket the SOC falls in */
+ bucket = batt_soc / BUCKET_SOC_PCT;
- /*
- * If we've started counting for the previous bucket,
- * then store the counter for that bucket if the
- * counter for current bucket is getting started.
- */
- if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
- !chip->cyc_ctr.started[bucket]) {
- rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
- if (rc < 0) {
- pr_err("Error in storing cycle_ctr rc: %d\n",
- rc);
- goto out;
- } else {
- chip->cyc_ctr.started[bucket - 1] = false;
- chip->cyc_ctr.last_soc[bucket - 1] = 0;
- }
- }
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
if (!chip->cyc_ctr.started[bucket]) {
chip->cyc_ctr.started[bucket] = true;
chip->cyc_ctr.last_soc[bucket] = batt_soc;
}
- } else {
+ } else if (chip->charge_done || !is_input_present(chip)) {
for (i = 0; i < BUCKET_COUNT; i++) {
if (chip->cyc_ctr.started[i] &&
- batt_soc > chip->cyc_ctr.last_soc[i]) {
+ batt_soc > chip->cyc_ctr.last_soc[i] + 2) {
rc = fg_inc_store_cycle_ctr(chip, i);
if (rc < 0)
pr_err("Error in storing cycle_ctr rc: %d\n",
rc);
chip->cyc_ctr.last_soc[i] = 0;
+ chip->cyc_ctr.started[i] = false;
}
- chip->cyc_ctr.started[i] = false;
}
}
+
+ fg_dbg(chip, FG_STATUS, "batt_soc: %d bucket: %d chg_status: %d\n",
+ batt_soc, bucket, chip->charge_status);
out:
mutex_unlock(&chip->cyc_ctr.lock);
}
@@ -2370,6 +2407,87 @@ static int fg_get_cycle_count(struct fg_chip *chip)
return count;
}
+static void status_change_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip, status_change_work);
+ union power_supply_propval prop = {0, };
+ int rc, batt_temp;
+
+ if (!batt_psy_initialized(chip)) {
+ fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
+ goto out;
+ }
+
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+ &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charging status, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->prev_charge_status = chip->charge_status;
+ chip->charge_status = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge type, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->charge_type = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge_done, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->charge_done = prop.intval;
+ fg_cycle_counter_update(chip);
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_recharge_soc(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+ rc = fg_adjust_recharge_voltage(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting recharge_voltage, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+ rc = fg_esr_fcc_config(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
+
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (!rc) {
+ rc = fg_slope_limit_config(chip, batt_temp);
+ if (rc < 0)
+ pr_err("Error in configuring slope limiter rc:%d\n",
+ rc);
+
+ rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
+ if (rc < 0)
+ pr_err("Error in configuring ki_coeff_full_soc rc:%d\n",
+ rc);
+ }
+
+ fg_ttf_update(chip);
+
+out:
+ fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+ chip->charge_status, chip->charge_type, chip->charge_done);
+ pm_relax(chip->dev);
+}
+
static int fg_bp_params_config(struct fg_chip *chip)
{
int rc = 0;
@@ -2730,45 +2848,19 @@ static struct kernel_param_ops fg_restart_ops = {
module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
-#define BATT_AVG_POLL_PERIOD_MS 10000
-static void batt_avg_work(struct work_struct *work)
-{
- struct fg_chip *chip = container_of(work, struct fg_chip,
- batt_avg_work.work);
- int rc, ibatt_now, vbatt_now;
-
- mutex_lock(&chip->batt_avg_lock);
- rc = fg_get_battery_current(chip, &ibatt_now);
- if (rc < 0) {
- pr_err("failed to get battery current, rc=%d\n", rc);
- goto reschedule;
- }
-
- rc = fg_get_battery_voltage(chip, &vbatt_now);
- if (rc < 0) {
- pr_err("failed to get battery voltage, rc=%d\n", rc);
- goto reschedule;
- }
-
- fg_circ_buf_add(&chip->ibatt_circ_buf, ibatt_now);
- fg_circ_buf_add(&chip->vbatt_circ_buf, vbatt_now);
-
-reschedule:
- mutex_unlock(&chip->batt_avg_lock);
- schedule_delayed_work(&chip->batt_avg_work,
- msecs_to_jiffies(BATT_AVG_POLL_PERIOD_MS));
-}
-
#define HOURS_TO_SECONDS 3600
#define OCV_SLOPE_UV 10869
#define MILLI_UNIT 1000
#define MICRO_UNIT 1000000
-static int fg_get_time_to_full(struct fg_chip *chip, int *val)
+#define NANO_UNIT 1000000000
+static int fg_get_time_to_full_locked(struct fg_chip *chip, int *val)
{
- int rc, ibatt_avg, vbatt_avg, rbatt, msoc, ocv_cc2cv, full_soc,
- act_cap_uah;
- s32 i_cc2cv, soc_cc2cv, ln_val, centi_tau_scale;
- s64 t_predicted_cc = 0, t_predicted_cv = 0;
+ int rc, ibatt_avg, vbatt_avg, rbatt, msoc, full_soc, act_cap_mah,
+ i_cc2cv, soc_cc2cv, tau, divisor, iterm, ttf_mode,
+ i, soc_per_step, msoc_this_step, msoc_next_step,
+ ibatt_this_step, t_predicted_this_step, ttf_slope,
+ t_predicted_cv, t_predicted = 0;
+ s64 delta_ms;
if (chip->bp.float_volt_uv <= 0) {
pr_err("battery profile is not loaded\n");
@@ -2787,48 +2879,53 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
}
fg_dbg(chip, FG_TTF, "msoc=%d\n", msoc);
+ /* the battery is considered full if the SOC is 100% */
if (msoc >= 100) {
*val = 0;
return 0;
}
- mutex_lock(&chip->batt_avg_lock);
- rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
- if (rc < 0) {
- /* try to get instantaneous current */
- rc = fg_get_battery_current(chip, &ibatt_avg);
- if (rc < 0) {
- mutex_unlock(&chip->batt_avg_lock);
- pr_err("failed to get battery current, rc=%d\n", rc);
- return rc;
- }
+ if (is_qnovo_en(chip))
+ ttf_mode = TTF_MODE_QNOVO;
+ else
+ ttf_mode = TTF_MODE_NORMAL;
+
+ /* when switching TTF algorithms the TTF needs to be reset */
+ if (chip->ttf.mode != ttf_mode) {
+ fg_circ_buf_clr(&chip->ttf.ibatt);
+ fg_circ_buf_clr(&chip->ttf.vbatt);
+ chip->ttf.last_ttf = 0;
+ chip->ttf.last_ms = 0;
+ chip->ttf.mode = ttf_mode;
}
- rc = fg_circ_buf_avg(&chip->vbatt_circ_buf, &vbatt_avg);
+ /* at least 10 samples are required to produce a stable IBATT */
+ if (chip->ttf.ibatt.size < 10) {
+ *val = -1;
+ return 0;
+ }
+
+ rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg);
if (rc < 0) {
- /* try to get instantaneous voltage */
- rc = fg_get_battery_voltage(chip, &vbatt_avg);
- if (rc < 0) {
- mutex_unlock(&chip->batt_avg_lock);
- pr_err("failed to get battery voltage, rc=%d\n", rc);
- return rc;
- }
+ pr_err("failed to get IBATT AVG rc=%d\n", rc);
+ return rc;
}
- mutex_unlock(&chip->batt_avg_lock);
- fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);
+ rc = fg_circ_buf_median(&chip->ttf.vbatt, &vbatt_avg);
+ if (rc < 0) {
+ pr_err("failed to get VBATT AVG rc=%d\n", rc);
+ return rc;
+ }
- /* clamp ibatt_avg to -150mA */
- if (ibatt_avg > -150000)
- ibatt_avg = -150000;
- fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);
+ ibatt_avg = -ibatt_avg / MILLI_UNIT;
+ vbatt_avg /= MILLI_UNIT;
- /* reverse polarity to be consistent with unsigned current settings */
- ibatt_avg = abs(ibatt_avg);
+ /* clamp ibatt_avg to iterm */
+ if (ibatt_avg < abs(chip->dt.sys_term_curr_ma))
+ ibatt_avg = abs(chip->dt.sys_term_curr_ma);
- /* estimated battery current at the CC to CV transition */
- i_cc2cv = div_s64((s64)ibatt_avg * vbatt_avg, chip->bp.float_volt_uv);
- fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);
+ fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);
+ fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);
rc = fg_get_battery_resistance(chip, &rbatt);
if (rc < 0) {
@@ -2836,19 +2933,14 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
return rc;
}
- /* clamp rbatt to 50mOhms */
- if (rbatt < 50000)
- rbatt = 50000;
-
+ rbatt /= MILLI_UNIT;
fg_dbg(chip, FG_TTF, "rbatt=%d\n", rbatt);
- rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
if (rc < 0) {
pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
return rc;
}
- act_cap_uah *= MILLI_UNIT;
- fg_dbg(chip, FG_TTF, "actual_capacity_uah=%d\n", act_cap_uah);
rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
if (rc < 0) {
@@ -2857,69 +2949,148 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
}
full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
FULL_SOC_RAW);
- fg_dbg(chip, FG_TTF, "full_soc=%d\n", full_soc);
+ act_cap_mah = full_soc * act_cap_mah / 100;
+ fg_dbg(chip, FG_TTF, "act_cap_mah=%d\n", act_cap_mah);
+
+ /* estimated battery current at the CC to CV transition */
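+ /* approximate constant charge power: I_cc2cv ~= I_avg * V_avg / V_float */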
+ switch (chip->ttf.mode) {
+ case TTF_MODE_NORMAL:
+ i_cc2cv = ibatt_avg * vbatt_avg /
+ max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT);
+ break;
+ case TTF_MODE_QNOVO:
+ i_cc2cv = min(
+ chip->ttf.cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT,
+ ibatt_avg * vbatt_avg /
+ max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT));
+ break;
+ default:
+ pr_err("TTF mode %d is not supported\n", chip->ttf.mode);
+ break;
+ }
+ fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);
/* if we are already in CV state then we can skip estimating CC */
if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
- goto skip_cc_estimate;
+ goto cv_estimate;
- /* if the charger is current limited then use power approximation */
- if (ibatt_avg > chip->bp.fastchg_curr_ma * MILLI_UNIT - 50000)
- ocv_cc2cv = div_s64((s64)rbatt * ibatt_avg, MICRO_UNIT);
- else
- ocv_cc2cv = div_s64((s64)rbatt * i_cc2cv, MICRO_UNIT);
- ocv_cc2cv = chip->bp.float_volt_uv - ocv_cc2cv;
- fg_dbg(chip, FG_TTF, "ocv_cc2cv=%d\n", ocv_cc2cv);
-
- soc_cc2cv = div_s64(chip->bp.float_volt_uv - ocv_cc2cv, OCV_SLOPE_UV);
/* estimated SOC at the CC to CV transition */
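+ /*
+ * The IR drop at the transition (rbatt * i_cc2cv, in uV) divided by
+ * the ~10.87mV/% OCV slope gives how far below 100% SOC CV begins.
+ */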
+ soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV);
soc_cc2cv = 100 - soc_cc2cv;
fg_dbg(chip, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv);
- /* the esimated SOC may be lower than the current SOC */
- if (soc_cc2cv - msoc <= 0)
- goto skip_cc_estimate;
+ switch (chip->ttf.mode) {
+ case TTF_MODE_NORMAL:
+ if (soc_cc2cv - msoc <= 0)
+ goto cv_estimate;
+
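+ /*
+ * CC time: capacity remaining over the CC window divided by the
+ * mean of the present current and the CC-to-CV current.
+ */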
+ divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100);
+ t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) *
+ HOURS_TO_SECONDS, divisor);
+ break;
+ case TTF_MODE_QNOVO:
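+ /*
+ * Walk the remaining CC steps; each step charges at the lower of
+ * the programmed step current and the measured average current.
+ */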
+ soc_per_step = 100 / MAX_CC_STEPS;
+ for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) {
+ msoc_next_step = (i + 1) * soc_per_step;
+ if (i == msoc / soc_per_step)
+ msoc_this_step = msoc;
+ else
+ msoc_this_step = i * soc_per_step;
+
+ /* scale ibatt by 85% to account for discharge pulses */
+ ibatt_this_step = min(
+ chip->ttf.cc_step.arr[i] / MILLI_UNIT,
+ ibatt_avg) * 85 / 100;
+ divisor = max(100, ibatt_this_step * 100);
+ t_predicted_this_step = div_s64((s64)act_cap_mah *
+ (msoc_next_step - msoc_this_step) *
+ HOURS_TO_SECONDS, divisor);
+ t_predicted += t_predicted_this_step;
+ fg_dbg(chip, FG_TTF, "[%d, %d] ma=%d t=%d\n",
+ msoc_this_step, msoc_next_step,
+ ibatt_this_step, t_predicted_this_step);
+ }
+ break;
+ default:
+ pr_err("TTF mode %d is not supported\n", chip->ttf.mode);
+ break;
+ }
- t_predicted_cc = div_s64((s64)full_soc * act_cap_uah, 100);
- t_predicted_cc = div_s64(t_predicted_cc * (soc_cc2cv - msoc), 100);
- t_predicted_cc *= HOURS_TO_SECONDS;
- t_predicted_cc = div_s64(t_predicted_cc, (ibatt_avg + i_cc2cv) / 2);
+cv_estimate:
+ fg_dbg(chip, FG_TTF, "t_predicted_cc=%d\n", t_predicted);
-skip_cc_estimate:
- fg_dbg(chip, FG_TTF, "t_predicted_cc=%lld\n", t_predicted_cc);
+ iterm = max(100, abs(chip->dt.sys_term_curr_ma) + 200);
+ fg_dbg(chip, FG_TTF, "iterm=%d\n", iterm);
- /* CV estimate starts here */
- if (chip->charge_type >= POWER_SUPPLY_CHARGE_TYPE_TAPER)
- ln_val = ibatt_avg / (abs(chip->dt.sys_term_curr_ma) + 200);
+ if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+ tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm);
else
- ln_val = i_cc2cv / (abs(chip->dt.sys_term_curr_ma) + 200);
+ tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm);
- if (msoc < 95)
- centi_tau_scale = 100;
- else
- centi_tau_scale = 20 * (100 - msoc);
-
- fg_dbg(chip, FG_TTF, "ln_in=%d\n", ln_val);
- rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), ln_val, &ln_val);
- fg_dbg(chip, FG_TTF, "ln_out=%d\n", ln_val);
- t_predicted_cv = div_s64((s64)act_cap_uah * rbatt, MICRO_UNIT);
- t_predicted_cv = div_s64(t_predicted_cv * centi_tau_scale, 100);
- t_predicted_cv = div_s64(t_predicted_cv * ln_val, MILLI_UNIT);
- t_predicted_cv = div_s64(t_predicted_cv * HOURS_TO_SECONDS, MICRO_UNIT);
- fg_dbg(chip, FG_TTF, "t_predicted_cv=%lld\n", t_predicted_cv);
- *val = t_predicted_cc + t_predicted_cv;
+ rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), tau, &tau);
+ if (rc < 0) {
+ pr_err("failed to interpolate tau rc=%d\n", rc);
+ return rc;
+ }
+
+ /* above 95% SOC, scale tau linearly down to zero at 100% SOC */
+ if (msoc >= 95)
+ tau = tau * 2 * (100 - msoc) / 10;
+
+ fg_dbg(chip, FG_TTF, "tau=%d\n", tau);
+ t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau *
+ HOURS_TO_SECONDS, NANO_UNIT);
+ fg_dbg(chip, FG_TTF, "t_predicted_cv=%d\n", t_predicted_cv);
+ t_predicted += t_predicted_cv;
+
+ fg_dbg(chip, FG_TTF, "t_predicted_prefilter=%d\n", t_predicted);
+ if (chip->ttf.last_ms != 0) {
+ delta_ms = ktime_ms_delta(ktime_get_boottime(),
+ ms_to_ktime(chip->ttf.last_ms));
+ if (delta_ms > 10000) {
+ ttf_slope = div64_s64(
+ (s64)(t_predicted - chip->ttf.last_ttf) *
+ MICRO_UNIT, delta_ms);
+ if (ttf_slope > -100)
+ ttf_slope = -100;
+ else if (ttf_slope < -2000)
+ ttf_slope = -2000;
+
+ t_predicted = div_s64(
+ (s64)ttf_slope * delta_ms, MICRO_UNIT) +
+ chip->ttf.last_ttf;
+ fg_dbg(chip, FG_TTF, "ttf_slope=%d\n", ttf_slope);
+ } else {
+ t_predicted = chip->ttf.last_ttf;
+ }
+ }
+
+ /* never report a negative TTF */
+ if (t_predicted < 0)
+ t_predicted = 0;
+
+ fg_dbg(chip, FG_TTF, "t_predicted_postfilter=%d\n", t_predicted);
+ *val = t_predicted;
return 0;
}
+static int fg_get_time_to_full(struct fg_chip *chip, int *val)
+{
+ int rc;
+
+ mutex_lock(&chip->ttf.lock);
+ rc = fg_get_time_to_full_locked(chip, val);
+ mutex_unlock(&chip->ttf.lock);
+ return rc;
+}
+
#define CENTI_ICORRECT_C0 105
#define CENTI_ICORRECT_C1 20
static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
{
- int rc, ibatt_avg, msoc, act_cap_uah;
- s32 divisor;
- s64 t_predicted;
+ int rc, ibatt_avg, msoc, full_soc, act_cap_mah, divisor;
- rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
+ rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg);
if (rc < 0) {
/* try to get instantaneous current */
rc = fg_get_battery_current(chip, &ibatt_avg);
@@ -2929,31 +3100,36 @@ static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
}
}
- /* clamp ibatt_avg to 150mA */
- if (ibatt_avg < 150000)
- ibatt_avg = 150000;
+ ibatt_avg /= MILLI_UNIT;
+ /* clamp ibatt_avg to 100mA */
+ if (ibatt_avg < 100)
+ ibatt_avg = 100;
- rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
if (rc < 0) {
pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
return rc;
}
- act_cap_uah *= MILLI_UNIT;
- rc = fg_get_prop_capacity(chip, &msoc);
+ rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
if (rc < 0) {
- pr_err("Error in getting capacity, rc=%d\n", rc);
+ pr_err("failed to get full soc rc=%d\n", rc);
return rc;
}
+ full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
+ FULL_SOC_RAW);
+ act_cap_mah = full_soc * act_cap_mah / 100;
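+ /*
+ * Correct the average current for SOC-dependent load:
+ * I_eff = I_avg * (1.05 + 0.20 * msoc / 100).
+ */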
- t_predicted = div_s64((s64)msoc * act_cap_uah, 100);
- t_predicted *= HOURS_TO_SECONDS;
divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
- divisor = div_s64((s64)divisor * ibatt_avg, 10000);
- if (divisor > 0)
- t_predicted = div_s64(t_predicted, divisor);
-
- *val = t_predicted;
+ divisor = ibatt_avg * divisor / 100;
+ divisor = max(100, divisor);
+ *val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor;
return 0;
}
@@ -3032,6 +3208,150 @@ static int fg_esr_validate(struct fg_chip *chip)
return 0;
}
+static int fg_force_esr_meas(struct fg_chip *chip)
+{
+ int rc;
+ int esr_uohms;
+
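+ /*
+ * Sequence: enable SW ESR extraction, clear the Qnovo load-register
+ * control bit, assert the force-ESR request bits, wait ~1.5s for
+ * the measurement, then restore the original configuration.
+ */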
+ /* force esr extraction enable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to enable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Error in configuring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ /* wait 1.5 seconds for hw to measure ESR */
+ msleep(1500);
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ 0);
+ if (rc < 0) {
+ pr_err("Error in restoring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
+ if (rc < 0) {
+ pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* force esr extraction disable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), 0,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to disable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_get_battery_resistance(chip, &esr_uohms);
+ fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
+
+ return rc;
+}
+
+static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
+{
+ int rc;
+
+ /* disable SW ESR extraction while Qnovo is enabled, re-enable it otherwise */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET,
+ BIT(0), qnovo_enable ? 0 : BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0)
+ pr_err("Error in configuring esr extraction rc=%d\n", rc);
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT,
+ qnovo_enable ? LD_REG_CTRL_BIT : 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+ fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n");
+ return 0;
+}
+
+static void ttf_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ ttf_work.work);
+ int rc, ibatt_now, vbatt_now, ttf;
+ ktime_t ktime_now;
+
+ mutex_lock(&chip->ttf.lock);
+ if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+ chip->charge_status != POWER_SUPPLY_STATUS_DISCHARGING)
+ goto end_work;
+
+ rc = fg_get_battery_current(chip, &ibatt_now);
+ if (rc < 0) {
+ pr_err("failed to get battery current, rc=%d\n", rc);
+ goto end_work;
+ }
+
+ rc = fg_get_battery_voltage(chip, &vbatt_now);
+ if (rc < 0) {
+ pr_err("failed to get battery voltage, rc=%d\n", rc);
+ goto end_work;
+ }
+
+ fg_circ_buf_add(&chip->ttf.ibatt, ibatt_now);
+ fg_circ_buf_add(&chip->ttf.vbatt, vbatt_now);
+
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_get_time_to_full_locked(chip, &ttf);
+ if (rc < 0) {
+ pr_err("failed to get ttf, rc=%d\n", rc);
+ goto end_work;
+ }
+
+ /* keep the wake lock and prime the IBATT and VBATT buffers */
+ if (ttf < 0) {
+ /* delay for one FG cycle */
+ schedule_delayed_work(&chip->ttf_work,
+ msecs_to_jiffies(1500));
+ mutex_unlock(&chip->ttf.lock);
+ return;
+ }
+
+ /* update the TTF reference point every minute */
+ ktime_now = ktime_get_boottime();
+ if (ktime_ms_delta(ktime_now,
+ ms_to_ktime(chip->ttf.last_ms)) > 60000 ||
+ chip->ttf.last_ms == 0) {
+ chip->ttf.last_ttf = ttf;
+ chip->ttf.last_ms = ktime_to_ms(ktime_now);
+ }
+ }
+
+ /* reschedule every 10 seconds */
+ schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(10000));
+end_work:
+ vote(chip->awake_votable, TTF_PRIMING, false, 0);
+ mutex_unlock(&chip->ttf.lock);
+}
+
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -3108,6 +3428,20 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_get_sram_prop(chip, FG_SRAM_VBATT_FULL, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_CC_STEP:
+ if ((chip->ttf.cc_step.sel >= 0) &&
+ (chip->ttf.cc_step.sel < MAX_CC_STEPS)) {
+ pval->intval =
+ chip->ttf.cc_step.arr[chip->ttf.cc_step.sel];
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ chip->ttf.cc_step.sel);
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
+ pval->intval = chip->ttf.cc_step.sel;
+ break;
default:
pr_err("unsupported property %d\n", psp);
rc = -EINVAL;
@@ -3140,6 +3474,32 @@ static int fg_psy_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_set_constant_chg_voltage(chip, pval->intval);
break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ rc = fg_force_esr_meas(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+ rc = fg_prepare_for_qnovo(chip, pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP:
+ if ((chip->ttf.cc_step.sel >= 0) &&
+ (chip->ttf.cc_step.sel < MAX_CC_STEPS)) {
+ chip->ttf.cc_step.arr[chip->ttf.cc_step.sel] =
+ pval->intval;
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ chip->ttf.cc_step.sel);
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
+ if ((pval->intval >= 0) && (pval->intval < MAX_CC_STEPS)) {
+ chip->ttf.cc_step.sel = pval->intval;
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ pval->intval);
+ return -EINVAL;
+ }
+ break;
default:
break;
}
@@ -3153,6 +3513,8 @@ static int fg_property_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_CC_STEP:
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
return 1;
default:
break;
@@ -3213,6 +3575,8 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_SOC_REPORTING_READY,
POWER_SUPPLY_PROP_DEBUG_BATTERY,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CC_STEP,
+ POWER_SUPPLY_PROP_CC_STEP_SEL,
};
static const struct power_supply_desc fg_psy_desc = {
@@ -3423,8 +3787,7 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
- if (chip->cyc_ctr.en)
- restore_cycle_counter(chip);
+ restore_cycle_counter(chip);
if (chip->dt.jeita_hyst_temp >= 0) {
val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
@@ -3651,6 +4014,11 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
if (rc < 0)
pr_err("Error in adjusting timebase, rc=%d\n", rc);
+ rc = fg_adjust_recharge_voltage(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting recharge_voltage, rc=%d\n",
+ rc);
+
chip->last_batt_temp = batt_temp;
power_supply_changed(chip->batt_psy);
}
@@ -3698,8 +4066,7 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
int rc;
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
- if (chip->cyc_ctr.en)
- schedule_work(&chip->cycle_count_work);
+ fg_cycle_counter_update(chip);
if (chip->cl.active)
fg_cap_learning_update(chip);
@@ -4394,6 +4761,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
chip->charge_status = -EINVAL;
chip->prev_charge_status = -EINVAL;
chip->ki_coeff_full_soc = -EINVAL;
+ chip->online_status = -EINVAL;
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
if (!chip->regmap) {
dev_err(chip->dev, "Parent regmap is unavailable\n");
@@ -4462,14 +4830,13 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->sram_rw_lock);
mutex_init(&chip->cyc_ctr.lock);
mutex_init(&chip->cl.lock);
- mutex_init(&chip->batt_avg_lock);
+ mutex_init(&chip->ttf.lock);
mutex_init(&chip->charge_full_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
INIT_WORK(&chip->status_change_work, status_change_work);
- INIT_WORK(&chip->cycle_count_work, cycle_count_work);
- INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work);
+ INIT_DELAYED_WORK(&chip->ttf_work, ttf_work);
INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
rc = fg_memif_init(chip);
@@ -4566,7 +4933,7 @@ static int fg_gen3_suspend(struct device *dev)
if (rc < 0)
pr_err("Error in configuring ESR timer, rc=%d\n", rc);
- cancel_delayed_work_sync(&chip->batt_avg_work);
+ cancel_delayed_work_sync(&chip->ttf_work);
if (fg_sram_dump)
cancel_delayed_work_sync(&chip->sram_dump_work);
return 0;
@@ -4581,9 +4948,7 @@ static int fg_gen3_resume(struct device *dev)
if (rc < 0)
pr_err("Error in configuring ESR timer, rc=%d\n", rc);
- fg_circ_buf_clr(&chip->ibatt_circ_buf);
- fg_circ_buf_clr(&chip->vbatt_circ_buf);
- schedule_delayed_work(&chip->batt_avg_work, 0);
+ schedule_delayed_work(&chip->ttf_work, 0);
if (fg_sram_dump)
schedule_delayed_work(&chip->sram_dump_work,
msecs_to_jiffies(fg_sram_dump_period_ms));
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index eb97eb0ff2ac..b20807990efc 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -20,6 +20,7 @@
#include <linux/of_irq.h>
#include <linux/qpnp/qpnp-revid.h>
#include <linux/pmic-voter.h>
+#include <linux/delay.h>
#define QNOVO_REVISION1 0x00
#define QNOVO_REVISION2 0x01
@@ -114,6 +115,17 @@
#define OK_TO_QNOVO_VOTER "ok_to_qnovo_voter"
#define QNOVO_VOTER "qnovo_voter"
+#define FG_AVAILABLE_VOTER "FG_AVAILABLE_VOTER"
+#define QNOVO_OVERALL_VOTER "QNOVO_OVERALL_VOTER"
+#define QNI_PT_VOTER "QNI_PT_VOTER"
+#define ESR_VOTER "ESR_VOTER"
+
+#define HW_OK_TO_QNOVO_VOTER "HW_OK_TO_QNOVO_VOTER"
+#define CHG_READY_VOTER "CHG_READY_VOTER"
+#define USB_READY_VOTER "USB_READY_VOTER"
+#define DC_READY_VOTER "DC_READY_VOTER"
+
+#define PT_RESTART_VOTER "PT_RESTART_VOTER"
struct qnovo_dt_props {
bool external_rsense;
@@ -127,6 +139,10 @@ struct qnovo {
struct qnovo_dt_props dt;
struct device *dev;
struct votable *disable_votable;
+ struct votable *pt_dis_votable;
+ struct votable *not_ok_to_qnovo_votable;
+ struct votable *chg_ready_votable;
+ struct votable *awake_votable;
struct class qnovo_class;
struct pmic_revid_data *pmic_rev_id;
u32 wa_flags;
@@ -138,10 +154,18 @@ struct qnovo {
s64 v_gain_mega;
struct notifier_block nb;
struct power_supply *batt_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
- bool ok_to_qnovo;
+ int usb_present;
+ int dc_present;
+ struct delayed_work usb_debounce_work;
+ struct delayed_work dc_debounce_work;
+
+ struct delayed_work ptrain_restart_work;
};
static int debug_mask;
@@ -229,6 +253,39 @@ static bool is_batt_available(struct qnovo *chip)
return true;
}
+static bool is_fg_available(struct qnovo *chip)
+{
+ if (!chip->bms_psy)
+ chip->bms_psy = power_supply_get_by_name("bms");
+
+ if (!chip->bms_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_usb_available(struct qnovo *chip)
+{
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (!chip->usb_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_dc_available(struct qnovo *chip)
+{
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (!chip->dc_psy)
+ return false;
+
+ return true;
+}
+
static int qnovo_batt_psy_update(struct qnovo *chip, bool disable)
{
union power_supply_propval pval = {0};
@@ -281,10 +338,86 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
return -EINVAL;
}
+ /*
+ * FG must be available before Qnovo can be enabled;
+ * FG_AVAILABLE_VOTER keeps it disabled otherwise.
+ */
+
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+ &pval);
+
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, disable, 0);
rc = qnovo_batt_psy_update(chip, disable);
return rc;
}
+static int pt_dis_votable_cb(struct votable *votable, void *data, int disable,
+ const char *client)
+{
+ struct qnovo *chip = data;
+ int rc;
+
+ if (disable) {
+ cancel_delayed_work_sync(&chip->ptrain_restart_work);
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
+ }
+
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ (bool)disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+ (bool)disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
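+ /* after enabling PT, verify shortly that the pulse train actually started */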
+ if (!disable) {
+ vote(chip->awake_votable, PT_RESTART_VOTER, true, 0);
+ schedule_delayed_work(&chip->ptrain_restart_work,
+ msecs_to_jiffies(20));
+ }
+
+ return 0;
+}
+
+static int not_ok_to_qnovo_cb(struct votable *votable, void *data,
+ int not_ok_to_qnovo,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->disable_votable, OK_TO_QNOVO_VOTER, not_ok_to_qnovo, 0);
+ if (not_ok_to_qnovo)
+ vote(chip->disable_votable, USER_VOTER, true, 0);
+
+ kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ return 0;
+}
+
+static int chg_ready_cb(struct votable *votable, void *data, int ready,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->not_ok_to_qnovo_votable, CHG_READY_VOTER, !ready, 0);
+
+ return 0;
+}
+
+static int awake_cb(struct votable *votable, void *data, int awake,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ if (awake)
+ pm_stay_awake(chip->dev);
+ else
+ pm_relax(chip->dev);
+
+ return 0;
+}
+
static int qnovo_parse_dt(struct qnovo *chip)
{
struct device_node *node = chip->dev->of_node;
@@ -626,8 +759,9 @@ static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
char *buf)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ int val = get_effective_result(chip->not_ok_to_qnovo_votable);
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
+ return snprintf(buf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
@@ -656,21 +790,10 @@ static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
char *ubuf)
{
- int i = attr - qnovo_attributes;
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- u8 buf[2] = {0, 0};
- u16 regval;
- int rc;
-
- rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
- if (rc < 0) {
- pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
- return -EINVAL;
- }
- regval = buf[1] << 8 | buf[0];
+ int val = get_effective_result(chip->pt_dis_votable);
- return snprintf(ubuf, PAGE_SIZE, "%d\n",
- (int)(regval & QNOVO_PTRAIN_EN_BIT));
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
@@ -678,21 +801,12 @@ static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
unsigned long val;
- int rc = 0;
-
- if (get_effective_result(chip->disable_votable))
- return -EINVAL;
if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
- rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
- (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
- if (rc < 0) {
- dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
- (bool)val ? "enable" : "disable", rc);
- return rc;
- }
+ /* val == 0 means userspace wants PT disabled, so vote true to disable it */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, val ? false : true, 0);
return count;
}
@@ -1116,41 +1230,146 @@ static int qnovo_update_status(struct qnovo *chip)
{
u8 val = 0;
int rc;
- bool ok_to_qnovo;
- bool changed = false;
+ bool hw_ok_to_qnovo;
rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
if (rc < 0) {
pr_err("Couldn't read error sts rc = %d\n", rc);
- ok_to_qnovo = false;
+ hw_ok_to_qnovo = false;
} else {
/*
* For CV mode keep qnovo enabled, userspace is expected to
* disable it after few runs
*/
- ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? true : false;
+ hw_ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ?
+ true : false;
}
- if (chip->ok_to_qnovo ^ ok_to_qnovo) {
+ vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER,
+ !hw_ok_to_qnovo, 0);
+ return 0;
+}
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0);
- if (!ok_to_qnovo)
- vote(chip->disable_votable, USER_VOTER, true, 0);
+static void usb_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, usb_debounce_work.work);
- chip->ok_to_qnovo = ok_to_qnovo;
- changed = true;
- }
+ vote(chip->chg_ready_votable, USB_READY_VOTER, true, 0);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+}
- return changed;
+static void dc_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, dc_debounce_work.work);
+
+ vote(chip->chg_ready_votable, DC_READY_VOTER, true, 0);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
}
+#define DEBOUNCE_MS 15000 /* 15 seconds */
static void status_change_work(struct work_struct *work)
{
struct qnovo *chip = container_of(work,
struct qnovo, status_change_work);
+ union power_supply_propval pval;
+ bool usb_present = false, dc_present = false;
+ int rc;
+
+ if (is_fg_available(chip))
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, false, 0);
+
+ if (is_usb_available(chip)) {
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ usb_present = (rc < 0) ? 0 : pval.intval;
+ }
+
+ if (chip->usb_present && !usb_present) {
+ /* removal */
+ chip->usb_present = 0;
+ cancel_delayed_work_sync(&chip->usb_debounce_work);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+ } else if (!chip->usb_present && usb_present) {
+ /* insertion */
+ chip->usb_present = 1;
+ vote(chip->awake_votable, USB_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->usb_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ if (is_dc_available(chip)) {
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+ dc_present = (rc < 0) ? 0 : pval.intval;
+ }
- if (qnovo_update_status(chip))
- kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ if (usb_present)
+ dc_present = 0;
+
+ if (chip->dc_present && !dc_present) {
+ /* removal */
+ chip->dc_present = 0;
+ cancel_delayed_work_sync(&chip->dc_debounce_work);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, DC_READY_VOTER, false, 0);
+ } else if (!chip->dc_present && dc_present) {
+ /* insertion */
+ chip->dc_present = 1;
+ vote(chip->awake_votable, DC_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->dc_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ qnovo_update_status(chip);
+}
+
+static void ptrain_restart_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, ptrain_restart_work.work);
+ u8 pt_t1, pt_t2;
+ int rc;
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t1, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ /* pttime increments every 2 seconds */
+ msleep(2100);
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t2, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ if (pt_t1 != pt_t2)
+ goto clean_up;
+
+ /* Toggle pt enable to restart pulse train */
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+ msleep(1000);
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+
+clean_up:
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
}
static int qnovo_notifier_call(struct notifier_block *nb,
@@ -1162,7 +1381,10 @@ static int qnovo_notifier_call(struct notifier_block *nb,
if (ev != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
- if (strcmp(psy->desc->name, "battery") == 0)
+ if (strcmp(psy->desc->name, "battery") == 0
+ || strcmp(psy->desc->name, "bms") == 0
+ || strcmp(psy->desc->name, "usb") == 0
+ || strcmp(psy->desc->name, "dc") == 0)
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
@@ -1171,7 +1393,34 @@ static int qnovo_notifier_call(struct notifier_block *nb,
static irqreturn_t handle_ptrain_done(int irq, void *data)
{
struct qnovo *chip = data;
+ union power_supply_propval pval = {0};
+ /*
+ * In some cases (e.g. while shutting down) userspace disables Qnovo
+ * by setting qnovo_enable=0. The charger could also be removed, or
+ * there may be an error (i.e. it is not okay to run Qnovo).
+ * Skip the ESR measurement in such situations.
+ */
+
+ if (get_client_vote(chip->disable_votable, USER_VOTER)
+ || get_effective_result(chip->not_ok_to_qnovo_votable) > 0)
+ return IRQ_HANDLED;
+
+ /*
+ * HW clears the pt_en bit once ptrain_done fires. Vote on behalf
+ * of QNI to disable it so that when QNI re-enables it later, the
+ * votable state actually changes and the callback that sets the
+ * bit is invoked.
+ */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+
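+ /* hold the pulse train off while FG runs a forced ESR measurement */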
+ vote(chip->pt_dis_votable, ESR_VOTER, true, 0);
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ &pval);
+
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
qnovo_update_status(chip);
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
return IRQ_HANDLED;
@@ -1186,6 +1435,11 @@ static int qnovo_hw_init(struct qnovo *chip)
u8 val;
vote(chip->disable_votable, USER_VOTER, true, 0);
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, true, 0);
+
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, true, 0);
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
val = 0;
rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
@@ -1349,12 +1603,45 @@ static int qnovo_probe(struct platform_device *pdev)
goto cleanup;
}
+ chip->pt_dis_votable = create_votable("QNOVO_PT_DIS", VOTE_SET_ANY,
+ pt_dis_votable_cb, chip);
+ if (IS_ERR(chip->pt_dis_votable)) {
+ rc = PTR_ERR(chip->pt_dis_votable);
+ goto destroy_disable_votable;
+ }
+
+ chip->not_ok_to_qnovo_votable = create_votable("QNOVO_NOT_OK",
+ VOTE_SET_ANY,
+ not_ok_to_qnovo_cb, chip);
+ if (IS_ERR(chip->not_ok_to_qnovo_votable)) {
+ rc = PTR_ERR(chip->not_ok_to_qnovo_votable);
+ goto destroy_pt_dis_votable;
+ }
+
+ chip->chg_ready_votable = create_votable("QNOVO_CHG_READY",
+ VOTE_SET_ANY,
+ chg_ready_cb, chip);
+ if (IS_ERR(chip->chg_ready_votable)) {
+ rc = PTR_ERR(chip->chg_ready_votable);
+ goto destroy_not_ok_to_qnovo_votable;
+ }
+
+ chip->awake_votable = create_votable("QNOVO_AWAKE", VOTE_SET_ANY,
+ awake_cb, chip);
+ if (IS_ERR(chip->awake_votable)) {
+ rc = PTR_ERR(chip->awake_votable);
+ goto destroy_chg_ready_votable;
+ }
+
INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_DELAYED_WORK(&chip->dc_debounce_work, dc_debounce_work);
+ INIT_DELAYED_WORK(&chip->usb_debounce_work, usb_debounce_work);
+ INIT_DELAYED_WORK(&chip->ptrain_restart_work, ptrain_restart_work);
rc = qnovo_hw_init(chip);
if (rc < 0) {
pr_err("Couldn't initialize hardware rc=%d\n", rc);
- goto destroy_votable;
+ goto destroy_awake_votable;
}
rc = qnovo_register_notifier(chip);
@@ -1390,7 +1677,15 @@ static int qnovo_probe(struct platform_device *pdev)
unreg_notifier:
power_supply_unreg_notifier(&chip->nb);
-destroy_votable:
+destroy_awake_votable:
+ destroy_votable(chip->awake_votable);
+destroy_chg_ready_votable:
+ destroy_votable(chip->chg_ready_votable);
+destroy_not_ok_to_qnovo_votable:
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+destroy_pt_dis_votable:
+ destroy_votable(chip->pt_dis_votable);
+destroy_disable_votable:
destroy_votable(chip->disable_votable);
cleanup:
platform_set_drvdata(pdev, NULL);
@@ -1403,6 +1698,9 @@ static int qnovo_remove(struct platform_device *pdev)
class_unregister(&chip->qnovo_class);
power_supply_unreg_notifier(&chip->nb);
+ destroy_votable(chip->chg_ready_votable);
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+ destroy_votable(chip->pt_dis_votable);
destroy_votable(chip->disable_votable);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 630d02eb7a67..c085256a794a 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -210,6 +210,9 @@ static int smb2_parse_dt(struct smb2 *chip)
chg->step_chg_enabled = of_property_read_bool(node,
"qcom,step-charging-enable");
+ chg->sw_jeita_enabled = of_property_read_bool(node,
+ "qcom,sw-jeita-enable");
+
rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
&chip->dt.wd_bark_time);
if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
@@ -327,7 +330,6 @@ static int smb2_parse_dt(struct smb2 *chip)
static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_VOLTAGE_MIN,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_PD_CURRENT_MAX,
@@ -346,6 +348,9 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
POWER_SUPPLY_PROP_REAL_TYPE,
POWER_SUPPLY_PROP_PR_SWAP,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -377,20 +382,17 @@ static int smb2_usb_get_prop(struct power_supply *psy,
if (chg->real_charger_type == POWER_SUPPLY_TYPE_UNKNOWN)
val->intval = 0;
break;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- val->intval = chg->voltage_min_uv;
- break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- val->intval = chg->voltage_max_uv;
+ rc = smblib_get_prop_usb_voltage_max(chg, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = smblib_get_prop_usb_voltage_now(chg, val);
break;
case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
- rc = smblib_get_prop_pd_current_max(chg, val);
+ val->intval = get_client_vote(chg->usb_icl_votable, PD_VOTER);
break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- rc = smblib_get_prop_usb_current_max(chg, val);
+ rc = smblib_get_prop_input_current_settled(chg, val);
break;
case POWER_SUPPLY_PROP_TYPE:
val->intval = POWER_SUPPLY_TYPE_USB_PD;
@@ -454,6 +456,16 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_PR_SWAP:
rc = smblib_get_prop_pr_swap_in_progress(chg, val);
break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MAX:
+ val->intval = chg->voltage_max_uv;
+ break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MIN:
+ val->intval = chg->voltage_min_uv;
+ break;
+ case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+ val->intval = get_client_vote(chg->usb_icl_votable,
+ USB_PSY_VOTER);
+ break;
default:
pr_err("get prop %d is not supported in usb\n", psp);
rc = -EINVAL;
@@ -481,18 +493,9 @@ static int smb2_usb_set_prop(struct power_supply *psy,
}
switch (psp) {
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- rc = smblib_set_prop_usb_voltage_min(chg, val);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- rc = smblib_set_prop_usb_voltage_max(chg, val);
- break;
case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
rc = smblib_set_prop_pd_current_max(chg, val);
break;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- rc = smblib_set_prop_usb_current_max(chg, val);
- break;
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
rc = smblib_set_prop_typec_power_role(chg, val);
break;
@@ -515,6 +518,15 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_PR_SWAP:
rc = smblib_set_prop_pr_swap_in_progress(chg, val);
break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MAX:
+ rc = smblib_set_prop_pd_voltage_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MIN:
+ rc = smblib_set_prop_pd_voltage_min(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+ rc = smblib_set_prop_sdp_current_max(chg, val);
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -530,8 +542,6 @@ static int smb2_usb_prop_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
return 1;
default:
@@ -573,6 +583,8 @@ static int smb2_init_usb_psy(struct smb2 *chip)
static enum power_supply_property smb2_usb_port_props[] = {
POWER_SUPPLY_PROP_TYPE,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
};
static int smb2_usb_port_get_prop(struct power_supply *psy,
@@ -599,6 +611,12 @@ static int smb2_usb_port_get_prop(struct power_supply *psy,
else
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = 5000000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_get_prop_input_current_settled(chg, val);
+ break;
default:
pr_err_ratelimited("Get prop %d is not supported in pc_port\n",
psp);
@@ -780,6 +798,7 @@ static enum power_supply_property smb2_dc_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_REAL_TYPE,
};
static int smb2_dc_get_prop(struct power_supply *psy,
@@ -800,6 +819,9 @@ static int smb2_dc_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CURRENT_MAX:
rc = smblib_get_prop_dc_current_max(chg, val);
break;
+ case POWER_SUPPLY_PROP_REAL_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_WIPOWER;
+ break;
default:
return -EINVAL;
}
@@ -848,7 +870,7 @@ static int smb2_dc_prop_is_writeable(struct power_supply *psy,
static const struct power_supply_desc dc_psy_desc = {
.name = "dc",
- .type = POWER_SUPPLY_TYPE_WIPOWER,
+ .type = POWER_SUPPLY_TYPE_WIRELESS,
.properties = smb2_dc_props,
.num_properties = ARRAY_SIZE(smb2_dc_props),
.get_property = smb2_dc_get_prop,
@@ -898,12 +920,14 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_SW_JEITA_ENABLED,
POWER_SUPPLY_PROP_CHARGE_DONE,
POWER_SUPPLY_PROP_PARALLEL_DISABLE,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
POWER_SUPPLY_PROP_DIE_HEALTH,
POWER_SUPPLY_PROP_RERUN_AICL,
POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -955,6 +979,9 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
val->intval = chg->step_chg_enabled;
break;
+ case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+ val->intval = chg->sw_jeita_enabled;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = smblib_get_prop_batt_voltage_now(chg, val);
break;
@@ -1006,6 +1033,9 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_RERUN_AICL:
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ rc = smblib_get_prop_batt_charge_counter(chg, val);
+ break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
@@ -1071,6 +1101,13 @@ static int smb2_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
chg->step_chg_enabled = !!val->intval;
break;
+ case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+ if (chg->sw_jeita_enabled != (!!val->intval)) {
+ rc = smblib_disable_hw_jeita(chg, !!val->intval);
+ if (rc == 0)
+ chg->sw_jeita_enabled = !!val->intval;
+ }
+ break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
chg->batt_profile_fcc_ua = val->intval;
vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
@@ -1112,6 +1149,7 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_RERUN_AICL:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
return 1;
default:
break;
@@ -1704,6 +1742,14 @@ static int smb2_init_hw(struct smb2 *chip)
}
}
+ if (chg->sw_jeita_enabled) {
+ rc = smblib_disable_hw_jeita(chg, true);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set hw jeita rc=%d\n", rc);
+ return rc;
+ }
+ }
+
return rc;
}
@@ -1776,8 +1822,8 @@ static int smb2_chg_config_init(struct smb2 *chip)
chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA;
chg->param.freq_buck = pm660_params.freq_buck;
chg->param.freq_boost = pm660_params.freq_boost;
- chg->chg_freq.freq_5V = 600;
- chg->chg_freq.freq_6V_8V = 800;
+ chg->chg_freq.freq_5V = 650;
+ chg->chg_freq.freq_6V_8V = 850;
chg->chg_freq.freq_9V = 1050;
chg->chg_freq.freq_12V = 1200;
chg->chg_freq.freq_removal = 1050;
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index ab6ba03e3556..f9d35ea7775b 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -420,19 +420,21 @@ static int smblib_set_adapter_allowance(struct smb_charger *chg,
{
int rc = 0;
- switch (allowed_voltage) {
- case USBIN_ADAPTER_ALLOW_12V:
- case USBIN_ADAPTER_ALLOW_5V_OR_12V:
- case USBIN_ADAPTER_ALLOW_9V_TO_12V:
- case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
- case USBIN_ADAPTER_ALLOW_5V_TO_12V:
- /* PM660 only support max. 9V */
- if (chg->smb_version == PM660_SUBTYPE) {
- smblib_dbg(chg, PR_MISC, "voltage not supported=%d\n",
- allowed_voltage);
+ /* PM660 only supports a maximum of 9V */
+ if (chg->smb_version == PM660_SUBTYPE) {
+ switch (allowed_voltage) {
+ case USBIN_ADAPTER_ALLOW_12V:
+ case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+ allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+ break;
+ case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+ case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
allowed_voltage = USBIN_ADAPTER_ALLOW_5V_OR_9V;
+ break;
+ case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+ allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+ break;
}
- break;
}
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
@@ -488,6 +490,45 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
/********************
* HELPER FUNCTIONS *
********************/
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+ int rc = 0;
+
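+ /* lazily acquire the dpdm regulator; toggle it only when its state changes */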
+ /* fetch the DPDM regulator */
+ if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+ "dpdm-supply", NULL)) {
+ chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+ if (IS_ERR(chg->dpdm_reg)) {
+ rc = PTR_ERR(chg->dpdm_reg);
+ smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+ rc);
+ chg->dpdm_reg = NULL;
+ return rc;
+ }
+ }
+
+ if (enable) {
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+ rc = regulator_enable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable dpdm regulator rc=%d\n",
+ rc);
+ }
+ } else {
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+ rc = regulator_disable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't disable dpdm regulator rc=%d\n",
+ rc);
+ }
+ }
+
+ return rc;
+}
static void smblib_rerun_apsd(struct smb_charger *chg)
{
@@ -607,13 +648,9 @@ static void smblib_uusb_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->pl_enable_work);
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to disable DPDM rc=%d\n", rc);
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -705,24 +742,9 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg)
if (!val.intval)
return 0;
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
chg->uusb_apsd_rerun_done = true;
smblib_rerun_apsd(chg);
@@ -1551,8 +1573,8 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val)
{
union power_supply_propval pval = {0, };
- bool usb_online, dc_online;
- u8 stat;
+ bool usb_online, dc_online, qnovo_en;
+ u8 stat, pt_en_cmd;
int rc;
rc = smblib_get_prop_usb_online(chg, &pval);
@@ -1620,11 +1642,22 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
rc);
return rc;
- }
+ }
stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT;
- if (!stat)
+
+ rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &pt_en_cmd);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ qnovo_en = (bool)(pt_en_cmd & QNOVO_PT_ENABLE_CMD_BIT);
+
+ /* ignore stat7 when qnovo is enabled */
+ if (!qnovo_en && !stat)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
return 0;
@@ -1812,6 +1845,19 @@ int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
return 0;
}
+int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->bms_psy)
+ return -EINVAL;
+
+ rc = power_supply_get_property(chg->bms_psy,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER, val);
+ return rc;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -2022,6 +2068,29 @@ int smblib_dp_dm(struct smb_charger *chg, int val)
return rc;
}
+int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable)
+{
+ int rc;
+ u8 mask;
+
+ /*
+ * Disable h/w based JEITA compensation if s/w JEITA is enabled
+ */
+ mask = JEITA_EN_COLD_SL_FCV_BIT
+ | JEITA_EN_HOT_SL_FCV_BIT
+ | JEITA_EN_HOT_SL_CCC_BIT
+ | JEITA_EN_COLD_SL_CCC_BIT;
+ rc = smblib_masked_write(chg, JEITA_EN_CFG_REG, mask,
+ disable ? 0 : mask);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure s/w jeita rc=%d\n",
+ rc);
+ return rc;
+ }
+ return 0;
+}
+
/*******************
* DC PSY GETTERS *
*******************/
@@ -2133,6 +2202,25 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
return rc;
}
+int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ switch (chg->real_charger_type) {
+ case POWER_SUPPLY_TYPE_USB_HVDCP:
+ case POWER_SUPPLY_TYPE_USB_PD:
+ if (chg->smb_version == PM660_SUBTYPE)
+ val->intval = MICRO_9V;
+ else
+ val->intval = MICRO_12V;
+ break;
+ default:
+ val->intval = MICRO_5V;
+ break;
+ }
+
+ return 0;
+}
+
int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
union power_supply_propval *val)
{
@@ -2152,21 +2240,6 @@ int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
return iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval);
}
-int smblib_get_prop_pd_current_max(struct smb_charger *chg,
- union power_supply_propval *val)
-{
- val->intval = get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
- return 0;
-}
-
-int smblib_get_prop_usb_current_max(struct smb_charger *chg,
- union power_supply_propval *val)
-{
- val->intval = get_client_vote_locked(chg->usb_icl_votable,
- USB_PSY_VOTER);
- return 0;
-}
-
int smblib_get_prop_usb_current_now(struct smb_charger *chg,
union power_supply_propval *val)
{
@@ -2344,16 +2417,9 @@ int smblib_get_prop_input_current_settled(struct smb_charger *chg,
int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
union power_supply_propval *val)
{
- const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
int rc, pulses;
- val->intval = MICRO_5V;
- if (apsd_result == NULL) {
- smblib_err(chg, "APSD result is NULL\n");
- return 0;
- }
-
- switch (apsd_result->pst) {
+ switch (chg->real_charger_type) {
case POWER_SUPPLY_TYPE_USB_HVDCP_3:
rc = smblib_get_pulse_cnt(chg, &pulses);
if (rc < 0) {
@@ -2363,6 +2429,9 @@ int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
}
val->intval = MICRO_5V + HVDCP3_STEP_UV * pulses;
break;
+ case POWER_SUPPLY_TYPE_USB_PD:
+ val->intval = chg->voltage_min_uv;
+ break;
default:
val->intval = MICRO_5V;
break;
@@ -2508,7 +2577,7 @@ static int smblib_handle_usb_current(struct smb_charger *chg,
return rc;
}
-int smblib_set_prop_usb_current_max(struct smb_charger *chg,
+int smblib_set_prop_sdp_current_max(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc = 0;
@@ -2595,7 +2664,7 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
return rc;
}
-int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
+int smblib_set_prop_pd_voltage_min(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc, min_uv;
@@ -2610,10 +2679,11 @@ int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
}
chg->voltage_min_uv = min_uv;
+ power_supply_changed(chg->usb_main_psy);
return rc;
}
-int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
+int smblib_set_prop_pd_voltage_max(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc, max_uv;
@@ -3253,25 +3323,10 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
chg->chg_freq.freq_removal);
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
if (vbus_rising) {
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
/* Schedule work to enable parallel charger */
vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
@@ -3291,13 +3346,9 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
}
}
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
}
if (chg->micro_usb_mode)
@@ -3565,6 +3616,32 @@ static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
}
}
+static void smblib_notify_extcon_props(struct smb_charger *chg)
+{
+ union power_supply_propval val;
+
+ smblib_get_prop_typec_cc_orientation(chg, &val);
+ extcon_set_cable_state_(chg->extcon, EXTCON_USB_CC,
+ (val.intval == 2) ? 1 : 0);
+ extcon_set_cable_state_(chg->extcon, EXTCON_USB_SPEED, true);
+}
+
+static void smblib_notify_device_mode(struct smb_charger *chg, bool enable)
+{
+ if (enable)
+ smblib_notify_extcon_props(chg);
+
+ extcon_set_cable_state_(chg->extcon, EXTCON_USB, enable);
+}
+
+static void smblib_notify_usb_host(struct smb_charger *chg, bool enable)
+{
+ if (enable)
+ smblib_notify_extcon_props(chg);
+
+ extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST, enable);
+}
+
#define HVDCP_DET_MS 2500
static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
{
@@ -3584,6 +3661,8 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
if (chg->micro_usb_mode)
extcon_set_cable_state_(chg->extcon, EXTCON_USB,
true);
+ if (chg->use_extcon)
+ smblib_notify_device_mode(chg, true);
case OCP_CHARGER_BIT:
case FLOAT_CHARGER_BIT:
/* if not DCP then no hvdcp timeout happens, Enable pd here. */
@@ -3668,6 +3747,10 @@ static void typec_sink_insertion(struct smb_charger *chg)
*/
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
false, 0);
+ if (chg->use_extcon) {
+ smblib_notify_usb_host(chg, true);
+ chg->otg_present = true;
+ }
}
static void typec_sink_removal(struct smb_charger *chg)
@@ -3685,13 +3768,9 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->cc2_detach_wa_active = false;
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -3820,6 +3899,14 @@ unlock:
typec_sink_removal(chg);
smblib_update_usb_type(chg);
+
+ if (chg->use_extcon) {
+ if (chg->otg_present)
+ smblib_notify_usb_host(chg, false);
+ else
+ smblib_notify_device_mode(chg, false);
+ }
+ chg->otg_present = false;
}
static void smblib_handle_typec_insertion(struct smb_charger *chg)
@@ -3834,10 +3921,14 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg)
smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
rc);
- if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) {
typec_sink_insertion(chg);
- else
+ } else {
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
typec_sink_removal(chg);
+ }
}
static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
@@ -4058,7 +4149,7 @@ irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
if (rc < 0)
smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
- if (chg->step_chg_enabled)
+ if (chg->step_chg_enabled || chg->sw_jeita_enabled)
power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
@@ -4696,7 +4787,8 @@ int smblib_init(struct smb_charger *chg)
return rc;
}
- rc = qcom_step_chg_init(chg->step_chg_enabled);
+ rc = qcom_step_chg_init(chg->step_chg_enabled,
+ chg->sw_jeita_enabled);
if (rc < 0) {
smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
rc);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 8c810352f78f..a89d09711ec8 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -139,9 +139,14 @@ struct smb_irq_info {
static const unsigned int smblib_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_USB_CC,
+ EXTCON_USB_SPEED,
EXTCON_NONE,
};
+/* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */
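+/*
+ * Each entry is a bitmask of smblib_extcon_cable indexes (0x3 = the first
+ * two entries above); the array is zero-terminated.
+ */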
+static const u32 smblib_extcon_exclusive[] = {0x3, 0};
+
struct smb_regulator {
struct regulator_dev *rdev;
struct regulator_desc rdesc;
@@ -306,6 +311,7 @@ struct smb_charger {
int dcp_icl_ua;
int fake_capacity;
bool step_chg_enabled;
+ bool sw_jeita_enabled;
bool is_hdc;
bool chg_done;
bool micro_usb_mode;
@@ -327,6 +333,8 @@ struct smb_charger {
int usb_icl_change_irq_enabled;
u32 jeita_status;
u8 float_cfg;
+ bool use_extcon;
+ bool otg_present;
/* workaround flag */
u32 wa_flags;
@@ -418,6 +426,8 @@ int smblib_get_prop_batt_current_now(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_batt_temp(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -442,11 +452,9 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_usb_suspend(struct smb_charger *chg,
union power_supply_propval *val);
-int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
union power_supply_propval *val);
-int smblib_get_prop_pd_current_max(struct smb_charger *chg,
- union power_supply_propval *val);
-int smblib_get_prop_usb_current_max(struct smb_charger *chg,
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_usb_current_now(struct smb_charger *chg,
union power_supply_propval *val);
@@ -474,11 +482,11 @@ int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_set_prop_pd_current_max(struct smb_charger *chg,
const union power_supply_propval *val);
-int smblib_set_prop_usb_current_max(struct smb_charger *chg,
+int smblib_set_prop_sdp_current_max(struct smb_charger *chg,
const union power_supply_propval *val);
-int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
+int smblib_set_prop_pd_voltage_max(struct smb_charger *chg,
const union power_supply_propval *val);
-int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
+int smblib_set_prop_pd_voltage_min(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_boost_current(struct smb_charger *chg,
const union power_supply_propval *val);
@@ -500,6 +508,7 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_icl_override(struct smb_charger *chg, bool override);
int smblib_dp_dm(struct smb_charger *chg, int val);
+int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable);
int smblib_rerun_aicl(struct smb_charger *chg);
int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index ec74e3825dd5..e7ca1e3fb108 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -215,6 +215,7 @@ static enum power_supply_property smb138x_usb_props[] = {
POWER_SUPPLY_PROP_TYPEC_MODE,
POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+ POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
};
static int smb138x_usb_get_prop(struct power_supply *psy,
@@ -242,7 +243,7 @@ static int smb138x_usb_get_prop(struct power_supply *psy,
rc = smblib_get_prop_usb_voltage_now(chg, val);
break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- rc = smblib_get_prop_usb_current_max(chg, val);
+ val->intval = get_effective_result(chg->usb_icl_votable);
break;
case POWER_SUPPLY_PROP_TYPE:
val->intval = chg->usb_psy_desc.type;
@@ -256,6 +257,10 @@ static int smb138x_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
rc = smblib_get_prop_typec_cc_orientation(chg, val);
break;
+ case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+ val->intval = get_client_vote(chg->usb_icl_votable,
+ USB_PSY_VOTER);
+ break;
default:
pr_err("get prop %d is not supported\n", prop);
return -EINVAL;
@@ -278,18 +283,12 @@ static int smb138x_usb_set_prop(struct power_supply *psy,
int rc = 0;
switch (prop) {
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- rc = smblib_set_prop_usb_voltage_min(chg, val);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- rc = smblib_set_prop_usb_voltage_max(chg, val);
- break;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- rc = smblib_set_prop_usb_current_max(chg, val);
- break;
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
rc = smblib_set_prop_typec_power_role(chg, val);
break;
+ case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+ rc = smblib_set_prop_sdp_current_max(chg, val);
+ break;
default:
pr_err("set prop %d is not supported\n", prop);
return -EINVAL;
@@ -301,13 +300,6 @@ static int smb138x_usb_set_prop(struct power_supply *psy,
static int smb138x_usb_prop_is_writeable(struct power_supply *psy,
enum power_supply_property prop)
{
- switch (prop) {
- case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
- return 1;
- default:
- break;
- }
-
return 0;
}
@@ -871,6 +863,13 @@ static int smb138x_init_slave_hw(struct smb138x *chip)
return rc;
}
+ /* Disable OTG */
+ rc = smblib_masked_write(chg, CMD_OTG_REG, OTG_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable OTG rc=%d\n", rc);
+ return rc;
+ }
+
/* suspend parallel charging */
rc = smb138x_set_parallel_suspend(chip, true);
if (rc < 0) {
@@ -968,6 +967,20 @@ static int smb138x_init_hw(struct smb138x *chip)
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+ /* Disable OTG */
+ rc = smblib_masked_write(chg, CMD_OTG_REG, OTG_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable OTG rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Unsuspend USB input */
+ rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't unsuspend USB, rc=%d\n", rc);
+ return rc;
+ }
+
/* configure to a fixed 700khz freq to avoid tdie errors */
rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
if (rc < 0) {
@@ -1608,14 +1621,33 @@ static int smb138x_remove(struct platform_device *pdev)
return 0;
}
+static void smb138x_shutdown(struct platform_device *pdev)
+{
+ struct smb138x *chip = platform_get_drvdata(pdev);
+ struct smb_charger *chg = &chip->chg;
+ int rc;
+
+ /* Suspend charging */
+ rc = smb138x_set_parallel_suspend(chip, true);
+ if (rc < 0)
+ pr_err("Couldn't suspend charging rc=%d\n", rc);
+
+ /* Disable OTG */
+ rc = smblib_masked_write(chg, CMD_OTG_REG, OTG_EN_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable OTG rc=%d\n", rc);
+}
+
static struct platform_driver smb138x_driver = {
.driver = {
.name = "qcom,smb138x-charger",
.owner = THIS_MODULE,
.of_match_table = match_table,
},
- .probe = smb138x_probe,
- .remove = smb138x_remove,
+ .probe = smb138x_probe,
+ .remove = smb138x_remove,
+ .shutdown = smb138x_shutdown,
};
module_platform_driver(smb138x_driver);
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index a2c08be960be..5b41a456c6db 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -20,7 +20,7 @@
#define MAX_STEP_CHG_ENTRIES 8
#define STEP_CHG_VOTER "STEP_CHG_VOTER"
-#define STATUS_CHANGE_VOTER "STATUS_CHANGE_VOTER"
+#define JEITA_VOTER "JEITA_VOTER"
#define is_between(left, right, value) \
(((left) >= (right) && (left) >= (value) \
@@ -28,23 +28,44 @@
|| ((left) <= (right) && (left) <= (value) \
&& (value) <= (right)))
-struct step_chg_data {
- u32 vbatt_soc_low;
- u32 vbatt_soc_high;
- u32 fcc_ua;
+struct range_data {
+ u32 low_threshold;
+ u32 high_threshold;
+ u32 value;
};
struct step_chg_cfg {
- u32 psy_prop;
- char *prop_name;
- struct step_chg_data cfg[MAX_STEP_CHG_ENTRIES];
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fcc_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fv_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fv_cfg[MAX_STEP_CHG_ENTRIES];
};
struct step_chg_info {
- ktime_t last_update_time;
+ ktime_t step_last_update_time;
+ ktime_t jeita_last_update_time;
bool step_chg_enable;
+ bool sw_jeita_enable;
+ int jeita_fcc_index;
+ int jeita_fv_index;
+ int step_index;
struct votable *fcc_votable;
+ struct votable *fv_votable;
struct wakeup_source *step_chg_ws;
struct power_supply *batt_psy;
struct delayed_work status_change_work;
@@ -53,32 +74,70 @@ struct step_chg_info {
static struct step_chg_info *the_chip;
+#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
+
/*
* Step Charging Configuration
* Update the table based on the battery profile
* Supports VBATT and SOC based source
+ * Range data must be in increasing order and must not overlap
*/
static struct step_chg_cfg step_chg_config = {
- .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW,
- .prop_name = "VBATT",
- .cfg = {
+ .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ .prop_name = "VBATT",
+ .hysteresis = 100000, /* 100mV */
+ .fcc_cfg = {
/* VBAT_LOW VBAT_HIGH FCC */
{3600000, 4000000, 3000000},
- {4000000, 4200000, 2800000},
- {4200000, 4400000, 2000000},
+ {4001000, 4200000, 2800000},
+ {4201000, 4400000, 2000000},
},
+ /*
+ * SOC STEP-CHG configuration example.
+ *
+ * .psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+ * .prop_name = "SOC",
+ * .fcc_cfg = {
+ * //SOC_LOW SOC_HIGH FCC
+ * {20, 70, 3000000},
+ * {70, 90, 2750000},
+ * {90, 100, 2500000},
+ * },
+ */
+};
+
/*
- * SOC STEP-CHG configuration example.
- *
- * .psy_prop = POWER_SUPPLY_PROP_CAPACITY,
- * .prop_name = "SOC",
- * .cfg = {
- * //SOC_LOW SOC_HIGH FCC
- * {20, 70, 3000000},
- * {70, 90, 2750000},
- * {90, 100, 2500000},
- * },
+ * Jeita Charging Configuration
+ * Update the table based on the battery profile
+ * Please ensure that the TEMP ranges are programmed in the HW so that
+ * an interrupt is issued and the consequent psy change causes an
+ * immediate re-evaluation here.
+ * Range data must be in increasing order and must not overlap.
+ * Gaps are okay.
*/
+static struct jeita_fcc_cfg jeita_fcc_config = {
+ .psy_prop = POWER_SUPPLY_PROP_TEMP,
+ .prop_name = "BATT_TEMP",
+ .hysteresis = 10, /* 1degC hysteresis */
+ .fcc_cfg = {
+ /* TEMP_LOW TEMP_HIGH FCC */
+ {0, 100, 600000},
+ {101, 200, 2000000},
+ {201, 450, 3000000},
+ {451, 550, 600000},
+ },
+};
+
+static struct jeita_fv_cfg jeita_fv_config = {
+ .psy_prop = POWER_SUPPLY_PROP_TEMP,
+ .prop_name = "BATT_TEMP",
+ .hysteresis = 10, /* 1degC hysteresis */
+ .fv_cfg = {
+ /* TEMP_LOW TEMP_HIGH FV */
+ {0, 100, 4200000},
+ {101, 450, 4400000},
+ {451, 550, 4200000},
+ },
};
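+/*
+ * Example: at 30.0 degC (300 in 0.1 degC units) the tables above select
+ * FCC = 3000000 uA and FV = 4400000 uV; at 5.0 degC (50) they select
+ * FCC = 600000 uA and FV = 4200000 uV. get_val() applies the 1 degC
+ * hysteresis when moving between adjacent ranges.
+ */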
static bool is_batt_available(struct step_chg_info *chip)
@@ -92,22 +151,67 @@ static bool is_batt_available(struct step_chg_info *chip)
return true;
}
-static int get_fcc(int threshold)
+static int get_val(struct range_data *range, int hysteresis, int current_index,
+ int threshold,
+ int *new_index, int *val)
{
int i;
+ *new_index = -EINVAL;
+ /* first find the matching index without hysteresis */
for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
- if (is_between(step_chg_config.cfg[i].vbatt_soc_low,
- step_chg_config.cfg[i].vbatt_soc_high, threshold))
- return step_chg_config.cfg[i].fcc_ua;
+ if (is_between(range[i].low_threshold,
+ range[i].high_threshold, threshold)) {
+ *new_index = i;
+ *val = range[i].value;
+ }
+
+ /* if nothing was found, return -ENODATA */
+ if (*new_index == -EINVAL)
+ return -ENODATA;
+ /*
+ * If we don't have a current_index, return this newfound value.
+ * There is no hysteresis for the out-of-range to in-range
+ * transition.
+ */
+ if (current_index == -EINVAL)
+ return 0;
- return -ENODATA;
+ /*
+ * Check for hysteresis if the new index is in the neighbourhood
+ * of our current index.
+ */
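+ /*
+ * Example with the VBATT table above (hysteresis = 100000 uV): while
+ * index 1 (4001000-4200000 uV, 2800000 uA) is selected, the threshold
+ * must reach 4201000 + 100000 = 4301000 uV before index 2 (2000000 uA)
+ * is taken; on the way down it must drop to 4200000 - 100000 = 4100000
+ * uV before index 1 is selected again.
+ */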
+ if (*new_index == current_index + 1) {
+ if (threshold < range[*new_index].low_threshold + hysteresis) {
+ /*
+ * Stay in the current index, threshold is not higher
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ } else if (*new_index == current_index - 1) {
+ if (threshold > range[*new_index].high_threshold - hysteresis) {
+ /*
+ * stay in the current index, threshold is not lower
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ }
+ return 0;
}
static int handle_step_chg_config(struct step_chg_info *chip)
{
union power_supply_propval pval = {0, };
int rc = 0, fcc_ua = 0;
+ u64 elapsed_us;
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->step_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
rc = power_supply_get_property(chip->batt_psy,
POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
@@ -119,7 +223,7 @@ static int handle_step_chg_config(struct step_chg_info *chip)
if (!chip->step_chg_enable) {
if (chip->fcc_votable)
vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
- return 0;
+ goto update_time;
}
rc = power_supply_get_property(chip->batt_psy,
@@ -130,48 +234,151 @@ static int handle_step_chg_config(struct step_chg_info *chip)
return rc;
}
- chip->fcc_votable = find_votable("FCC");
- if (!chip->fcc_votable)
- return -EINVAL;
-
- fcc_ua = get_fcc(pval.intval);
- if (fcc_ua < 0) {
+ rc = get_val(step_chg_config.fcc_cfg, step_chg_config.hysteresis,
+ chip->step_index,
+ pval.intval,
+ &chip->step_index,
+ &fcc_ua);
+ if (rc < 0) {
/* remove the vote if no step-based fcc is found */
- vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
- return 0;
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ goto update_time;
}
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ return -EINVAL;
+
vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
pr_debug("%s = %d Step-FCC = %duA\n",
step_chg_config.prop_name, pval.intval, fcc_ua);
+update_time:
+ chip->step_last_update_time = ktime_get();
return 0;
+
+reschedule:
+ /* reschedule 1000uS after the remaining time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
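+/*
+ * Returns 0 when done, a negative errno on failure, or the number of
+ * microseconds after which it wants to be called again while inside the
+ * debounce window (consumed by status_change_work()).
+ */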
+static int handle_jeita(struct step_chg_info *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0, fcc_ua = 0, fv_uv = 0;
+ u64 elapsed_us;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_SW_JEITA_ENABLED, &pval);
+ if (rc < 0)
+ chip->sw_jeita_enable = 0;
+ else
+ chip->sw_jeita_enable = pval.intval;
+
+ if (!chip->sw_jeita_enable) {
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+ if (chip->fv_votable)
+ vote(chip->fv_votable, JEITA_VOTER, false, 0);
+ return 0;
+ }
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->jeita_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ jeita_fcc_config.psy_prop, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't read %s property rc=%d\n",
+ jeita_fcc_config.prop_name, rc);
+ return rc;
+ }
+
+ rc = get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
+ chip->jeita_fcc_index,
+ pval.intval,
+ &chip->jeita_fcc_index,
+ &fcc_ua);
+ if (rc < 0) {
+ /* remove the vote if no jeita-based fcc is found */
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+ goto update_time;
+ }
+
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ /* changing FCC is a must */
+ return -EINVAL;
+
+ vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua);
+
+ rc = get_val(jeita_fv_config.fv_cfg, jeita_fv_config.hysteresis,
+ chip->jeita_fv_index,
+ pval.intval,
+ &chip->jeita_fv_index,
+ &fv_uv);
+ if (rc < 0) {
+ /* remove the vote if no jeita-based fv is found */
+ if (chip->fv_votable)
+ vote(chip->fv_votable, JEITA_VOTER, false, 0);
+ goto update_time;
+ }
+
+ chip->fv_votable = find_votable("FV");
+ if (!chip->fv_votable)
+ goto update_time;
+
+ vote(chip->fv_votable, JEITA_VOTER, true, fv_uv);
+
+ pr_debug("%s = %d FCC = %duA FV = %duV\n",
+ jeita_fcc_config.prop_name, pval.intval, fcc_ua, fv_uv);
+
+update_time:
+ chip->jeita_last_update_time = ktime_get();
+ return 0;
+
+reschedule:
+ /* reschedule 1000uS after the remaining time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
}
-#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
static void status_change_work(struct work_struct *work)
{
struct step_chg_info *chip = container_of(work,
struct step_chg_info, status_change_work.work);
int rc = 0;
- u64 elapsed_us;
-
- elapsed_us = ktime_us_delta(ktime_get(), chip->last_update_time);
- if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
- goto release_ws;
+ int reschedule_us;
+ int reschedule_jeita_work_us = 0;
+ int reschedule_step_work_us = 0;
if (!is_batt_available(chip))
- goto release_ws;
+ return;
+
+ /* handle battery temperature first; jeita keeps its own debounce timer */
+ rc = handle_jeita(chip);
+ if (rc > 0)
+ reschedule_jeita_work_us = rc;
+ else if (rc < 0)
+ pr_err("Couldn't handle sw jeita rc = %d\n", rc);
rc = handle_step_chg_config(chip);
+ if (rc > 0)
+ reschedule_step_work_us = rc;
if (rc < 0)
- goto release_ws;
+ pr_err("Couldn't handle step rc = %d\n", rc);
- chip->last_update_time = ktime_get();
-
-release_ws:
- __pm_relax(chip->step_chg_ws);
+ reschedule_us = min(reschedule_jeita_work_us, reschedule_step_work_us);
+ if (reschedule_us == 0)
+ __pm_relax(chip->step_chg_ws);
+ else
+ schedule_delayed_work(&chip->status_change_work,
+ usecs_to_jiffies(reschedule_us));
}
static int step_chg_notifier_call(struct notifier_block *nb,
@@ -205,7 +412,7 @@ static int step_chg_register_notifier(struct step_chg_info *chip)
return 0;
}
-int qcom_step_chg_init(bool step_chg_enable)
+int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
{
int rc;
struct step_chg_info *chip;
@@ -226,6 +433,11 @@ int qcom_step_chg_init(bool step_chg_enable)
}
chip->step_chg_enable = step_chg_enable;
+ chip->sw_jeita_enable = sw_jeita_enable;
+
+ chip->step_index = -EINVAL;
+ chip->jeita_fcc_index = -EINVAL;
+ chip->jeita_fv_index = -EINVAL;
if (step_chg_enable && (!step_chg_config.psy_prop ||
!step_chg_config.prop_name)) {
@@ -234,6 +446,20 @@ int qcom_step_chg_init(bool step_chg_enable)
return -ENODATA;
}
+ if (sw_jeita_enable && (!jeita_fcc_config.psy_prop ||
+ !jeita_fcc_config.prop_name)) {
+ /* fail if the sw-jeita FCC configuration is invalid */
+ pr_err("Jeita FCC TEMP configuration not defined - fail\n");
+ return -ENODATA;
+ }
+
+ if (sw_jeita_enable && (!jeita_fv_config.psy_prop ||
+ !jeita_fv_config.prop_name)) {
+ /* fail if the sw-jeita FV configuration is invalid */
+ pr_err("Jeita FV TEMP configuration not defined - fail\n");
+ return -ENODATA;
+ }
+
INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
rc = step_chg_register_notifier(chip);
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
index 928eeb7a670b..53335c3c2c70 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.h
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -12,6 +12,6 @@
#ifndef __STEP_CHG_H__
#define __STEP_CHG_H__
-int qcom_step_chg_init(bool);
+int qcom_step_chg_init(bool, bool);
void qcom_step_chg_deinit(void);
#endif /* __STEP_CHG_H__ */
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index e46b1d583f40..2531b74b4588 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -328,6 +328,7 @@ struct qpnp_pwm_chip {
bool enabled;
struct _qpnp_pwm_config pwm_config;
struct qpnp_lpg_config lpg_config;
+ enum pm_pwm_mode pwm_mode;
spinlock_t lpg_lock;
enum qpnp_lpg_revision revision;
u8 sub_type;
@@ -1314,12 +1315,10 @@ after_table_write:
return rc;
}
+/* lpg_lock should be held while calling _pwm_enable() */
static int _pwm_enable(struct qpnp_pwm_chip *chip)
{
int rc = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&chip->lpg_lock, flags);
if (QPNP_IS_PWM_CONFIG_SELECTED(
chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) ||
@@ -1332,8 +1331,21 @@ static int _pwm_enable(struct qpnp_pwm_chip *chip)
if (!rc)
chip->enabled = true;
- spin_unlock_irqrestore(&chip->lpg_lock, flags);
+ return rc;
+}
+
+/* lpg_lock should be held while calling _pwm_change_mode() */
+static int _pwm_change_mode(struct qpnp_pwm_chip *chip, enum pm_pwm_mode mode)
+{
+ int rc;
+
+ if (mode == PM_PWM_MODE_LPG)
+ rc = qpnp_configure_lpg_control(chip);
+ else
+ rc = qpnp_configure_pwm_control(chip);
+
+ if (rc)
+ pr_err("Failed to change the mode\n");
return rc;
}
@@ -1410,11 +1422,15 @@ static int qpnp_pwm_enable(struct pwm_chip *pwm_chip,
{
int rc;
struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+ unsigned long flags;
+ spin_lock_irqsave(&chip->lpg_lock, flags);
rc = _pwm_enable(chip);
if (rc)
pr_err("Failed to enable PWM channel: %d\n", chip->channel_id);
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
return rc;
}
@@ -1452,20 +1468,6 @@ static void qpnp_pwm_disable(struct pwm_chip *pwm_chip,
chip->channel_id);
}
-static int _pwm_change_mode(struct qpnp_pwm_chip *chip, enum pm_pwm_mode mode)
-{
- int rc;
-
- if (mode)
- rc = qpnp_configure_lpg_control(chip);
- else
- rc = qpnp_configure_pwm_control(chip);
-
- if (rc)
- pr_err("Failed to change the mode\n");
- return rc;
-}
-
/**
* pwm_change_mode - Change the PWM mode configuration
* @pwm: the PWM device
@@ -1490,7 +1492,22 @@ int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
chip = qpnp_pwm_from_pwm_dev(pwm);
spin_lock_irqsave(&chip->lpg_lock, flags);
- rc = _pwm_change_mode(chip, mode);
+ if (chip->pwm_mode != mode) {
+ rc = _pwm_change_mode(chip, mode);
+ if (rc) {
+ pr_err("Failed to change mode: %d, rc=%d\n", mode, rc);
+ goto unlock;
+ }
+ chip->pwm_mode = mode;
+ if (chip->enabled) {
+ rc = _pwm_enable(chip);
+ if (rc) {
+ pr_err("Failed to enable PWM, rc=%d\n", rc);
+ goto unlock;
+ }
+ }
+ }
+unlock:
spin_unlock_irqrestore(&chip->lpg_lock, flags);
return rc;
@@ -1894,7 +1911,7 @@ out:
static int qpnp_parse_dt_config(struct platform_device *pdev,
struct qpnp_pwm_chip *chip)
{
- int rc, enable, lut_entry_size, list_size, i;
+ int rc, mode, lut_entry_size, list_size, i;
const char *label;
const __be32 *prop;
u32 size;
@@ -2078,18 +2095,20 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
}
}
- rc = of_property_read_u32(of_node, "qcom,mode-select", &enable);
+ rc = of_property_read_u32(of_node, "qcom,mode-select", &mode);
if (rc)
goto read_opt_props;
- if ((enable == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
- (enable == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
+ if (mode > PM_PWM_MODE_LPG ||
+ (mode == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
+ (mode == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
dev_err(&pdev->dev, "%s: Invalid mode select\n", __func__);
rc = -EINVAL;
goto out;
}
- _pwm_change_mode(chip, enable);
+ chip->pwm_mode = mode;
+ _pwm_change_mode(chip, mode);
_pwm_enable(chip);
read_opt_props:
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index 724e8a4c00da..27f2d9851c2a 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -147,6 +147,8 @@
#define MIN_SOFT_START_US 0
#define MAX_SOFT_START_US 2000
+#define BST_HEADROOM_DEFAULT_MV 200
+
struct ldo_regulator {
struct regulator_desc rdesc;
struct regulator_dev *rdev;
@@ -187,6 +189,7 @@ struct bst_params {
int soft_start_us;
int vreg_ok_dbc_us;
int voltage_mv;
+ u16 headroom_mv;
};
struct qpnp_lcdb {
@@ -853,28 +856,40 @@ irq_handled:
#define VOLTAGE_STEP_50_MV 50
#define VOLTAGE_STEP_50MV_OFFSET 0xA
static int qpnp_lcdb_set_bst_voltage(struct qpnp_lcdb *lcdb,
- int voltage_mv)
+ int voltage_mv, u8 type)
{
int rc = 0;
u8 val = 0;
+ int bst_voltage_mv;
+ struct ldo_regulator *ldo = &lcdb->ldo;
+ struct ncp_regulator *ncp = &lcdb->ncp;
+ struct bst_params *bst = &lcdb->bst;
+
+ /* Vout_Boost = headroom_mv + max(Vout_LDO, abs(Vout_NCP)) */
+ bst_voltage_mv = max(voltage_mv, max(ldo->voltage_mv, ncp->voltage_mv));
+ bst_voltage_mv += bst->headroom_mv;
+
+ if (bst_voltage_mv < MIN_BST_VOLTAGE_MV)
+ bst_voltage_mv = MIN_BST_VOLTAGE_MV;
+ else if (bst_voltage_mv > MAX_BST_VOLTAGE_MV)
+ bst_voltage_mv = MAX_BST_VOLTAGE_MV;
+
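+ /*
+ * Example: with Vout_LDO = Vout_NCP = 5500 mV and the default 200 mV
+ * headroom, the target works out to 5700 mV, clamped to the MIN/MAX
+ * boost range and then encoded below in 50 mV steps above
+ * MIN_BST_VOLTAGE_MV.
+ */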
+ if (bst_voltage_mv != bst->voltage_mv) {
+ val = DIV_ROUND_UP(bst_voltage_mv - MIN_BST_VOLTAGE_MV,
+ VOLTAGE_STEP_50_MV);
- if (voltage_mv < MIN_BST_VOLTAGE_MV)
- voltage_mv = MIN_BST_VOLTAGE_MV;
- else if (voltage_mv > MAX_BST_VOLTAGE_MV)
- voltage_mv = MAX_BST_VOLTAGE_MV;
-
- val = DIV_ROUND_UP(voltage_mv - MIN_BST_VOLTAGE_MV,
- VOLTAGE_STEP_50_MV);
-
- rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
- LCDB_BST_OUTPUT_VOLTAGE_REG,
- SET_OUTPUT_VOLTAGE_MASK, val);
- if (rc < 0)
- pr_err("Failed to set boost voltage %d mv rc=%d\n",
- voltage_mv, rc);
- else
- pr_debug("Boost voltage set = %d mv (0x%02x = 0x%02x)\n",
- voltage_mv, LCDB_BST_OUTPUT_VOLTAGE_REG, val);
+ rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+ LCDB_BST_OUTPUT_VOLTAGE_REG,
+ SET_OUTPUT_VOLTAGE_MASK, val);
+ if (rc < 0) {
+ pr_err("Failed to set boost voltage %d mv rc=%d\n",
+ bst_voltage_mv, rc);
+ } else {
+ pr_debug("Boost voltage set = %d mv (0x%02x = 0x%02x)\n",
+ bst_voltage_mv, LCDB_BST_OUTPUT_VOLTAGE_REG, val);
+ bst->voltage_mv = bst_voltage_mv;
+ }
+ }
return rc;
}
@@ -905,25 +920,16 @@ static int qpnp_lcdb_set_voltage(struct qpnp_lcdb *lcdb,
u16 offset = LCDB_LDO_OUTPUT_VOLTAGE_REG;
u8 val = 0;
- if (type == BST)
- return qpnp_lcdb_set_bst_voltage(lcdb, voltage_mv);
-
- if (type == NCP)
- offset = LCDB_NCP_OUTPUT_VOLTAGE_REG;
-
if (!is_between(voltage_mv, MIN_VOLTAGE_MV, MAX_VOLTAGE_MV)) {
pr_err("Invalid voltage %dmv (min=%d max=%d)\n",
voltage_mv, MIN_VOLTAGE_MV, MAX_VOLTAGE_MV);
return -EINVAL;
}
- /* Change the BST voltage to LDO + 100mV */
- if (type == LDO) {
- rc = qpnp_lcdb_set_bst_voltage(lcdb, voltage_mv + 100);
- if (rc < 0) {
- pr_err("Failed to set boost voltage rc=%d\n", rc);
- return rc;
- }
+ rc = qpnp_lcdb_set_bst_voltage(lcdb, voltage_mv, type);
+ if (rc < 0) {
+ pr_err("Failed to set boost voltage rc=%d\n", rc);
+ return rc;
}
/* Below logic is only valid for LDO and NCP type */
@@ -936,6 +942,9 @@ static int qpnp_lcdb_set_voltage(struct qpnp_lcdb *lcdb,
val += VOLTAGE_STEP_50MV_OFFSET;
}
+ if (type == NCP)
+ offset = LCDB_NCP_OUTPUT_VOLTAGE_REG;
+
rc = qpnp_lcdb_masked_write(lcdb, lcdb->base + offset,
SET_OUTPUT_VOLTAGE_MASK, val);
if (rc < 0)
@@ -1058,6 +1067,8 @@ static int qpnp_lcdb_ldo_regulator_set_voltage(struct regulator_dev *rdev,
rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, LDO);
if (rc < 0)
pr_err("Failed to set LDO voltage rc=%c\n", rc);
+ else
+ lcdb->ldo.voltage_mv = min_uV / 1000;
return rc;
}
@@ -1129,6 +1140,8 @@ static int qpnp_lcdb_ncp_regulator_set_voltage(struct regulator_dev *rdev,
rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, NCP);
if (rc < 0)
pr_err("Failed to set LDO voltage rc=%c\n", rc);
+ else
+ lcdb->ncp.voltage_mv = min_uV / 1000;
return rc;
}
@@ -1389,6 +1402,12 @@ static int qpnp_lcdb_bst_dt_init(struct qpnp_lcdb *lcdb)
return -EINVAL;
}
+ /* Boost headroom configuration */
+ of_property_read_u16(node, "qcom,bst-headroom-mv",
+ &lcdb->bst.headroom_mv);
+ if (lcdb->bst.headroom_mv < BST_HEADROOM_DEFAULT_MV)
+ lcdb->bst.headroom_mv = BST_HEADROOM_DEFAULT_MV;
+
return 0;
}
@@ -1695,6 +1714,9 @@ static int qpnp_lcdb_init_bst(struct qpnp_lcdb *lcdb)
}
lcdb->bst.soft_start_us = (val & SOFT_START_MASK) * 200 + 200;
+ if (!lcdb->bst.headroom_mv)
+ lcdb->bst.headroom_mv = BST_HEADROOM_DEFAULT_MV;
+
return 0;
}
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index ce129e595b55..5c935847599c 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -248,6 +248,7 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
atomic_t in_flight; /* io counter */
+ bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 266b909fe854..82e4bc8c11c5 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2533,6 +2533,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long wait_host_tmo;
struct Scsi_Host *shost = sc->device->host;
struct fc_lport *lp = shost_priv(shost);
+ struct fnic *fnic = lport_priv(lp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->internal_reset_inprogress == 0) {
+ fnic->internal_reset_inprogress = 1;
+ } else {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "host reset in progress skipping another host reset\n");
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/*
* If fnic_reset is successful, wait for fabric login to complete
@@ -2553,6 +2566,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
}
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->internal_reset_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5b2c37f1e908..9b5367294116 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4981,15 +4981,14 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
- int r, i;
+ int r, i, index;
unsigned long flags;
u32 reply_address;
u16 smid;
struct _tr_list *delayed_tr, *delayed_tr_next;
u8 hide_flag;
struct adapter_reply_queue *reply_q;
- long reply_post_free;
- u32 reply_post_free_sz, index = 0;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -5061,27 +5060,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
_base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
- reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(Mpi2DefaultReplyDescriptor_t);
- reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+ index = 0;
+ reply_post_free_contig = ioc->reply_post[0].reply_post_free;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable) {
+ reply_q->reply_post_free =
+ ioc->reply_post[index++].reply_post_free;
+ } else {
+ reply_q->reply_post_free = reply_post_free_contig;
+ reply_post_free_contig += ioc->reply_post_queue_depth;
+ }
+
reply_q->reply_post_host_index = 0;
- reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
- reply_post_free;
for (i = 0; i < ioc->reply_post_queue_depth; i++)
reply_q->reply_post_free[i].Words =
cpu_to_le64(ULLONG_MAX);
if (!_base_is_controller_msix_enabled(ioc))
goto skip_init_reply_post_free_queue;
- /*
- * If RDPQ is enabled, switch to the next allocation.
- * Otherwise advance within the contiguous region.
- */
- if (ioc->rdpq_array_enable)
- reply_post_free = (long)
- ioc->reply_post[++index].reply_post_free;
- else
- reply_post_free += reply_post_free_sz;
}
skip_init_reply_post_free_queue:
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 2b3c25371d76..8175f997e82c 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -584,6 +584,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_res;
}
@@ -594,6 +595,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_dflt_sgl_pool;
}
@@ -604,6 +606,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
+ ret = -ENOMEM;
goto err_free_max_sgl_pool;
}
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index aae796678ffe..47106f937371 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2416,7 +2416,8 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
*/
static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
{
- int ret = 0, scm_ret = 0;
+ int ret = 0;
+ u64 scm_ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/* scm command buffer structrue */
@@ -2457,7 +2458,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
cbuf.device_id = UFS_TZ_DEV_ID;
ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
if (ret || scm_ret) {
- dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %d\n",
+ dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n",
__func__, ret, scm_ret);
if (!ret)
ret = scm_ret;
@@ -2466,7 +2467,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
}
out:
- dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %d\n",
+ dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n",
__func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret);
return ret;
}
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 37193bbb23b7..042108d4035b 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -2242,6 +2242,7 @@ static int parse_qos_dt_params(struct device_node *node,
einfo->ramp_time_us[i] = arr32[i];
rc = 0;
+ kfree(arr32);
return rc;
invalid_key:
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index bf815cb68f90..7f2e4cc42e51 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -152,6 +152,8 @@ bool ignore_qmi_timeout;
#define ICNSS_QMI_ASSERT() do { } while (0)
#endif
+#define QMI_ERR_PLAT_CCPM_CLK_INIT_FAILED 0x77
+
enum icnss_debug_quirks {
HW_ALWAYS_ON,
HW_DEBUG_ENABLE,
@@ -266,7 +268,6 @@ struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
struct icnss_event_pd_service_down_data {
bool crashed;
bool fw_rejuvenate;
- bool wdog_bite;
};
struct icnss_driver_event {
@@ -291,7 +292,6 @@ enum icnss_driver_state {
ICNSS_PD_RESTART,
ICNSS_MSA0_ASSIGNED,
ICNSS_WLFW_EXISTS,
- ICNSS_WDOG_BITE,
ICNSS_SHUTDOWN_DONE,
ICNSS_HOST_TRIGGERED_PDR,
};
@@ -735,7 +735,7 @@ static int wlfw_vbatt_send_sync_msg(struct icnss_priv *priv,
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI vbatt request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
priv->stats.vbatt_resp++;
@@ -1219,7 +1219,7 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI MSA Mem info request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
@@ -1291,7 +1291,7 @@ static int wlfw_msa_ready_send_sync_msg(void)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI MSA ready request rejected: result:%d error:%d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
penv->stats.msa_ready_resp++;
@@ -1354,7 +1354,7 @@ static int wlfw_ind_register_send_sync_msg(void)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI indication register request rejected, resut:%d error:%d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
penv->stats.ind_register_resp++;
@@ -1401,7 +1401,9 @@ static int wlfw_cap_send_sync_msg(void)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI capability request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
+ if (resp.resp.error == QMI_ERR_PLAT_CCPM_CLK_INIT_FAILED)
+ icnss_pr_err("RF card Not present");
goto out;
}
@@ -1484,7 +1486,7 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI mode request rejected, mode:%d result:%d error:%d\n",
mode, resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
penv->stats.mode_resp++;
@@ -1534,7 +1536,7 @@ static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI config request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
penv->stats.cfg_resp++;
@@ -1587,7 +1589,7 @@ static int wlfw_ini_send_sync_msg(uint8_t fw_log_mode)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n",
fw_log_mode, resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
penv->stats.ini_resp++;
@@ -1647,7 +1649,7 @@ static int wlfw_athdiag_read_send_sync_msg(struct icnss_priv *priv,
if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI athdiag read request rejected, result:%d error:%d\n",
resp->resp.result, resp->resp.error);
- ret = resp->resp.result;
+ ret = -resp->resp.result;
goto out;
}
@@ -1713,7 +1715,7 @@ static int wlfw_athdiag_write_send_sync_msg(struct icnss_priv *priv,
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI athdiag write request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
out:
@@ -1808,7 +1810,7 @@ static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv)
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI rejuvenate ack request rejected, result:%d error %d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
priv->stats.rejuvenate_ack_resp++;
@@ -1869,7 +1871,7 @@ static int wlfw_dynamic_feature_mask_send_sync_msg(struct icnss_priv *priv,
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
icnss_pr_err("QMI dynamic feature mask request rejected, result:%d error %d\n",
resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ ret = -resp.resp.result;
goto out;
}
@@ -2149,10 +2151,7 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv)
icnss_pm_relax(priv);
- if (test_bit(ICNSS_WDOG_BITE, &priv->state)) {
- icnss_call_driver_shutdown(priv);
- clear_bit(ICNSS_WDOG_BITE, &priv->state);
- }
+ icnss_call_driver_shutdown(priv);
clear_bit(ICNSS_PD_RESTART, &priv->state);
@@ -2302,8 +2301,7 @@ static int icnss_call_driver_remove(struct icnss_priv *priv)
static int icnss_fw_crashed(struct icnss_priv *priv,
struct icnss_event_pd_service_down_data *event_data)
{
- icnss_pr_dbg("FW crashed, state: 0x%lx, wdog_bite: %d\n",
- priv->state, event_data->wdog_bite);
+ icnss_pr_dbg("FW crashed, state: 0x%lx\n", priv->state);
set_bit(ICNSS_PD_RESTART, &priv->state);
clear_bit(ICNSS_FW_READY, &priv->state);
@@ -2313,17 +2311,9 @@ static int icnss_fw_crashed(struct icnss_priv *priv,
if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_CRASHED, NULL);
- if (event_data->wdog_bite) {
- set_bit(ICNSS_WDOG_BITE, &priv->state);
- goto out;
- }
-
- icnss_call_driver_shutdown(priv);
-
if (event_data->fw_rejuvenate)
wlfw_rejuvenate_ack_send_sync_msg(priv);
-out:
return 0;
}
@@ -2520,9 +2510,6 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
event_data->crashed = notif->crashed;
- if (notif->crashed == CRASH_STATUS_WDOG_BITE)
- event_data->wdog_bite = true;
-
fw_down_data.crashed = !!notif->crashed;
icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
@@ -2612,7 +2599,6 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
switch (*state) {
case ROOT_PD_WDOG_BITE:
- event_data->wdog_bite = true;
priv->stats.recovery.root_pd_crash++;
break;
case ROOT_PD_SHUTDOWN:
@@ -3832,9 +3818,6 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_WLFW_EXISTS:
seq_puts(s, "WLAN FW EXISTS");
continue;
- case ICNSS_WDOG_BITE:
- seq_puts(s, "MODEM WDOG BITE");
- continue;
case ICNSS_SHUTDOWN_DONE:
seq_puts(s, "SHUTDOWN DONE");
continue;
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
index 2c15f7896c82..63432e6026e2 100644
--- a/drivers/soc/qcom/jtagv8-etm.c
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1503,6 +1503,7 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ u64 version = 0;
if (!etm[cpu])
goto out;
@@ -1524,8 +1525,8 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb,
goto out;
}
if (etm_arch_supported(etm[cpu]->arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
- TZ_DBG_ETM_VER)
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+ && version < TZ_DBG_ETM_VER)
etm[cpu]->save_restore_enabled = true;
else
pr_info("etm save-restore supported by TZ\n");
@@ -1615,8 +1616,10 @@ static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
mutex_lock(&etmdata->mutex);
if (etmdata->init && !etmdata->enable) {
if (etm_arch_supported(etmdata->arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
- TZ_DBG_ETM_VER)
+ u64 version = 0;
+
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+ && (version < TZ_DBG_ETM_VER))
etmdata->save_restore_enabled = true;
else
pr_info("etm save-restore supported by TZ\n");
diff --git a/drivers/soc/qcom/jtagv8.c b/drivers/soc/qcom/jtagv8.c
index 94c391eabaea..f09ccce8f9c3 100644
--- a/drivers/soc/qcom/jtagv8.c
+++ b/drivers/soc/qcom/jtagv8.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -974,7 +974,7 @@ static struct notifier_block jtag_cpu_pm_notifier = {
static int __init msm_jtag_dbg_init(void)
{
int ret;
-
+ u64 version = 0;
if (msm_jtag_fuse_apps_access_disabled())
return -EPERM;
@@ -982,7 +982,8 @@ static int __init msm_jtag_dbg_init(void)
dbg_init_arch_data();
if (dbg_arch_supported(dbg.arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER) {
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) &&
+ version < TZ_DBG_ETM_VER) {
dbg.save_restore_enabled = true;
} else {
pr_info("dbg save-restore supported by TZ\n");
diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c
index d14e82415c5a..7a62c2e36da8 100644
--- a/drivers/soc/qcom/qbt1000.c
+++ b/drivers/soc/qcom/qbt1000.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "qbt1000:%s: " fmt, __func__
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
@@ -105,10 +106,14 @@ struct qbt1000_drvdata {
*/
struct fw_ipc_cmd {
uint32_t status;
+ uint32_t numMsgs;
+ uint8_t msg_data[FW_MAX_IPC_MSG_DATA_SIZE];
+};
+
+struct fw_ipc_header {
uint32_t msg_type;
uint32_t msg_len;
uint32_t resp_needed;
- uint8_t msg_data[FW_MAX_IPC_MSG_DATA_SIZE];
};
/*
@@ -352,7 +357,14 @@ static int qbt1000_open(struct inode *inode, struct file *file)
*/
static int qbt1000_release(struct inode *inode, struct file *file)
{
- struct qbt1000_drvdata *drvdata = file->private_data;
+ struct qbt1000_drvdata *drvdata;
+
+ if (!file->private_data) {
+ pr_err("Null pointer passed in file->private_data");
+ return -EINVAL;
+ }
+
+ drvdata = file->private_data;
atomic_inc(&drvdata->available);
return 0;
@@ -374,6 +386,11 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg)
void __user *priv_arg = (void __user *)arg;
struct qbt1000_drvdata *drvdata;
+ if (!file->private_data) {
+ pr_err("Null pointer passed in file->private_data");
+ return -EINVAL;
+ }
+
drvdata = file->private_data;
if (IS_ERR(priv_arg)) {
@@ -789,6 +806,8 @@ static int qbt1000_create_input_device(struct qbt1000_drvdata *drvdata)
BIT_MASK(KEY_HOMEPAGE);
drvdata->in_dev->keybit[BIT_WORD(KEY_CAMERA)] |=
BIT_MASK(KEY_CAMERA);
+ drvdata->in_dev->keybit[BIT_WORD(KEY_VOLUMEDOWN)] |=
+ BIT_MASK(KEY_VOLUMEDOWN);
drvdata->in_dev->keybit[BIT_WORD(KEY_POWER)] |=
BIT_MASK(KEY_POWER);
@@ -917,11 +936,14 @@ static irqreturn_t qbt1000_gpio_isr(int irq, void *dev_id)
*/
static irqreturn_t qbt1000_ipc_irq_handler(int irq, void *dev_id)
{
+ uint8_t *msg_buffer;
struct fw_ipc_cmd *rx_cmd;
- int i;
+ struct fw_ipc_header *header;
+ int i, j;
uint32_t rxipc = FP_APP_CMD_RX_IPC;
struct qbt1000_drvdata *drvdata = (struct qbt1000_drvdata *)dev_id;
int rc = 0;
+ uint32_t retry_count = 10;
pm_stay_awake(drvdata->dev);
@@ -933,18 +955,25 @@ static irqreturn_t qbt1000_ipc_irq_handler(int irq, void *dev_id)
goto end;
}
- pr_debug("firmware interrupt received (irq %d)\n", irq);
-
if (!drvdata->fp_app_handle)
goto end;
- /*
- * send the TZ command to fetch the message from firmware
- * TZ will process the message if it can
- */
- rc = send_tz_cmd(drvdata, drvdata->fp_app_handle, 0,
- &rxipc, sizeof(rxipc),
- (void *)&rx_cmd, sizeof(*rx_cmd));
+ while (retry_count > 0) {
+ /*
+ * send the TZ command to fetch the message from firmware
+ * TZ will process the message if it can
+ */
+ rc = send_tz_cmd(drvdata, drvdata->fp_app_handle, 0,
+ &rxipc, sizeof(rxipc),
+ (void *)&rx_cmd, sizeof(*rx_cmd));
+ if (rc < 0) {
+ msleep(50); /* sleep for 50ms before retry */
+ retry_count -= 1;
+ continue;
+ } else {
+ break;
+ }
+ }
if (rc < 0) {
pr_err("failure sending tz cmd %d\n", rxipc);
@@ -956,29 +985,35 @@ static irqreturn_t qbt1000_ipc_irq_handler(int irq, void *dev_id)
goto end;
}
- /*
- * given the IPC message type, search for a corresponding event for the
- * driver client. If found, add to the events FIFO
- */
- for (i = 0; i < ARRAY_SIZE(g_msg_to_event); i++) {
- if (g_msg_to_event[i].msg_type == rx_cmd->msg_type) {
- enum qbt1000_fw_event ev = g_msg_to_event[i].fw_event;
- struct fw_event_desc fw_ev_desc;
-
- mutex_lock(&drvdata->fw_events_mutex);
- pr_debug("fw events: add %d\n", (int) ev);
- fw_ev_desc.ev = ev;
-
- if (!kfifo_put(&drvdata->fw_events, fw_ev_desc))
- pr_err("fw events: fifo full, drop event %d\n",
- (int) ev);
-
- mutex_unlock(&drvdata->fw_events_mutex);
- wake_up_interruptible(&drvdata->read_wait_queue);
- break;
+ msg_buffer = rx_cmd->msg_data;
+
+ for (j = 0; j < rx_cmd->numMsgs; j++) {
+ header = (struct fw_ipc_header *) msg_buffer;
+ /*
+ * given the IPC message type, search for a corresponding event
+ * for the driver client. If found, add to the events FIFO
+ */
+ for (i = 0; i < ARRAY_SIZE(g_msg_to_event); i++) {
+ if (g_msg_to_event[i].msg_type == header->msg_type) {
+ enum qbt1000_fw_event ev =
+ g_msg_to_event[i].fw_event;
+ struct fw_event_desc fw_ev_desc;
+
+ mutex_lock(&drvdata->fw_events_mutex);
+ pr_debug("fw events: add %d\n", (int) ev);
+ fw_ev_desc.ev = ev;
+
+ if (!kfifo_put(&drvdata->fw_events, fw_ev_desc))
+ pr_err("fw events: fifo full, drop event %d\n",
+ (int) ev);
+
+ mutex_unlock(&drvdata->fw_events_mutex);
+ break;
+ }
}
+ msg_buffer += sizeof(*header) + header->msg_len;
}
-
+ wake_up_interruptible(&drvdata->read_wait_queue);
end:
mutex_unlock(&drvdata->mutex);
pm_relax(drvdata->dev);
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 83e3775ed533..b119c7a8441d 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#include <linux/export.h>
#include <linux/qcom_iommu.h>
#include <asm/dma-iommu.h>
+#include <soc/qcom/secure_buffer.h>
#define MSM_AUDIO_ION_PROBED (1 << 0)
@@ -178,6 +179,87 @@ err:
}
EXPORT_SYMBOL(msm_audio_ion_alloc);
+static int msm_audio_hyp_assign(ion_phys_addr_t *paddr, size_t *pa_len,
+ u8 assign_type)
+{
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[1] = {VMID_CP_ADSP_SHARED};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+ int ret = 0;
+
+ switch (assign_type) {
+ case HLOS_TO_ADSP:
+ srcVM[0] = VMID_HLOS;
+ destVM[0] = VMID_CP_ADSP_SHARED;
+ break;
+ case ADSP_TO_HLOS:
+ srcVM[0] = VMID_CP_ADSP_SHARED;
+ destVM[0] = VMID_HLOS;
+ break;
+ default:
+ pr_err("%s: Invalid assign type = %d\n", __func__, assign_type);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = hyp_assign_phys(*paddr, *pa_len, srcVM, 1, destVM, destVMperm, 1);
+ if (ret)
+ pr_err("%s: hyp_assign_phys failed for type %d, rc = %d\n",
+ __func__, assign_type, ret);
+done:
+ return ret;
+}
+
+int msm_audio_ion_phys_assign(const char *name, int fd, ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type)
+{
+ struct ion_client *client;
+ struct ion_handle *handle;
+ int ret;
+
+ if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!name || !paddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(client))) {
+ pr_err("%s: ION create client failed\n", __func__);
+ return -EINVAL;
+ }
+
+ handle = ion_import_dma_buf(client, fd);
+ if (IS_ERR_OR_NULL((void *) (handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_destroy_client;
+ }
+
+ ret = ion_phys(client, handle, paddr, pa_len);
+ if (ret) {
+ pr_err("%s: could not get physical address for handle, ret = %d\n",
+ __func__, ret);
+ goto err_ion_handle;
+ }
+ pr_debug("%s: ION Physical address is %x\n", __func__, (u32)*paddr);
+
+ ret = msm_audio_hyp_assign(paddr, pa_len, assign_type);
+
+err_ion_handle:
+ ion_free(client, handle);
+
+err_destroy_client:
+ ion_client_destroy(client);
+
+ return ret;
+}
+
int msm_audio_ion_import(const char *name, struct ion_client **client,
struct ion_handle **handle, int fd,
unsigned long *ionflag, size_t bufsz,
diff --git a/drivers/soc/qcom/qpnp-haptic.c b/drivers/soc/qcom/qpnp-haptic.c
index d86f8671705a..38cc86963181 100644
--- a/drivers/soc/qcom/qpnp-haptic.c
+++ b/drivers/soc/qcom/qpnp-haptic.c
@@ -2233,13 +2233,23 @@ static void qpnp_hap_td_enable(struct timed_output_dev *dev, int time_ms)
timed_dev);
int rc;
- if (time_ms <= 0)
+ if (time_ms < 0)
return;
+ mutex_lock(&hap->lock);
+
+ if (time_ms == 0) {
+ /* disable haptics */
+ hrtimer_cancel(&hap->hap_timer);
+ hap->state = 0;
+ schedule_work(&hap->work);
+ mutex_unlock(&hap->lock);
+ return;
+ }
+
if (time_ms < 10)
time_ms = 10;
- mutex_lock(&hap->lock);
if (is_sw_lra_auto_resonance_control(hap))
hrtimer_cancel(&hap->auto_res_err_poll_timer);
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 31de6e5c173c..43e2e4d17648 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -397,18 +397,22 @@ static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
__asmeq("%1", R1_STR)
__asmeq("%2", R2_STR)
__asmeq("%3", R3_STR)
- __asmeq("%4", R0_STR)
- __asmeq("%5", R1_STR)
- __asmeq("%6", R2_STR)
- __asmeq("%7", R3_STR)
- __asmeq("%8", R4_STR)
- __asmeq("%9", R5_STR)
- __asmeq("%10", R6_STR)
+ __asmeq("%4", R4_STR)
+ __asmeq("%5", R5_STR)
+ __asmeq("%6", R6_STR)
+ __asmeq("%7", R0_STR)
+ __asmeq("%8", R1_STR)
+ __asmeq("%9", R2_STR)
+ __asmeq("%10", R3_STR)
+ __asmeq("%11", R4_STR)
+ __asmeq("%12", R5_STR)
+ __asmeq("%13", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
- : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+ "=r" (r4), "=r" (r5), "=r" (r6)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
"r" (r5), "r" (r6)
: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
@@ -442,18 +446,22 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
__asmeq("%1", R1_STR)
__asmeq("%2", R2_STR)
__asmeq("%3", R3_STR)
- __asmeq("%4", R0_STR)
- __asmeq("%5", R1_STR)
- __asmeq("%6", R2_STR)
- __asmeq("%7", R3_STR)
- __asmeq("%8", R4_STR)
- __asmeq("%9", R5_STR)
- __asmeq("%10", R6_STR)
+ __asmeq("%4", R4_STR)
+ __asmeq("%5", R5_STR)
+ __asmeq("%6", R6_STR)
+ __asmeq("%7", R0_STR)
+ __asmeq("%8", R1_STR)
+ __asmeq("%9", R2_STR)
+ __asmeq("%10", R3_STR)
+ __asmeq("%11", R4_STR)
+ __asmeq("%12", R5_STR)
+ __asmeq("%13", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
- : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+ "=r" (r4), "=r" (r5), "=r" (r6)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
"r" (r5), "r" (r6)
: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
@@ -490,18 +498,22 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
__asmeq("%1", R1_STR)
__asmeq("%2", R2_STR)
__asmeq("%3", R3_STR)
- __asmeq("%4", R0_STR)
- __asmeq("%5", R1_STR)
- __asmeq("%6", R2_STR)
- __asmeq("%7", R3_STR)
- __asmeq("%8", R4_STR)
- __asmeq("%9", R5_STR)
- __asmeq("%10", R6_STR)
+ __asmeq("%4", R4_STR)
+ __asmeq("%5", R5_STR)
+ __asmeq("%6", R6_STR)
+ __asmeq("%7", R0_STR)
+ __asmeq("%8", R1_STR)
+ __asmeq("%9", R2_STR)
+ __asmeq("%10", R3_STR)
+ __asmeq("%11", R4_STR)
+ __asmeq("%12", R5_STR)
+ __asmeq("%13", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
- : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+ "=r" (r4), "=r" (r5), "=r" (r6)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
"r" (r5), "r" (r6));
@@ -1117,54 +1129,55 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id)
ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
sizeof(svc_cmd), &ret_val, sizeof(ret_val));
- if (ret)
- return ret;
+ if (!ret && ret_val)
+ return 1;
+ else
+ return 0;
- return ret_val;
}
desc.arginfo = SCM_ARGS(1);
desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
+ desc.ret[0] = 0;
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
- if (ret)
- return ret;
+ if (!ret && desc.ret[0])
+ return 1;
+ else
+ return 0;
- return desc.ret[0];
}
EXPORT_SYMBOL(scm_is_call_available);
#define GET_FEAT_VERSION_CMD 3
-int scm_get_feat_version(u32 feat)
+int scm_get_feat_version(u32 feat, u64 *scm_ret)
{
struct scm_desc desc = {0};
int ret;
if (!is_scm_armv8()) {
if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
- u32 version;
- if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
- sizeof(feat), &version, sizeof(version)))
- return version;
+ ret = scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD,
+ &feat, sizeof(feat), scm_ret, sizeof(*scm_ret));
+ return ret;
}
- return 0;
}
ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
if (ret <= 0)
- return 0;
+ return -EAGAIN;
desc.args[0] = feat;
desc.arginfo = SCM_ARGS(1);
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
&desc);
- if (!ret)
- return desc.ret[0];
- return 0;
+ *scm_ret = desc.ret[0];
+
+ return ret;
}
EXPORT_SYMBOL(scm_get_feat_version);
#define RESTORE_SEC_CFG 2
-int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret)
{
struct scm_desc desc = {0};
int ret;
diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c
index 54a978157bda..1e369c73e34b 100644
--- a/drivers/soc/qcom/scm_qcpe.c
+++ b/drivers/soc/qcom/scm_qcpe.c
@@ -27,7 +27,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scm.h>
-#include <uapi/linux/habmm.h>
+#include <linux/habmm.h>
#define SCM_ENOMEM (-5)
#define SCM_EOPNOTSUPP (-4)
@@ -218,8 +218,8 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
if (!opened) {
ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0);
- if (ret != HAB_OK) {
- pr_err("scm_call2: habmm_socket_open failed with ret = %d",
+ if (ret) {
+ pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d",
ret);
return ret;
}
@@ -235,23 +235,30 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
smc_params.sid = 0;
ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0);
- if (ret != HAB_OK)
+ if (ret)
return ret;
size_bytes = sizeof(smc_params);
+ memset(&smc_params, 0x0, sizeof(smc_params));
ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0);
- if (ret != HAB_OK)
+ if (ret)
return ret;
+ if (size_bytes != sizeof(smc_params)) {
+ pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n",
+ sizeof(smc_params), size_bytes);
+ return SCM_ERROR;
+ }
+
desc->ret[0] = smc_params.x1;
desc->ret[1] = smc_params.x2;
desc->ret[2] = smc_params.x3;
- pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx",
- desc->ret[0], desc->ret[1], desc->ret[2]);
+ pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx",
+ smc_params.x0, desc->ret[0], desc->ret[1], desc->ret[2]);
- return 0;
+ return smc_params.x0;
}
static u32 smc(u32 cmd_addr)
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 4307937d9f6d..e7a00cdb5b03 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -424,13 +424,14 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
bool msm_secure_v2_is_supported(void)
{
- int version = scm_get_feat_version(FEATURE_ID_CP);
+ u64 version;
+ int ret = scm_get_feat_version(FEATURE_ID_CP, &version);
/*
* if the version is < 1.1.0 then dynamic buffer allocation is
* not supported
*/
- return version >= MAKE_CP_VERSION(1, 1, 0);
+ return (ret == 0) && (version >= MAKE_CP_VERSION(1, 1, 0));
}
static int __init alloc_secure_shared_memory(void)
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 882cd6618cd5..87a0e47eeae6 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
- dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+ char name[128];
+
+ snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
+ dws->debugfs = debugfs_create_dir(name, NULL);
if (!dws->debugfs)
return -ENOMEM;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index b43e5656598a..a62ae9061f27 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -364,11 +364,23 @@ static int spmi_drv_remove(struct device *dev)
return 0;
}
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
static struct bus_type spmi_bus_type = {
.name = "spmi",
.match = spmi_device_match,
.probe = spmi_drv_probe,
.remove = spmi_drv_remove,
+ .uevent = spmi_drv_uevent,
};
/**
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 07fc21797f0f..067cd58375a4 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -264,7 +264,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
- atomic_add(len, &heap->total_allocated);
+ atomic_long_add(len, &heap->total_allocated);
return buffer;
err:
@@ -282,7 +282,7 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
- atomic_sub(buffer->size, &buffer->heap->total_allocated);
+ atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
@@ -320,7 +320,7 @@ static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
mutex_lock(&buffer->lock);
if (buffer->handle_count == 0)
- atomic_add(buffer->size, &buffer->heap->total_handles);
+ atomic_long_add(buffer->size, &buffer->heap->total_handles);
buffer->handle_count++;
mutex_unlock(&buffer->lock);
@@ -346,7 +346,7 @@ static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
task = current->group_leader;
get_task_comm(buffer->task_comm, task);
buffer->pid = task_pid_nr(task);
- atomic_sub(buffer->size, &buffer->heap->total_handles);
+ atomic_long_sub(buffer->size, &buffer->heap->total_handles);
}
mutex_unlock(&buffer->lock);
}
@@ -1951,10 +1951,10 @@ void show_ion_usage(struct ion_device *dev)
"Total orphaned size");
pr_info("---------------------------------\n");
plist_for_each_entry(heap, &dev->heaps, node) {
- pr_info("%16.s 0x%16.x 0x%16.x\n",
- heap->name, atomic_read(&heap->total_allocated),
- atomic_read(&heap->total_allocated) -
- atomic_read(&heap->total_handles));
+ pr_info("%16.s 0x%16.lx 0x%16.lx\n",
+ heap->name, atomic_long_read(&heap->total_allocated),
+ atomic_long_read(&heap->total_allocated) -
+ atomic_long_read(&heap->total_handles));
if (heap->debug_show)
heap->debug_show(heap, NULL, 0);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index dfb6d2f77af1..d932db4f9810 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -237,10 +237,12 @@ void ion_cma_heap_destroy(struct ion_heap *heap)
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
- int ret = 0;
+ int i, ret = 0;
int source_vm;
int dest_vmid;
int dest_perms;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
struct ion_cma_buffer_info *info = buffer->priv_virt;
source_vm = get_secure_vmid(buffer->flags);
@@ -251,14 +253,17 @@ static void ion_secure_cma_free(struct ion_buffer *buffer)
dest_vmid = VMID_HLOS;
dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
- ret = hyp_assign_table(info->table, &source_vm, 1,
- &dest_vmid, &dest_perms, 1);
+ sgt = info->table;
+ ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vmid, &dest_perms, 1);
if (ret) {
pr_err("%s: Not freeing memory since assign failed\n",
__func__);
return;
}
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ ClearPagePrivate(sg_page(sg));
+
ion_cma_free(buffer);
}
@@ -266,11 +271,13 @@ static int ion_secure_cma_allocate(struct ion_heap *heap,
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags)
{
- int ret = 0;
+ int i, ret = 0;
int source_vm;
int dest_vm;
int dest_perms;
struct ion_cma_buffer_info *info;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
source_vm = VMID_HLOS;
dest_vm = get_secure_vmid(flags);
@@ -292,12 +299,17 @@ static int ion_secure_cma_allocate(struct ion_heap *heap,
}
info = buffer->priv_virt;
- ret = hyp_assign_table(info->table, &source_vm, 1,
- &dest_vm, &dest_perms, 1);
+ sgt = info->table;
+ ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm, &dest_perms, 1);
if (ret) {
pr_err("%s: Assign call failed\n", __func__);
goto err;
}
+
+ /* Set the private bit to indicate that we've secured this */
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ SetPagePrivate(sg_page(sg));
+
return ret;
err:
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index b687675be02f..f2f2ca11c3fa 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -200,8 +200,8 @@ struct ion_heap {
struct task_struct *task;
int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
- atomic_t total_allocated;
- atomic_t total_handles;
+ atomic_long_t total_allocated;
+ atomic_long_t total_handles;
};
/**
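For context on the atomic_t -> atomic_long_t switch in ion.c and ion_priv.h above: total_allocated and total_handles count bytes, and an int-sized counter misbehaves once the outstanding total passes 2 GiB, which large ION heaps on 64-bit targets can reach. A small stand-alone sketch of the wraparound (plain userspace C, using unsigned types so the demo itself stays well-defined):

#include <stdio.h>

int main(void)
{
        unsigned int total32 = 0;        /* stand-in for the old 32-bit counter */
        unsigned long long total64 = 0;  /* stand-in for atomic_long_t on arm64 */
        int i;

        /* Track 20 buffers of 256 MiB each: 5 GiB outstanding. */
        for (i = 0; i < 20; i++) {
                total32 += 256u * 1024 * 1024;
                total64 += 256u * 1024 * 1024;
        }

        printf("32-bit counter: %u bytes (wrapped)\n", total32);
        printf("64-bit counter: %llu bytes\n", total64);
        return 0;
}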
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 8deee007218b..9e12b2ce3272 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -537,7 +537,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
cache_limit = minfree * (long)(PAGE_SIZE / 1024);
free = other_free * (long)(PAGE_SIZE / 1024);
trace_lowmemory_kill(selected, cache_size, cache_limit, free);
- lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
+ lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n" \
" to free %ldkB on behalf of '%s' (%d) because\n" \
" cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
" Free memory is %ldkB above reserved.\n" \
@@ -547,7 +547,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
" Total file cache is %ldkB\n" \
" Total zcache is %ldkB\n" \
" GFP mask is 0x%x\n",
- selected->comm, selected->pid,
+ selected->comm, selected->pid, selected->tgid,
selected_oom_score_adj,
selected_tasksize * (long)(PAGE_SIZE / 1024),
current->comm, current->pid,
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 7b4af519e17e..b831f08e2769 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2901,9 +2901,6 @@ static int __init comedi_init(void)
comedi_class->dev_groups = comedi_dev_groups;
- /* XXX requires /proc interface */
- comedi_proc_init();
-
/* create devices files for legacy/manual use */
for (i = 0; i < comedi_num_legacy_minors; i++) {
struct comedi_device *dev;
@@ -2911,6 +2908,7 @@ static int __init comedi_init(void)
dev = comedi_alloc_board_minor(NULL);
if (IS_ERR(dev)) {
comedi_cleanup_board_minors();
+ class_destroy(comedi_class);
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
COMEDI_NUM_MINORS);
@@ -2920,6 +2918,9 @@ static int __init comedi_init(void)
mutex_unlock(&dev->mutex);
}
+ /* XXX requires /proc interface */
+ comedi_proc_init();
+
return 0;
}
module_init(comedi_init);
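The comedi_init() reshuffle above does two things: the error path now tears down everything created before the failure (class_destroy() is added ahead of cdev_del()), and the /proc entry is only published after the legacy minors exist. A common way to keep such error paths consistent is the goto-unwind idiom; a minimal stand-alone sketch with hypothetical resources standing in for comedi's real ones:

#include <stdio.h>
#include <stdlib.h>

static int init_subsystem(void)
{
        char *region, *class, *minors;

        region = malloc(16);            /* think: register_chrdev_region() */
        if (!region)
                goto err;

        class = malloc(16);             /* think: class_create() */
        if (!class)
                goto err_region;

        minors = malloc(16);            /* think: comedi_alloc_board_minor() */
        if (!minors)
                goto err_class;

        /* Only now publish externally visible interfaces (the /proc file). */
        printf("init complete\n");

        /* A real driver keeps these until exit; freed here only for tidiness. */
        free(minors);
        free(class);
        free(region);
        return 0;

err_class:
        free(class);
err_region:
        free(region);
err:
        return -1;
}

int main(void)
{
        return init_subsystem() ? EXIT_FAILURE : EXIT_SUCCESS;
}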
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 82a7c27c517f..02c3feef4e36 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 01e642db311e..f35ee85f61b5 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -529,6 +529,9 @@ static int vnt_start(struct ieee80211_hw *hw)
goto free_all;
}
+ if (vnt_key_init_table(priv))
+ goto free_all;
+
priv->int_interval = 1; /* bInterval is set to 1 */
vnt_int_start_interrupt(priv);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index dc1bd1f1bdfe..634ad3662ed6 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -868,6 +868,7 @@ DEF_TPG_ATTRIB(default_erl);
DEF_TPG_ATTRIB(t10_pi);
DEF_TPG_ATTRIB(fabric_prot_type);
DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_authentication,
@@ -883,6 +884,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_t10_pi,
&iscsi_tpg_attrib_attr_fabric_prot_type,
&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+ &iscsi_tpg_attrib_attr_login_keys_workaround,
NULL,
};
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9fc9117d0f22..549a2bbbf4df 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -818,7 +818,8 @@ static int iscsi_target_handle_csg_zero(
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0)
return -1;
@@ -888,7 +889,8 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 6d1b0acbc5b3..76bde76edad1 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -764,7 +764,8 @@ static int iscsi_check_for_auth_key(char *key)
return 0;
}
-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+ bool keys_workaround)
{
if (IS_TYPE_BOOL_AND(param)) {
if (!strcmp(param->value, NO))
@@ -772,19 +773,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
} else if (IS_TYPE_BOOL_OR(param)) {
if (!strcmp(param->value, YES))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, IMMEDIATEDATA))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, MAXCONNECTIONS))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for Mellanox Flexboot PXE boot ROM
+ */
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_PHASE_DECLARATIVE(param))
SET_PSTATE_REPLY_OPTIONAL(param);
}
@@ -1421,7 +1434,8 @@ int iscsi_encode_text_output(
u8 sender,
char *textbuf,
u32 *length,
- struct iscsi_param_list *param_list)
+ struct iscsi_param_list *param_list,
+ bool keys_workaround)
{
char *output_buf = NULL;
struct iscsi_extra_response *er;
@@ -1457,7 +1471,8 @@ int iscsi_encode_text_output(
*length += 1;
output_buf = textbuf + *length;
SET_PSTATE_PROPOSER(param);
- iscsi_check_proposer_for_optional_reply(param);
+ iscsi_check_proposer_for_optional_reply(param,
+ keys_workaround);
pr_debug("Sending key: %s=%s\n",
param->name, param->value);
}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a0751e3f0813..17a58c2913f2 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -40,7 +40,7 @@ extern int iscsi_extract_key_value(char *, char **, char **);
extern int iscsi_update_param_value(struct iscsi_param *, char *);
extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
- struct iscsi_param_list *);
+ struct iscsi_param_list *, bool);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 205a509b0dfb..63e1dcc5914d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -227,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->t10_pi = TA_DEFAULT_T10_PI;
a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+ a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -899,3 +900,21 @@ int iscsit_ta_tpg_enabled_sendtargets(
return 0;
}
+
+int iscsit_ta_login_keys_workaround(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->login_keys_workaround = flag;
+ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
+ tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 2da211920c18..901a712180f0 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@ extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 37c77db6e737..f71bedea973a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -728,6 +728,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
if (cmd->transport_state & CMD_T_ABORTED ||
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ /*
+ * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
+ * release se_device->caw_sem obtained by sbc_compare_and_write()
+ * since target_complete_ok_work() or target_complete_failure_work()
+ * won't be called to invoke the normal CAW completion callbacks.
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ up(&dev->caw_sem);
+ }
complete_all(&cmd->t_transport_stop_comp);
return;
} else if (!success) {
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 65c7033e0df0..632fa8c1260b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -192,8 +192,10 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+ unsigned long level = get_level(cpufreq_dev, freq);
+
mutex_unlock(&cooling_list_lock);
- return get_level(cpufreq_dev, freq);
+ return level;
}
}
mutex_unlock(&cooling_list_lock);
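The cpu_cooling change above is the classic "finish using the protected object before dropping its lock" fix: the old code released cooling_list_lock and only then called get_level() on a cpufreq_dev that another CPU could have unregistered and freed in the meantime. The pattern in a stand-alone form (pthread mutex, hypothetical shared entry):

#include <pthread.h>
#include <stdio.h>

struct entry {
        unsigned long level;    /* stands in for get_level(cpufreq_dev, freq) */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry shared = { .level = 3 };

static unsigned long lookup_level(void)
{
        unsigned long level;

        pthread_mutex_lock(&list_lock);
        /* Copy what we need while the entry is guaranteed to be alive... */
        level = shared.level;
        pthread_mutex_unlock(&list_lock);

        /* ...and touch only the local copy once the lock is gone. */
        return level;
}

int main(void)
{
        printf("level = %lu\n", lookup_level());
        return 0;
}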
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index fc9faaee3170..b80e75fc3521 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -2255,7 +2255,7 @@ void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
return;
if (msm_uport->wakeup.enabled) {
- disable_irq_nosync(msm_uport->wakeup.irq);
+ disable_irq(msm_uport->wakeup.irq);
enable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
msm_uport->wakeup.enabled = false;
@@ -2614,8 +2614,7 @@ static int msm_hs_startup(struct uart_port *uport)
msm_hs_resource_vote(msm_uport);
if (is_use_low_power_wakeup(msm_uport)) {
- ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
- msm_hs_wakeup_isr,
+ ret = request_irq(msm_uport->wakeup.irq, msm_hs_wakeup_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"msm_hs_wakeup", msm_uport);
if (unlikely(ret)) {
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5ab54ef4f304..e4f69bddcfb1 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2708,13 +2708,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();
@@ -2723,7 +2723,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
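For the vt.c hunks above: __put_user() writes through the user pointer without the access_ok() range check and relies on the caller having validated it, which tioclinux() never did; put_user() folds that check in and returns -EFAULT for a bad pointer. Roughly, as a sketch of the relationship only (not part of the patch):

        /* put_user(data, p) on this path is effectively: */
        ret = access_ok(VERIFY_WRITE, p, sizeof(*p)) ?
                        __put_user(data, p) :   /* the unchecked store itself */
                        -EFAULT;                /* rejected without touching p */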
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0b7194086c5a..df96f5f88c15 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1759,6 +1759,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 96b21b0dac1e..3116edfcdc18 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* Hauppauge HVR-950q */
+ { USB_DEVICE(0x2040, 0x7200), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index c3077ac11709..c5fc77eae894 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2634,8 +2634,6 @@ done:
static void check_for_sdp_connection(struct work_struct *w)
{
- int ret;
- union power_supply_propval pval = {0};
struct dwc3_msm *mdwc =
container_of(w, struct dwc3_msm, sdp_check.work);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
@@ -2646,21 +2644,6 @@ static void check_for_sdp_connection(struct work_struct *w)
/* floating D+/D- lines detected */
if (dwc->gadget.state < USB_STATE_DEFAULT &&
dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
- if (!mdwc->usb_psy) {
- mdwc->usb_psy = power_supply_get_by_name("usb");
- if (!mdwc->usb_psy) {
- dev_dbg(mdwc->dev,
- "Could not get usb power_supply\n");
- return;
- }
- }
- pval.intval = -ETIMEDOUT;
- ret = power_supply_set_property(mdwc->usb_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
- if (ret)
- dev_dbg(mdwc->dev,
- "power supply error when setting property\n");
-
mdwc->vbus_active = 0;
dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
@@ -2915,7 +2898,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
ret = dwc3_msm_get_clk_gdsc(mdwc);
if (ret) {
dev_err(&pdev->dev, "error getting clock or gdsc.\n");
- return ret;
+ goto err;
}
mdwc->id_state = DWC3_ID_FLOAT;
@@ -2962,8 +2945,9 @@ static int dwc3_msm_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
msm_dwc3_pwr_irq,
msm_dwc3_pwr_irq_thread,
- IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
- | IRQF_ONESHOT, "ss_phy_irq", mdwc);
+ IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH
+ | IRQF_EARLY_RESUME | IRQF_ONESHOT,
+ "ss_phy_irq", mdwc);
if (ret) {
dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
ret);
@@ -3223,19 +3207,14 @@ static int dwc3_msm_probe(struct platform_device *pdev)
return 0;
put_dwc3:
- platform_device_put(mdwc->dwc3);
if (mdwc->bus_perf_client)
msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+ of_platform_depopulate(&pdev->dev);
err:
+ destroy_workqueue(mdwc->dwc3_wq);
return ret;
}
-static int dwc3_msm_remove_children(struct device *dev, void *data)
-{
- device_unregister(dev);
- return 0;
-}
-
static int dwc3_msm_remove(struct platform_device *pdev)
{
struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
@@ -3272,8 +3251,7 @@ static int dwc3_msm_remove(struct platform_device *pdev)
if (mdwc->hs_phy)
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
- platform_device_put(mdwc->dwc3);
- device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
+ of_platform_depopulate(&pdev->dev);
dbg_event(0xFF, "Remov put", 0);
pm_runtime_disable(mdwc->dev);
@@ -3456,13 +3434,16 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
mdwc->hs_phy->flags |= PHY_HOST_MODE;
- if (dwc->maximum_speed == USB_SPEED_SUPER)
+ if (dwc->maximum_speed == USB_SPEED_SUPER) {
mdwc->ss_phy->flags |= PHY_HOST_MODE;
+ usb_phy_notify_connect(mdwc->ss_phy,
+ USB_SPEED_SUPER);
+ }
+ usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
pm_runtime_get_sync(mdwc->dev);
dbg_event(0xFF, "StrtHost gync",
atomic_read(&mdwc->dev->power.usage_count));
- usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
if (!IS_ERR(mdwc->vbus_reg))
ret = regulator_enable(mdwc->vbus_reg);
if (ret) {
@@ -3570,8 +3551,13 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
dbg_event(0xFF, "StopHost gsync",
atomic_read(&mdwc->dev->power.usage_count));
usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+ if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
+ usb_phy_notify_disconnect(mdwc->ss_phy,
+ USB_SPEED_SUPER);
+ mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+ }
+
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
- mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
platform_device_del(dwc->xhci);
usb_unregister_notify(&mdwc->host_nb);
@@ -3708,16 +3694,18 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
return 0;
psy_type = get_psy_type(mdwc);
- if (psy_type != POWER_SUPPLY_TYPE_USB &&
- psy_type != POWER_SUPPLY_TYPE_USB_FLOAT)
+ if (psy_type == POWER_SUPPLY_TYPE_USB) {
+ dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+ /* Set max current limit in uA */
+ pval.intval = 1000 * mA;
+ } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ pval.intval = -ETIMEDOUT;
+ } else {
return 0;
+ }
- dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
-
- /* Set max current limit in uA */
- pval.intval = 1000 * mA;
ret = power_supply_set_property(mdwc->usb_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
if (ret) {
dev_dbg(mdwc->dev, "power supply error when setting property\n");
return ret;
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 5c0adb9c6fb2..81db2fa08cad 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -224,7 +224,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
+ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c2a6fdbfcfee..c244d908fa4f 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -439,6 +439,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
dwc->ep0_usb_req.request.buf = dwc->setup_buf;
dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;
+ dwc->ep0_usb_req.request.dma = DMA_ERROR_CODE;
return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
@@ -729,6 +730,7 @@ static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
dwc->ep0_usb_req.request.buf = dwc->setup_buf;
dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;
+ dwc->ep0_usb_req.request.dma = DMA_ERROR_CODE;
return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
index 28ac8d0010d8..1a281833eadd 100644
--- a/drivers/usb/gadget/function/f_ccid.c
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -26,7 +26,7 @@
#include "f_ccid.h"
#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
-#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header)
+#define BULK_OUT_BUFFER_SIZE 1024
#define CTRL_BUF_SIZE 4
#define FUNCTION_NAME "ccid"
#define MAX_INST_NAME_LEN 40
@@ -629,14 +629,14 @@ static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
struct f_ccid *ccid_dev = fp->private_data;
struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
struct usb_request *req;
- int r = count, xfer;
+ int r = count, xfer, len;
int ret;
unsigned long flags;
pr_debug("ccid_bulk_read(%zu)\n", count);
if (count > BULK_OUT_BUFFER_SIZE) {
- pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n",
+ pr_err("%s: max_buffer_size:%d given_pkt_size:%zu\n",
__func__, BULK_OUT_BUFFER_SIZE, count);
return -ENOMEM;
}
@@ -647,6 +647,7 @@ static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
goto done;
}
+ len = ALIGN(count, ccid_dev->out->maxpacket);
requeue_req:
spin_lock_irqsave(&ccid_dev->lock, flags);
if (!atomic_read(&ccid_dev->online)) {
@@ -655,7 +656,7 @@ requeue_req:
}
/* queue a request */
req = bulk_dev->rx_req;
- req->length = count;
+ req->length = len;
bulk_dev->rx_done = 0;
spin_unlock_irqrestore(&ccid_dev->lock, flags);
ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
@@ -688,6 +689,9 @@ requeue_req:
spin_unlock_irqrestore(&ccid_dev->lock, flags);
goto requeue_req;
}
+ if (req->actual > count)
+ pr_err("%s More data received(%d) than required(%zu)\n",
+ __func__, req->actual, count);
xfer = (req->actual < count) ? req->actual : count;
atomic_set(&bulk_dev->rx_req_busy, 1);
spin_unlock_irqrestore(&ccid_dev->lock, flags);
@@ -875,7 +879,8 @@ static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
count = CTRL_BUF_SIZE;
ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
- ctrl_dev->tx_ctrl_done);
+ ctrl_dev->tx_ctrl_done ||
+ !atomic_read(&ccid_dev->online));
if (ret < 0)
return ret;
ctrl_dev->tx_ctrl_done = 0;
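The f_ccid read path above now queues the OUT request with its length rounded up to the endpoint's wMaxPacketSize (len = ALIGN(count, maxpacket)) and merely warns if the host sends more than the reader asked for; OUT buffers generally have to accommodate whole max-size packets. The kernel's ALIGN() for a power-of-two boundary is plain mask arithmetic, shown stand-alone here:

#include <stdio.h>

/* Same arithmetic as the kernel's ALIGN() for power-of-two alignment. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int maxpacket = 512;   /* typical high-speed bulk wMaxPacketSize */
        unsigned int counts[] = { 4, 13, 512, 700 };
        unsigned int i;

        for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
                printf("count=%u -> req->length=%u\n",
                       counts[i], ALIGN_UP(counts[i], maxpacket));
        return 0;
}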
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 3f903d4776b4..19fe6c8cb25a 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1579,6 +1579,12 @@ static void gsi_rndis_command_complete(struct usb_ep *ep,
struct f_gsi *rndis = req->context;
int status;
+ if (req->status != 0) {
+ log_event_err("RNDIS command completion error %d\n",
+ req->status);
+ return;
+ }
+
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
if (status < 0)
log_event_err("RNDIS command error %d, %d/%d",
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 99285b416308..ee579ba2b59e 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -539,7 +539,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
- ERROR(cdev, "Enable IN endpoint FAILED!\n");
+ ERROR(cdev, "Enable OUT endpoint FAILED!\n");
goto fail;
}
hidg->out_ep->driver_data = hidg;
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index 434af820e827..a28bcd084dc3 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -545,6 +545,12 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
rndis_init_msg_type *buf;
u32 ul_max_xfer_size, dl_max_xfer_size;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error %d\n",
+ __func__, req->status);
+ return;
+ }
+
spin_lock(&rndis_lock);
rndis = _rndis_qc;
if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 13888821109d..0917bc500023 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -463,6 +463,12 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
int status;
rndis_init_msg_type *buf;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error:%d\n",
+ __func__, req->status);
+ return;
+ }
+
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
// spin_lock(&dev->lock);
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9dbd7595a7a3..fae1222d4bc8 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -783,6 +783,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
clear_bit(wIndex, &bus_state->resuming_ports);
set_bit(wIndex, &bus_state->rexit_ports);
+
+ xhci_test_and_clear_bit(xhci, port_array, wIndex,
+ PORT_PLC);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 12e38cf022ff..5bfe47816ac3 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -856,13 +856,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
- for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
+ ring = ep->stream_info->stream_rings[stream_id];
+ if (!ring)
+ continue;
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
- slot_id, ep_index, stream_id + 1);
- xhci_kill_ring_urbs(xhci,
- ep->stream_info->stream_rings[stream_id]);
+ slot_id, ep_index, stream_id);
+ xhci_kill_ring_urbs(xhci, ring);
}
} else {
ring = ep->ring;
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 0cbe95304417..f9f47da8a88b 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -481,21 +481,18 @@ static inline void pd_reset_protocol(struct usbpd *pd)
pd->send_dr_swap = false;
}
-static int pd_send_msg(struct usbpd *pd, u8 hdr_type, const u32 *data,
- size_t num_data, enum pd_msg_type type)
+static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
+ size_t num_data, enum pd_sop_type sop)
{
int ret;
u16 hdr;
- hdr = PD_MSG_HDR(hdr_type, pd->current_dr, pd->current_pr,
+ hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
pd->tx_msgid, num_data, pd->spec_rev);
- ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), type, 15);
- /* TODO figure out timeout. based on tReceive=1.1ms x nRetryCount? */
- if (ret < 0)
+ ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), sop);
+ if (ret)
return ret;
- else if (ret != num_data * sizeof(u32))
- return -EIO;
pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
return 0;
@@ -596,7 +593,7 @@ static void pd_send_hard_reset(struct usbpd *pd)
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
set_power_role(pd, pd->current_pr);
pd->hard_reset_count++;
- pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
+ pd_phy_signal(HARD_RESET_SIG);
pd->in_pr_swap = false;
power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
}
@@ -612,12 +609,12 @@ static void kick_sm(struct usbpd *pd, int ms)
queue_work(pd->wq, &pd->sm_work);
}
-static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
+static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
{
union power_supply_propval val = {1};
- if (type != HARD_RESET_SIG) {
- usbpd_err(&pd->dev, "invalid signal (%d) received\n", type);
+ if (sig != HARD_RESET_SIG) {
+ usbpd_err(&pd->dev, "invalid signal (%d) received\n", sig);
return;
}
@@ -632,16 +629,16 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
kick_sm(pd, 0);
}
-static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
+static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
u8 *buf, size_t len)
{
struct rx_msg *rx_msg;
unsigned long flags;
u16 header;
- if (type != SOP_MSG) {
+ if (sop != SOP_MSG) {
usbpd_err(&pd->dev, "invalid msg type (%d) received; only SOP supported\n",
- type);
+ sop);
return;
}
@@ -964,7 +961,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->current_voltage = pd->requested_voltage = 5000000;
val.intval = pd->requested_voltage; /* set max range to 5V */
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_VOLTAGE_MAX, &val);
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);
if (!pd->vbus_present) {
pd->current_state = PE_SNK_DISCOVERY;
@@ -1617,7 +1614,6 @@ static void usbpd_sm(struct work_struct *w)
else if (pd->current_dr == DR_DFP)
stop_usb_host(pd);
- pd->current_pr = PR_NONE;
pd->current_dr = DR_NONE;
reset_vdm_state(pd);
@@ -1663,7 +1659,7 @@ static void usbpd_sm(struct work_struct *w)
pd->requested_voltage = 5000000;
val.intval = pd->requested_voltage;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_VOLTAGE_MIN, &val);
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);
pd->in_pr_swap = false;
val.intval = 0;
@@ -1982,8 +1978,8 @@ static void usbpd_sm(struct work_struct *w)
val.intval = pd->requested_voltage;
power_supply_set_property(pd->usb_psy,
pd->requested_voltage >= pd->current_voltage ?
- POWER_SUPPLY_PROP_VOLTAGE_MAX :
- POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MAX :
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
&val);
/*
@@ -2034,8 +2030,8 @@ static void usbpd_sm(struct work_struct *w)
val.intval = pd->requested_voltage;
power_supply_set_property(pd->usb_psy,
pd->requested_voltage >= pd->current_voltage ?
- POWER_SUPPLY_PROP_VOLTAGE_MIN :
- POWER_SUPPLY_PROP_VOLTAGE_MAX, &val);
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN :
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);
pd->current_voltage = pd->requested_voltage;
/* resume charging */
@@ -2217,7 +2213,7 @@ static void usbpd_sm(struct work_struct *w)
val.intval = pd->requested_voltage;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_VOLTAGE_MIN, &val);
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);
pd_send_hard_reset(pd);
pd->in_explicit_contract = false;
@@ -2480,6 +2476,16 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
if (pd->current_pr == PR_SINK)
return 0;
+ /*
+ * Unexpected if not in PR swap; need to force disconnect from
+ * source so we can turn off VBUS, Vconn, PD PHY etc.
+ */
+ if (pd->current_pr == PR_SRC) {
+ usbpd_info(&pd->dev, "Forcing disconnect from source mode\n");
+ pd->current_pr = PR_NONE;
+ break;
+ }
+
pd->current_pr = PR_SINK;
break;
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index e200c25bc23a..b454442e2471 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -80,6 +80,10 @@
#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
+/* timers */
+#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
+#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
+
struct usb_pdphy {
struct device *dev;
struct regmap *regmap;
@@ -96,8 +100,8 @@ struct usb_pdphy {
int msg_tx_discarded_irq;
int msg_rx_discarded_irq;
- void (*signal_cb)(struct usbpd *pd, enum pd_sig_type type);
- void (*msg_rx_cb)(struct usbpd *pd, enum pd_msg_type type,
+ void (*signal_cb)(struct usbpd *pd, enum pd_sig_type sig);
+ void (*msg_rx_cb)(struct usbpd *pd, enum pd_sop_type sop,
u8 *buf, size_t len);
void (*shutdown_cb)(struct usbpd *pd);
@@ -401,14 +405,13 @@ int pd_phy_open(struct pd_phy_params *params)
}
EXPORT_SYMBOL(pd_phy_open);
-int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
+int pd_phy_signal(enum pd_sig_type sig)
{
u8 val;
int ret;
struct usb_pdphy *pdphy = __pdphy;
- dev_dbg(pdphy->dev, "%s: type %d timeout %u\n", __func__, type,
- timeout_ms);
+ dev_dbg(pdphy->dev, "%s: type %d\n", __func__, sig);
if (!pdphy) {
pr_err("%s: pdphy not found\n", __func__);
@@ -428,7 +431,7 @@ int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
usleep_range(2, 3);
- val = (type == CABLE_RESET_SIG ? TX_CONTROL_FRAME_TYPE_CABLE_RESET : 0)
+ val = (sig == CABLE_RESET_SIG ? TX_CONTROL_FRAME_TYPE_CABLE_RESET : 0)
| TX_CONTROL_SEND_SIGNAL;
ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
@@ -436,7 +439,8 @@ int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
return ret;
ret = wait_event_interruptible_timeout(pdphy->tx_waitq,
- pdphy->tx_status != -EINPROGRESS, msecs_to_jiffies(timeout_ms));
+ pdphy->tx_status != -EINPROGRESS,
+ msecs_to_jiffies(HARD_RESET_COMPLETE_TIME));
if (ret <= 0) {
dev_err(pdphy->dev, "%s: failed ret %d", __func__, ret);
return ret ? ret : -ETIMEDOUT;
@@ -447,7 +451,7 @@ int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
if (pdphy->tx_status)
return pdphy->tx_status;
- if (type == HARD_RESET_SIG)
+ if (sig == HARD_RESET_SIG)
/* Frame filter is reconfigured in pd_phy_open() */
return pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, 0);
@@ -455,16 +459,15 @@ int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
}
EXPORT_SYMBOL(pd_phy_signal);
-int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
- enum pd_msg_type type, unsigned int timeout_ms)
+int pd_phy_write(u16 hdr, const u8 *data, size_t data_len, enum pd_sop_type sop)
{
u8 val;
int ret;
size_t total_len = data_len + USB_PDPHY_MSG_HDR_LEN;
struct usb_pdphy *pdphy = __pdphy;
- dev_dbg(pdphy->dev, "%s: hdr %x frame type %d timeout %u\n",
- __func__, hdr, type, timeout_ms);
+ dev_dbg(pdphy->dev, "%s: hdr %x frame sop_type %d\n",
+ __func__, hdr, sop);
if (data && data_len)
print_hex_dump_debug("tx data obj:", DUMP_PREFIX_NONE, 32, 4,
@@ -518,14 +521,15 @@ int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
usleep_range(2, 3);
- val = TX_CONTROL_RETRY_COUNT | (type << 2) | TX_CONTROL_SEND_MSG;
+ val = TX_CONTROL_RETRY_COUNT | (sop << 2) | TX_CONTROL_SEND_MSG;
ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
if (ret)
return ret;
ret = wait_event_interruptible_timeout(pdphy->tx_waitq,
- pdphy->tx_status != -EINPROGRESS, msecs_to_jiffies(timeout_ms));
+ pdphy->tx_status != -EINPROGRESS,
+ msecs_to_jiffies(RECEIVER_RESPONSE_TIME));
if (ret <= 0) {
dev_err(pdphy->dev, "%s: failed ret %d", __func__, ret);
return ret ? ret : -ETIMEDOUT;
@@ -534,7 +538,7 @@ int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
if (hdr && !pdphy->tx_status)
pdphy->tx_bytes += data_len + USB_PDPHY_MSG_HDR_LEN;
- return pdphy->tx_status ? pdphy->tx_status : data_len;
+ return pdphy->tx_status ? pdphy->tx_status : 0;
}
EXPORT_SYMBOL(pd_phy_write);
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
index 108701739f89..9b6053e940e9 100644
--- a/drivers/usb/pd/usbpd.h
+++ b/drivers/usb/pd/usbpd.h
@@ -45,7 +45,7 @@ enum pd_sig_type {
CABLE_RESET_SIG,
};
-enum pd_msg_type {
+enum pd_sop_type {
SOP_MSG = 0,
SOPI_MSG,
SOPII_MSG,
@@ -61,8 +61,8 @@ enum pd_spec_rev {
#define FRAME_FILTER_EN_HARD_RESET BIT(5)
struct pd_phy_params {
- void (*signal_cb)(struct usbpd *pd, enum pd_sig_type type);
- void (*msg_rx_cb)(struct usbpd *pd, enum pd_msg_type type,
+ void (*signal_cb)(struct usbpd *pd, enum pd_sig_type sig);
+ void (*msg_rx_cb)(struct usbpd *pd, enum pd_sop_type sop,
u8 *buf, size_t len);
void (*shutdown_cb)(struct usbpd *pd);
enum data_role data_role;
@@ -72,9 +72,9 @@ struct pd_phy_params {
#if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
int pd_phy_open(struct pd_phy_params *params);
-int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms);
+int pd_phy_signal(enum pd_sig_type sig);
int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
- enum pd_msg_type type, unsigned int timeout_ms);
+ enum pd_sop_type sop);
int pd_phy_update_roles(enum data_role dr, enum power_role pr);
void pd_phy_close(void);
#else
@@ -83,13 +83,13 @@ static inline int pd_phy_open(struct pd_phy_params *params)
return -ENODEV;
}
-static inline int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
+static inline int pd_phy_signal(enum pd_sig_type type)
{
return -ENODEV;
}
static inline int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
- enum pd_msg_type type, unsigned int timeout_ms)
+ enum pd_sop_type sop)
{
return -ENODEV;
}
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 6ba742076baa..2bc3c6fa417a 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -149,15 +149,17 @@ static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
if (enable) {
msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+ val = readb_relaxed(phy->base + autonomous_mode_offset);
+ val |= ARCVR_DTCT_EN;
if (phy->phy.flags & DEVICE_IN_SS_MODE) {
- val =
- readb_relaxed(phy->base + autonomous_mode_offset);
- val |= ARCVR_DTCT_EN;
val |= ALFPS_DTCT_EN;
val &= ~ARCVR_DTCT_EVENT_SEL;
- writeb_relaxed(val, phy->base + autonomous_mode_offset);
+ } else {
+ val &= ~ALFPS_DTCT_EN;
+ val |= ARCVR_DTCT_EVENT_SEL;
}
+ writeb_relaxed(val, phy->base + autonomous_mode_offset);
/* clamp phy level shifter to perform autonomous detection */
writel_relaxed(0x1, phy->vls_clamp_reg);
} else {
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d82fa36c3465..005da0866836 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -733,8 +733,10 @@ static int usbhsc_resume(struct device *dev)
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
usbhsc_power_ctrl(priv, 1);
+ usbhs_mod_autonomy_mode(priv);
+ }
usbhs_platform_call(priv, phy_reset, pdev);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index efc4fae123a4..8647d2c2a8c4 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@ struct usbhsg_gpriv;
struct usbhsg_uep {
struct usb_ep ep;
struct usbhs_pipe *pipe;
+ spinlock_t lock; /* protect the pipe */
char ep_name[EP_NAME_SIZE];
@@ -638,10 +639,16 @@ usbhsg_ep_enable_end:
static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ int ret = 0;
- if (!pipe)
- return -EINVAL;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
+ if (!pipe) {
+ ret = -EINVAL;
+ goto out;
+ }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
@@ -649,6 +656,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
uep->pipe->mod_private = NULL;
uep->pipe = NULL;
+out:
+ spin_unlock_irqrestore(&uep->lock, flags);
+
return 0;
}
@@ -698,8 +708,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
if (pipe)
usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
@@ -708,6 +721,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
* even if the pipe is NULL.
*/
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
+ spin_unlock_irqrestore(&uep->lock, flags);
return 0;
}
@@ -854,10 +868,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
- struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhsg_uep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
- int ret = 0;
+ int ret = 0, i;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
@@ -889,7 +903,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhsg_ep_disable(&dcp->ep);
+ /* disable all eps */
+ usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
+ usbhsg_ep_disable(&uep->ep);
dev_dbg(dev, "stop gadget\n");
@@ -1072,6 +1088,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
+ spin_lock_init(&uep->lock);
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 33cec50978b8..b0dc6da3d970 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -134,6 +134,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3bf61acfc26b..ebe51f11105d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1877,6 +1877,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index fd509ed6cf70..652b4334b26d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 39afd7045c43..7bb5f8da5357 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1520,8 +1520,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
/* Make sure driver was initialized */
- if (us->extra == NULL)
+ if (us->extra == NULL) {
usb_stor_dbg(us, "ERROR Driver not initialized\n");
+ srb->result = DID_ERROR << 16;
+ return;
+ }
scsi_set_resid(srb, 0);
/* scsi_bufflen might change in protocol translation to ata */
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 44ab43fc4fcc..af10f7b131a4 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
kmem_cache_free(stub_priv_cache, priv);
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
usb_free_urb(urb);
}
}
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index dbcabc9dbe0d..021003c4de53 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv)
struct urb *urb = priv->urb;
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
list_del(&priv->list);
kmem_cache_free(stub_priv_cache, priv);
usb_free_urb(urb);
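Both usbip hunks above clear the freed pointers immediately after kfree(), so that any later cleanup pass reaching the same URB fields ends up calling the always-safe kfree(NULL) instead of freeing the memory twice. The same defensive idiom in plain C:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *buf = malloc(64);

        /* First cleanup path releases the buffer and clears the pointer... */
        free(buf);
        buf = NULL;

        /*
         * ...so a second path that also tries to release it is harmless:
         * free(NULL), like kfree(NULL), is defined to do nothing.
         */
        free(buf);

        printf("no double free\n");
        return 0;
}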
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 830e2fd47642..b31b84f56e8f 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -902,6 +902,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
return ret;
vdev->barmap[index] = pci_iomap(pdev, index, 0);
+ if (!vdev->barmap[index]) {
+ pci_release_selected_regions(pdev, 1 << index);
+ return -ENOMEM;
+ }
}
vma->vm_private_data = vdev;
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 210db24d2204..4d39f7959adf 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -190,7 +190,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
if (!vdev->has_vga)
return -EINVAL;
- switch (pos) {
+ if (pos > 0xbfffful)
+ return -EINVAL;
+
+ switch ((u32)pos) {
case 0xa0000 ... 0xbffff:
count = min(count, (size_t)(0xc0000 - pos));
iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 6070b793cbcb..1e01e28f40f3 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -296,6 +296,34 @@ static void vfio_group_put(struct vfio_group *group)
kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}
+struct vfio_group_put_work {
+ struct work_struct work;
+ struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = container_of(work, struct vfio_group_put_work, work);
+
+ vfio_group_put(do_work->group);
+ kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+ if (WARN_ON(!do_work))
+ return;
+
+ INIT_WORK(&do_work->work, vfio_group_put_bg);
+ do_work->group = group;
+ schedule_work(&do_work->work);
+}
+
/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
@@ -620,7 +648,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
break;
}
- vfio_group_put(group);
+ /*
+ * If we're the last reference to the group, the group will be
+ * released, which includes unregistering the iommu group notifier.
+ * We hold a read-lock on that notifier list, unregistering needs
+ * a write-lock... deadlock. Release our reference asynchronously
+ * to avoid that situation.
+ */
+ vfio_group_schedule_put(group);
return NOTIFY_OK;
}
@@ -1552,6 +1587,15 @@ void vfio_group_put_external_user(struct vfio_group *group)
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+bool vfio_external_group_match_file(struct vfio_group *test_group,
+ struct file *filep)
+{
+ struct vfio_group *group = filep->private_data;
+
+ return (filep->f_op == &vfio_group_fops) && (group == test_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+
int vfio_external_user_iommu_id(struct vfio_group *group)
{
return iommu_group_id(group->iommu_group);
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 07675d6f323e..d4530b54479c 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -350,6 +350,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
info->screen_size = resource_size(res);
info->screen_base = devm_ioremap(&dev->dev, res->start,
info->screen_size);
+ if (!info->screen_base) {
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
+
info->fbops = &cobalt_lcd_fbops;
info->fix = cobalt_lcdfb_fix;
info->fix.smem_start = res->start;
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 4ac10ab494e5..5f7e7c6bcde0 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -2720,8 +2720,6 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
ctrl_pdata->update_phy_timing);
rc = mdss_dsi_on(pdata);
- mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
- pdata);
break;
case MDSS_EVENT_UNBLANK:
if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE)
@@ -3268,21 +3266,6 @@ end:
return rc;
}
-static void mdss_dsi_ctrl_validate_lane_swap_config(
- struct mdss_dsi_ctrl_pdata *ctrl)
-{
- struct mipi_panel_info *mipi = &ctrl->panel_data.panel_info.mipi;
-
- if (!mipi->data_lane0)
- ctrl->lane_map[DSI_LOGICAL_LANE_0] = DSI_PHYSICAL_LANE_INVALID;
- if (!mipi->data_lane1)
- ctrl->lane_map[DSI_LOGICAL_LANE_1] = DSI_PHYSICAL_LANE_INVALID;
- if (!mipi->data_lane2)
- ctrl->lane_map[DSI_LOGICAL_LANE_2] = DSI_PHYSICAL_LANE_INVALID;
- if (!mipi->data_lane3)
- ctrl->lane_map[DSI_LOGICAL_LANE_3] = DSI_PHYSICAL_LANE_INVALID;
-}
-
static int mdss_dsi_ctrl_validate_config(struct mdss_dsi_ctrl_pdata *ctrl)
{
int rc = 0;
@@ -3292,8 +3275,6 @@ static int mdss_dsi_ctrl_validate_config(struct mdss_dsi_ctrl_pdata *ctrl)
goto error;
}
- mdss_dsi_ctrl_validate_lane_swap_config(ctrl);
-
/*
* check to make sure that the byte interface clock is specified for
* DSI ctrl version 2 and above.
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index abd2192997e5..2143c2bdb84b 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -156,10 +156,13 @@ struct hdmi_edid_ctrl {
};
static bool hdmi_edid_is_mode_supported(struct hdmi_edid_ctrl *edid_ctrl,
- struct msm_hdmi_mode_timing_info *timing)
+ struct msm_hdmi_mode_timing_info *timing, u32 out_format)
{
+ u32 pclk = hdmi_tx_setup_tmds_clk_rate(timing->pixel_freq,
+ out_format, false);
+
if (!timing->supported ||
- timing->pixel_freq > edid_ctrl->init_data.max_pclk_khz)
+ pclk > edid_ctrl->init_data.max_pclk_khz)
return false;
return true;
@@ -936,7 +939,8 @@ static void hdmi_edid_add_sink_y420_format(struct hdmi_edid_ctrl *edid_ctrl,
u32 ret = hdmi_get_supported_mode(&timing,
&edid_ctrl->init_data.ds_data,
video_format);
- u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+ u32 supported = hdmi_edid_is_mode_supported(edid_ctrl,
+ &timing, MDP_Y_CBCR_H2V2);
struct hdmi_edid_sink_data *sink = &edid_ctrl->sink_data;
if (video_format >= HDMI_VFRMT_MAX) {
@@ -1704,7 +1708,8 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
u32 ret = hdmi_get_supported_mode(&timing,
&edid_ctrl->init_data.ds_data,
video_format);
- u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+ u32 supported = hdmi_edid_is_mode_supported(edid_ctrl,
+ &timing, MDP_RGBA_8888);
struct hdmi_edid_sink_data *sink_data = &edid_ctrl->sink_data;
struct disp_mode_info *disp_mode_list = sink_data->disp_mode_list;
u32 i = 0;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 9ee0c27b225e..837147dc5036 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -272,20 +272,6 @@ static void hdmi_tx_cable_notify_work(struct work_struct *work)
mutex_unlock(&hdmi_ctrl->tx_lock);
} /* hdmi_tx_cable_notify_work */
-static bool hdmi_tx_is_cea_format(int mode)
-{
- bool cea_fmt;
-
- if ((mode > 0) && (mode <= HDMI_EVFRMT_END))
- cea_fmt = true;
- else
- cea_fmt = false;
-
- DEV_DBG("%s: %s\n", __func__, cea_fmt ? "Yes" : "No");
-
- return cea_fmt;
-}
-
static inline bool hdmi_tx_is_hdcp_enabled(struct hdmi_tx_ctrl *hdmi_ctrl)
{
return hdmi_ctrl->hdcp_feature_on &&
@@ -2193,6 +2179,7 @@ static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
int status = 0;
void *data;
struct dss_io_data *io;
+ u32 sink_max_pclk;
if (!hdmi_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
@@ -2232,16 +2219,22 @@ static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
/* parse edid if a valid edid buffer is present */
if (hdmi_ctrl->custom_edid || !hdmi_ctrl->sim_mode) {
status = hdmi_edid_parser(data);
- if (status)
+ if (status) {
DEV_ERR("%s: edid parse failed\n", __func__);
- else
+ } else {
/*
- * Updata HDMI max supported TMDS clock, consider
- * both sink and source capicity.
+ * Update the max supported HDMI TMDS clock, considering
+ * both sink and source capacity. A DVI sink does not
+ * report a max TMDS clock in its EDID, so in that case
+ * just use the source capacity.
*/
- hdmi_edid_set_max_pclk_rate(data,
- min(hdmi_edid_get_sink_caps_max_tmds_clk(data) / 1000,
- hdmi_ctrl->max_pclk_khz));
+ sink_max_pclk =
+ hdmi_edid_get_sink_caps_max_tmds_clk(data);
+ if (sink_max_pclk != 0)
+ hdmi_edid_set_max_pclk_rate(data,
+ min(sink_max_pclk / 1000,
+ hdmi_ctrl->max_pclk_khz));
+ }
}
bail:
if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, false))
@@ -3192,9 +3185,7 @@ static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl)
}
hdmi_ctrl->panel.vic = hdmi_ctrl->vic;
-
- if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
- hdmi_tx_is_cea_format(hdmi_ctrl->vic))
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
hdmi_ctrl->panel.infoframe = true;
else
hdmi_ctrl->panel.infoframe = false;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 6fb32761a767..410d36a3ac31 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1797,7 +1797,8 @@ static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
{
- int ret, scm_ret = 0;
+ int ret;
+ u64 scm_ret = 0;
if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
return;
@@ -1808,7 +1809,7 @@ static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
if (ret || scm_ret)
- pr_warn("scm_restore_sec_cfg failed %d %d\n",
+ pr_warn("scm_restore_sec_cfg failed %d %llu\n",
ret, scm_ret);
__mdss_mdp_reg_access_clk_enable(mdata, false);
@@ -2779,6 +2780,8 @@ ssize_t mdss_mdp_show_capabilities(struct device *dev,
SPRINT(" avr");
if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED))
SPRINT(" hdr");
+ if (mdata->nvig_pipes && mdata->mdp_rev >= MDSS_MDP_HW_REV_300)
+ SPRINT(" vig_csc_db"); /* double buffered VIG CSC block */
SPRINT("\n");
#undef SPRINT
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 3ccc09d58480..ff93c343d41f 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -975,26 +975,31 @@ static int __validate_layer_reconfig(struct mdp_input_layer *layer,
struct mdss_mdp_pipe *pipe)
{
int status = 0;
- struct mdss_mdp_format_params *src_fmt;
+ struct mdss_mdp_format_params *layer_src_fmt;
+ struct mdss_data_type *mdata = mfd_to_mdata(pipe->mfd);
+ bool is_csc_db = (mdata->mdp_rev < MDSS_MDP_HW_REV_300) ? false : true;
+
+ layer_src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
+ if (!layer_src_fmt) {
+ pr_err("Invalid layer format %d\n", layer->buffer.format);
+ status = -EINVAL;
+ goto err_exit;
+ }
/*
- * csc registers are not double buffered. It is not permitted
- * to change them on staged pipe with YUV layer.
+ * HW earlier than sdm 3.x.x does not support a double-buffered
+ * CSC block. Reject any CSC reconfiguration on a staged pipe.
*/
- if (pipe->csc_coeff_set != layer->color_space) {
- src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
- if (!src_fmt) {
- pr_err("Invalid layer format %d\n",
- layer->buffer.format);
- status = -EINVAL;
- } else {
- if (pipe->src_fmt->is_yuv && src_fmt &&
- src_fmt->is_yuv) {
- status = -EPERM;
- pr_err("csc change is not permitted on used pipe\n");
- }
- }
+ if (!is_csc_db &&
+ ((!!pipe->src_fmt->is_yuv != !!layer_src_fmt->is_yuv) ||
+ (pipe->src_fmt->is_yuv && layer_src_fmt->is_yuv &&
+ pipe->csc_coeff_set != layer->color_space))) {
+ pr_err("CSC reconfig not allowed on staged pipe\n");
+ status = -EINVAL;
+ goto err_exit;
}
+
+err_exit:
return status;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6c031dd1bc4e..8a0243efd359 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -905,17 +905,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
+ /*
+ * If we are loading ET_EXEC or we have already performed
+ * the ET_DYN load_addr calculations, proceed normally.
+ */
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
- /* Try and get dynamic programs out of the way of the
- * default mmap base, as well as whatever program they
- * might try to exec. This is because the brk will
- * follow the loader, and is not movable. */
- load_bias = ELF_ET_DYN_BASE - vaddr;
- if (current->flags & PF_RANDOMIZE)
- load_bias += arch_mmap_rnd();
- load_bias = ELF_PAGESTART(load_bias);
+ /*
+ * This logic is run once for the first LOAD Program
+ * Header for ET_DYN binaries to calculate the
+ * randomization (load_bias) for all the LOAD
+ * Program Headers, and to calculate the entire
+ * size of the ELF mapping (total_size). (Note that
+ * load_addr_set is set to true later once the
+ * initial mapping is performed.)
+ *
+ * There are effectively two types of ET_DYN
+ * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+ * and loaders (ET_DYN without INTERP, since they
+ * _are_ the ELF interpreter). The loaders must
+ * be loaded away from programs since the program
+ * may otherwise collide with the loader (especially
+ * for ET_EXEC which does not have a randomized
+ * position). For example to handle invocations of
+ * "./ld.so someprog" to test out a new version of
+ * the loader, the subsequent program that the
+ * loader loads must avoid the loader itself, so
+ * they cannot share the same load range. Sufficient
+ * room for the brk must be allocated with the
+ * loader as well, since brk must be available with
+ * the loader.
+ *
+ * Therefore, programs are loaded offset from
+ * ELF_ET_DYN_BASE and loaders are loaded into the
+ * independently randomized mmap region (0 load_bias
+ * without MAP_FIXED).
+ */
+ if (elf_interpreter) {
+ load_bias = ELF_ET_DYN_BASE;
+ if (current->flags & PF_RANDOMIZE)
+ load_bias += arch_mmap_rnd();
+ elf_flags |= MAP_FIXED;
+ } else
+ load_bias = 0;
+
+ /*
+ * Since load_bias is used for all subsequent loading
+ * calculations, we must lower it by the first vaddr
+ * so that the remaining calculations based on the
+ * ELF vaddrs will be correctly offset. The result
+ * is then page aligned.
+ */
+ load_bias = ELF_PAGESTART(load_bias - vaddr);
+
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
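Worked example of the bias arithmetic above (illustrative numbers, assuming 4 KiB pages): with a randomized base of 0x555555554000 and a first PT_LOAD p_vaddr of 0x1234, load_bias = ELF_PAGESTART(0x555555554000 - 0x1234) = 0x555555552000; the subsequent mapping of that segment at load_bias + vaddr = 0x555555553234 therefore lands near the chosen base while preserving the segment's in-page offset, and every later PT_LOAD header reuses the same load_bias.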
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a61926cb01c0..bebd6517355d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7521,11 +7521,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
* within our reservation, otherwise we need to adjust our inode
* counter appropriately.
*/
- if (dio_data->outstanding_extents) {
+ if (dio_data->outstanding_extents >= num_extents) {
dio_data->outstanding_extents -= num_extents;
} else {
+ /*
+ * If dio write length has been split due to no large enough
+ * contiguous space, we need to compensate our inode counter
+ * appropriately.
+ */
+ u64 num_needed = num_extents - dio_data->outstanding_extents;
+
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents += num_extents;
+ BTRFS_I(inode)->outstanding_extents += num_needed;
spin_unlock(&BTRFS_I(inode)->lock);
}
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 9314b4ea2375..be7d187d53fd 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -247,6 +247,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
if (ret < 0)
err = ret;
dput(last);
+ /* last_name no longer matches the cache index */
+ if (fi->readdir_cache_idx >= 0) {
+ fi->readdir_cache_idx = -1;
+ fi->dir_release_count = 0;
+ }
}
return err;
}
diff --git a/fs/dcache.c b/fs/dcache.c
index 4c07a84d1317..ba56a39a3b74 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -269,6 +269,33 @@ static inline int dname_external(const struct dentry *dentry)
return dentry->d_name.name != dentry->d_iname;
}
+void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ if (unlikely(dname_external(dentry))) {
+ struct external_name *p = external_name(dentry);
+ atomic_inc(&p->u.count);
+ spin_unlock(&dentry->d_lock);
+ name->name = p->name;
+ } else {
+ memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
+ spin_unlock(&dentry->d_lock);
+ name->name = name->inline_name;
+ }
+}
+EXPORT_SYMBOL(take_dentry_name_snapshot);
+
+void release_dentry_name_snapshot(struct name_snapshot *name)
+{
+ if (unlikely(name->name != name->inline_name)) {
+ struct external_name *p;
+ p = container_of(name->name, struct external_name, name[0]);
+ if (unlikely(atomic_dec_and_test(&p->u.count)))
+ kfree_rcu(p, u.head);
+ }
+}
+EXPORT_SYMBOL(release_dentry_name_snapshot);
+
static inline void __d_set_inode_and_type(struct dentry *dentry,
struct inode *inode,
unsigned type_flags)
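A minimal usage sketch of the new snapshot pair (the surrounding caller and use_name() are placeholders; the fsnotify and debugfs hunks below are the real in-tree users): the snapshot either copies the inline name or takes a reference on the external name, so it remains valid across a concurrent rename.

	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	/* dentry->d_name may change or its external name be freed from here on */
	use_name(snap.name);
	release_dentry_name_snapshot(&snap);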
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 0f5d05bf2131..e49ba072bd64 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -669,7 +669,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
{
int error;
struct dentry *dentry = NULL, *trap;
- const char *old_name;
+ struct name_snapshot old_name;
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
@@ -684,19 +684,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
goto exit;
- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
+ take_dentry_name_snapshot(&old_name, old_dentry);
error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
dentry);
if (error) {
- fsnotify_oldname_free(old_name);
+ release_dentry_name_snapshot(&old_name);
goto exit;
}
d_move(old_dentry, dentry);
- fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name,
+ fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name,
d_is_dir(old_dentry),
NULL, old_dentry);
- fsnotify_oldname_free(old_name);
+ release_dentry_name_snapshot(&old_name);
unlock_rename(new_dir, old_dir);
dput(dentry);
return old_dentry;
diff --git a/fs/exec.c b/fs/exec.c
index 073ae12b396e..0428c34d4773 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -206,8 +206,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- unsigned long ptr_size;
- struct rlimit *rlim;
+ unsigned long ptr_size, limit;
/*
* Since the stack will hold pointers to the strings, we
@@ -236,14 +235,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return page;
/*
- * Limit to 1/4-th the stack size for the argv+env strings.
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
- rlim = current->signal->rlim;
- if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ limit = _STK_LIM / 4 * 3;
+ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+ if (size > limit)
goto fail;
}
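Worked example (assuming the stock _STK_LIM of 8 MiB): _STK_LIM / 4 * 3 = 6 MiB, and with RLIMIT_STACK raised to 1 GiB the second term is 256 MiB, so limit = min(6 MiB, 256 MiB) = 6 MiB; with the default 8 MiB rlimit the cap is still the familiar 2 MiB. A huge stack rlimit therefore no longer translates into a near-unbounded argv+env allocation.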
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 5d09ea585840..c2ee23acf359 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a,
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
- if (!ret || val >= clusters)
+ if (ret || val >= clusters)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index e9a8d676c6bc..83dcf7bfd7b8 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -213,7 +213,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
+ if (acl && !ipage) {
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (error)
return error;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ee85cd4e136a..62376451bbce 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -740,16 +740,10 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
- O_RDONLY | O_WRONLY | O_RDWR |
- O_CREAT | O_EXCL | O_NOCTTY |
- O_TRUNC | O_APPEND | /* O_NONBLOCK | */
- __O_SYNC | O_DSYNC | FASYNC |
- O_DIRECT | O_LARGEFILE | O_DIRECTORY |
- O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE |
- __FMODE_NONOTIFY
- ));
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ HWEIGHT32(
+ (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
+ __FMODE_EXEC | __FMODE_NONOTIFY));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
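The rewritten check still counts bits at compile time: HWEIGHT32() of (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) | __FMODE_EXEC | __FMODE_NONOTIFY must equal 20, i.e. 21 flags minus O_RDONLY (which is 0 and contributes no bit), with O_NONBLOCK/O_NDELAY masked out because, as the comment above notes, they alias differently across architectures. Adding a new open flag without extending VALID_OPEN_FLAGS now breaks the build here rather than slipping silently past the fcntl and open paths.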
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9cd8c92b953d..070901e76653 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -80,9 +80,9 @@ static struct rhashtable_params ht_parms = {
static struct rhashtable gl_hash_table;
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -90,6 +90,13 @@ void gfs2_glock_free(struct gfs2_glock *gl)
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
}
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index be519416c112..4a9077ec9313 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -367,6 +367,7 @@ struct gfs2_glock {
loff_t end;
} gl_vm;
};
+ struct rcu_head gl_rcu;
struct rhash_head gl_node;
};
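The general shape of the deferral introduced above, as a self-contained sketch (struct foo and its helpers are illustrative names, not from this patch): the object embeds a struct rcu_head and the actual free runs only after a grace period, so lockless rhashtable readers can never dereference freed glock memory.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rcu;
	/* ... payload looked up under rcu_read_lock() ... */
};

static void foo_dealloc(struct rcu_head *rcu)
{
	struct foo *f = container_of(rcu, struct foo, rcu);

	kfree(f);
}

static void foo_free(struct foo *f)
{
	/* Readers that found f before this call may still be dereferencing it. */
	call_rcu(&f->rcu, foo_dealloc);
}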
diff --git a/fs/mount.h b/fs/mount.h
index 13a4ebbbaa74..37c64bbe840c 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -57,6 +57,7 @@ struct mount {
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+ struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
diff --git a/fs/namei.c b/fs/namei.c
index b5dfe2c5b51e..3b2b5c8e0ec7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -4264,11 +4264,11 @@ int vfs_rename2(struct vfsmount *mnt,
{
int error;
bool is_dir = d_is_dir(old_dentry);
- const unsigned char *old_name;
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
+ struct name_snapshot old_name;
/*
* Check source == target.
@@ -4322,7 +4322,7 @@ int vfs_rename2(struct vfsmount *mnt,
if (error)
return error;
- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
+ take_dentry_name_snapshot(&old_name, old_dentry);
dget(new_dentry);
if (!is_dir || (flags & RENAME_EXCHANGE))
lock_two_nondirectories(source, target);
@@ -4383,14 +4383,14 @@ out:
mutex_unlock(&target->i_mutex);
dput(new_dentry);
if (!error) {
- fsnotify_move(old_dir, new_dir, old_name, is_dir,
+ fsnotify_move(old_dir, new_dir, old_name.name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
new_is_dir, NULL, new_dentry);
}
}
- fsnotify_oldname_free(old_name);
+ release_dentry_name_snapshot(&old_name);
return error;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 0f52d90c356f..f32450c3e72c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
+ INIT_LIST_HEAD(&mnt->mnt_umounting);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 5b21b1ca2341..348e0a05bd18 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1135,11 +1135,13 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
/* Force a full look up iff the parent directory has changed */
if (!nfs_is_exclusive_create(dir, flags) &&
nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
-
- if (nfs_lookup_verify_inode(inode, flags)) {
+ error = nfs_lookup_verify_inode(inode, flags);
+ if (error) {
if (flags & LOOKUP_RCU)
return -ECHILD;
- goto out_zap_parent;
+ if (error == -ESTALE)
+ goto out_zap_parent;
+ goto out_error;
}
goto out_valid;
}
@@ -1163,8 +1165,10 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
- if (error)
+ if (error == -ESTALE || error == -ENOENT)
goto out_bad;
+ if (error)
+ goto out_error;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
goto out_bad;
if ((error = nfs_refresh_inode(inode, fattr)) != 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f714b98cfd74..668ac19af58f 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1241,9 +1241,9 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
return 0;
/* Has the inode gone and changed behind our back? */
if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
- return -EIO;
+ return -ESTALE;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
- return -EIO;
+ return -ESTALE;
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
inode->i_version != fattr->change_attr)
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index db39de2dd4cb..a64adc2fced9 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -104,16 +104,20 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
if (unlikely(!fsnotify_inode_watches_children(p_inode)))
__fsnotify_update_child_dentry_flags(p_inode);
else if (p_inode->i_fsnotify_mask & mask) {
+ struct name_snapshot name;
+
/* we are notifying a parent so come up with the new mask which
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
+ take_dentry_name_snapshot(&name, dentry);
if (path)
ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
- dentry->d_name.name, 0);
+ name.name, 0);
else
ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
- dentry->d_name.name, 0);
+ name.name, 0);
+ release_dentry_name_snapshot(&name);
}
dput(parent);
diff --git a/fs/open.c b/fs/open.c
index e70cca15c976..1fd96c5d3895 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -898,6 +898,12 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
int lookup_flags = 0;
int acc_mode;
+ /*
+ * Clear out all open flags we don't know about so that we don't report
+ * them in fcntl(F_GETFD) or similar interfaces.
+ */
+ flags &= VALID_OPEN_FLAGS;
+
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
diff --git a/fs/pnode.c b/fs/pnode.c
index e4e428d621e9..ddb846f878b8 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
+static inline struct mount *last_slave(struct mount *p)
+{
+ return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
+}
+
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
}
}
+static struct mount *skip_propagation_subtree(struct mount *m,
+ struct mount *origin)
+{
+ /*
+ * Advance m such that propagation_next will not return
+ * the slaves of m.
+ */
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ m = last_slave(m);
+
+ return m;
+}
+
static struct mount *next_group(struct mount *m, struct mount *origin)
{
while (1) {
@@ -415,65 +433,104 @@ void propagate_mount_unlock(struct mount *mnt)
}
}
-/*
- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */
-static void mark_umount_candidates(struct mount *mnt)
+static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
-
- BUG_ON(parent == mnt);
-
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
- continue;
- if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
- SET_MNT_MARK(child);
- }
- }
+ CLEAR_MNT_MARK(mnt);
+ mnt->mnt.mnt_flags |= MNT_UMOUNT;
+ list_del_init(&mnt->mnt_child);
+ list_del_init(&mnt->mnt_umounting);
+ list_move_tail(&mnt->mnt_list, to_umount);
}
/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
-static void __propagate_umount(struct mount *mnt)
+static bool __propagate_umount(struct mount *mnt,
+ struct list_head *to_umount,
+ struct list_head *to_restore)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
+ bool progress = false;
+ struct mount *child;
- BUG_ON(parent == mnt);
+ /*
+ * The state of the parent won't change if this mount is
+ * already unmounted or marked as without children.
+ */
+ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
+ goto out;
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *topper;
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- /*
- * umount the child only if the child has no children
- * and the child is marked safe to unmount.
- */
- if (!child || !IS_MNT_MARKED(child))
+ /* Verify topper is the only grandchild that has not been
+ * speculatively unmounted.
+ */
+ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
continue;
- CLEAR_MNT_MARK(child);
+ if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
+ continue;
+ /* Found a mounted child */
+ goto children;
+ }
- /* If there is exactly one mount covering all of child
- * replace child with that mount.
- */
- topper = find_topper(child);
- if (topper)
- mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
- topper);
+ /* Mark mounts that can be unmounted if not locked */
+ SET_MNT_MARK(mnt);
+ progress = true;
+
+ /* If a mount is without children and not locked umount it. */
+ if (!IS_MNT_LOCKED(mnt)) {
+ umount_one(mnt, to_umount);
+ } else {
+children:
+ list_move_tail(&mnt->mnt_umounting, to_restore);
+ }
+out:
+ return progress;
+}
+
+static void umount_list(struct list_head *to_umount,
+ struct list_head *to_restore)
+{
+ struct mount *mnt, *child, *tmp;
+ list_for_each_entry(mnt, to_umount, mnt_list) {
+ list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
+ /* topper? */
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
+ list_move_tail(&child->mnt_umounting, to_restore);
+ else
+ umount_one(child, to_umount);
+ }
+ }
+}
- if (list_empty(&child->mnt_mounts)) {
- list_del_init(&child->mnt_child);
- child->mnt.mnt_flags |= MNT_UMOUNT;
- list_move_tail(&child->mnt_list, &mnt->mnt_list);
+static void restore_mounts(struct list_head *to_restore)
+{
+ /* Restore mounts to a clean working state */
+ while (!list_empty(to_restore)) {
+ struct mount *mnt, *parent;
+ struct mountpoint *mp;
+
+ mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
+ CLEAR_MNT_MARK(mnt);
+ list_del_init(&mnt->mnt_umounting);
+
+ /* Should this mount be reparented? */
+ mp = mnt->mnt_mp;
+ parent = mnt->mnt_parent;
+ while (parent->mnt.mnt_flags & MNT_UMOUNT) {
+ mp = parent->mnt_mp;
+ parent = parent->mnt_parent;
}
+ if (parent != mnt->mnt_parent)
+ mnt_change_mountpoint(parent, mp, mnt);
+ }
+}
+
+static void cleanup_umount_visitations(struct list_head *visited)
+{
+ while (!list_empty(visited)) {
+ struct mount *mnt =
+ list_first_entry(visited, struct mount, mnt_umounting);
+ list_del_init(&mnt->mnt_umounting);
}
}
@@ -487,12 +544,69 @@ static void __propagate_umount(struct mount *mnt)
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
+ LIST_HEAD(to_restore);
+ LIST_HEAD(to_umount);
+ LIST_HEAD(visited);
+
+ /* Find candidates for unmounting */
+ list_for_each_entry_reverse(mnt, list, mnt_list) {
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
+
+ /*
+ * If this mount has already been visited, it is known that its
+ * entire peer group and all of their slaves in the propagation
+ * tree for the mountpoint have already been visited, and there is
+ * no need to visit them again.
+ */
+ if (!list_empty(&mnt->mnt_umounting))
+ continue;
+
+ list_add_tail(&mnt->mnt_umounting, &visited);
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ struct mount *child = __lookup_mnt(&m->mnt,
+ mnt->mnt_mountpoint);
+ if (!child)
+ continue;
+
+ if (!list_empty(&child->mnt_umounting)) {
+ /*
+ * If the child has already been visited, it is
+ * known that its entire peer group and all of
+ * their slaves in the propagation tree for the
+ * mountpoint have already been visited and there
+ * is no need to visit this subtree again.
+ */
+ m = skip_propagation_subtree(m, parent);
+ continue;
+ } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
+ /*
+ * We have come across a partially unmounted
+ * mount in the list that has not been visited yet.
+ * Remember it has been visited and continue
+ * about our merry way.
+ */
+ list_add_tail(&child->mnt_umounting, &visited);
+ continue;
+ }
+
+ /* Check the child and parents while progress is made */
+ while (__propagate_umount(child,
+ &to_umount, &to_restore)) {
+ /* Is the parent a umount candidate? */
+ child = child->mnt_parent;
+ if (list_empty(&child->mnt_umounting))
+ break;
+ }
+ }
+ }
- list_for_each_entry_reverse(mnt, list, mnt_list)
- mark_umount_candidates(mnt);
+ umount_list(&to_umount, &to_restore);
+ restore_mounts(&to_restore);
+ cleanup_umount_visitations(&visited);
+ list_splice_tail(&to_umount, list);
- list_for_each_entry(mnt, list, mnt_list)
- __propagate_umount(mnt);
return 0;
}
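In effect, the rewrite replaces the two propagation walks (mark_umount_candidates() followed by __propagate_umount()) with a single pass that uses the new mnt_umounting list head both as a visited marker (list_empty() means not yet seen) and as the linkage onto the local to_umount/to_restore/visited lists: subtrees already covered are skipped via skip_propagation_subtree(), definite victims are collected with umount_one()/umount_list(), survivors are reparented in restore_mounts(), and the victims are finally spliced back onto the caller's unmount list. This avoids revisiting the same peer group once per mount being unmounted, which could make the old code extremely slow on large, overlapping propagation trees.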
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 8d1e5e2db6a1..c9e4bc47c79d 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -434,7 +434,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
for (i = 0; i < cxt->max_dump_cnt; i++) {
cxt->przs[i] = persistent_ram_new(*paddr, cxt->record_size, 0,
&cxt->ecc_info,
- cxt->memtype);
+ cxt->memtype, 0);
if (IS_ERR(cxt->przs[i])) {
err = PTR_ERR(cxt->przs[i]);
dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
@@ -471,7 +471,8 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
return -ENOMEM;
}
- *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
+ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
+ cxt->memtype, 0);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 3975deec02f8..e11672aa4575 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -48,16 +48,15 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
return atomic_read(&prz->buffer->start);
}
-static DEFINE_RAW_SPINLOCK(buffer_lock);
-
/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
int old;
int new;
- unsigned long flags;
+ unsigned long flags = 0;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->start);
new = old + a;
@@ -65,7 +64,8 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
new -= prz->buffer_size;
atomic_set(&prz->buffer->start, new);
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
return old;
}
@@ -75,9 +75,10 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
size_t old;
size_t new;
- unsigned long flags;
+ unsigned long flags = 0;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->size);
if (old == prz->buffer_size)
@@ -89,7 +90,8 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
atomic_set(&prz->buffer->size, new);
exit:
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -491,6 +493,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
prz->buffer->sig);
}
+ /* Rewind missing or invalid memory area. */
prz->buffer->sig = sig;
persistent_ram_zap(prz);
@@ -517,7 +520,7 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype)
+ unsigned int memtype, u32 flags)
{
struct persistent_ram_zone *prz;
int ret = -ENOMEM;
@@ -528,6 +531,10 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
goto err;
}
+ /* Initialize general buffer state. */
+ raw_spin_lock_init(&prz->buffer_lock);
+ prz->flags = flags;
+
ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
goto err;
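A call-site sketch for the extended constructor (the wrapper name and the sig/memtype values of 0 are placeholders, not from this patch): zones that are only ever written from a single context can opt out of the new per-zone lock via PRZ_FLAG_NO_LOCK, while everything else passes 0 and keeps buffer_lock protection.

#include <linux/pstore_ram.h>

static struct persistent_ram_zone *
example_new_zone(phys_addr_t paddr, size_t size,
		 struct persistent_ram_ecc_info *ecc_info, bool single_writer)
{
	u32 flags = single_writer ? PRZ_FLAG_NO_LOCK : 0;

	/* sig 0 and memtype 0 are placeholder values for this sketch */
	return persistent_ram_new(paddr, size, 0, ecc_info, 0, flags);
}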
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 60fea424835f..103dc45a131f 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -766,13 +766,9 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
* afterwards in the other cases: we fsstack_copy_inode_size from
* the lower level.
*/
- if (current->mm)
- down_write(&current->mm->mmap_sem);
if (ia->ia_valid & ATTR_SIZE) {
err = inode_newsize_ok(&tmp, ia->ia_size);
if (err) {
- if (current->mm)
- up_write(&current->mm->mmap_sem);
goto out;
}
truncate_setsize(inode, ia->ia_size);
@@ -795,8 +791,6 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
err = notify_change2(lower_mnt, lower_dentry, &lower_ia, /* note: lower_ia */
NULL);
mutex_unlock(&d_inode(lower_dentry)->i_mutex);
- if (current->mm)
- up_write(&current->mm->mmap_sem);
if (err)
goto out;
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 3c5b51d49d21..80825b287836 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -364,41 +364,34 @@ out:
return err;
}
-/* A feature which supports mount_nodev() with options */
-static struct dentry *mount_nodev_with_options(struct vfsmount *mnt,
- struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- int (*fill_super)(struct vfsmount *, struct super_block *,
- const char *, void *, int))
+struct sdcardfs_mount_private {
+ struct vfsmount *mnt;
+ const char *dev_name;
+ void *raw_data;
+};
+static int __sdcardfs_fill_super(
+ struct super_block *sb,
+ void *_priv, int silent)
{
- int error;
- struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
-
- if (IS_ERR(s))
- return ERR_CAST(s);
-
- s->s_flags = flags;
+ struct sdcardfs_mount_private *priv = _priv;
- error = fill_super(mnt, s, dev_name, data, flags & MS_SILENT ? 1 : 0);
- if (error) {
- deactivate_locked_super(s);
- return ERR_PTR(error);
- }
- s->s_flags |= MS_ACTIVE;
- return dget(s->s_root);
+ return sdcardfs_read_super(priv->mnt,
+ sb, priv->dev_name, priv->raw_data, silent);
}
static struct dentry *sdcardfs_mount(struct vfsmount *mnt,
struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
- /*
- * dev_name is a lower_path_name,
- * raw_data is a option string.
- */
- return mount_nodev_with_options(mnt, fs_type, flags, dev_name,
- raw_data, sdcardfs_read_super);
+ struct sdcardfs_mount_private priv = {
+ .mnt = mnt,
+ .dev_name = dev_name,
+ .raw_data = raw_data
+ };
+
+ return mount_nodev(fs_type, flags,
+ &priv, __sdcardfs_fill_super);
}
static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type,
@@ -423,7 +416,7 @@ void sdcardfs_kill_sb(struct super_block *sb)
list_del(&sbi->list);
mutex_unlock(&sdcardfs_super_list_lock);
}
- generic_shutdown_super(sb);
+ kill_anon_super(sb);
}
static struct file_system_type sdcardfs_fs_type = {
diff --git a/fs/seq_file.c b/fs/seq_file.c
index d672e2fec459..6dc4296eed62 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -72,9 +72,10 @@ int seq_open(struct file *file, const struct seq_operations *op)
mutex_init(&p->lock);
p->op = op;
-#ifdef CONFIG_USER_NS
- p->user_ns = file->f_cred->user_ns;
-#endif
+
+ // No refcounting: the lifetime of 'p' is constrained
+ // to the lifetime of the file.
+ p->file = file;
/*
* Wrappers around seq_open(e.g. swaps_open) need to be
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7be3166ba553..0e659d9c69a1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1235,8 +1235,8 @@ int udf_setsize(struct inode *inode, loff_t newsize)
return err;
}
set_size:
- truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
+ truncate_setsize(inode, newsize);
} else {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
@@ -1253,9 +1253,9 @@ set_size:
udf_get_block);
if (err)
return err;
+ truncate_setsize(inode, newsize);
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
- truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 187b80267ff9..a9063ac50c4e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1426,6 +1426,26 @@ __xfs_get_blocks(
if (error)
goto out_unlock;
+ /*
+ * The only time we can ever safely find delalloc blocks on direct I/O
+ * is a dio write to post-eof speculative preallocation. All other
+ * scenarios are indicative of a problem or misuse (such as mixing
+ * direct and mapped I/O).
+ *
+ * The file may be unmapped by the time we get here so we cannot
+ * reliably fail the I/O based on mapping. Instead, fail the I/O if this
+ * is a read or a write within eof. Otherwise, carry on but warn as a
+ * precaution if the file happens to be mapped.
+ */
+ if (direct && imap.br_startblock == DELAYSTARTBLOCK) {
+ if (!create || offset < i_size_read(VFS_I(ip))) {
+ WARN_ON_ONCE(1);
+ error = -EIO;
+ goto out_unlock;
+ }
+ WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping));
+ }
+
/* for DAX, we convert unwritten extents directly */
if (create &&
(!nimaps ||
@@ -1525,7 +1545,6 @@ __xfs_get_blocks(
set_buffer_new(bh_result);
if (imap.br_startblock == DELAYSTARTBLOCK) {
- BUG_ON(direct);
if (create) {
set_buffer_uptodate(bh_result);
set_buffer_mapped(bh_result);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 6555fdbee88a..f710a7075c0e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -652,12 +652,15 @@ struct drm_encoder {
* @pt_scan_info: PT scan info obtained from the VCDB of EDID
* @it_scan_info: IT scan info obtained from the VCDB of EDID
* @ce_scan_info: CE scan info obtained from the VCDB of EDID
+ * @color_enc_fmt: Colorimetry encoding formats of sink
* @hdr_eotf: Electro optical transfer function obtained from HDR block
* @hdr_metadata_type_one: Metadata type one obtained from HDR block
* @hdr_max_luminance: desired max luminance obtained from HDR block
* @hdr_avg_luminance: desired avg luminance obtained from HDR block
* @hdr_min_luminance: desired min luminance obtained from HDR block
* @hdr_supported: does the sink support HDR content
+ * @rgb_qs: does the sink declare RGB selectable quantization range
+ * @yuv_qs: does the sink declare YCC selectable quantization range
* @edid_corrupt: indicates whether the last read EDID was corrupt
* @debugfs_entry: debugfs directory for this connector
* @state: current atomic state for this connector
@@ -740,12 +743,15 @@ struct drm_connector {
u8 pt_scan_info;
u8 it_scan_info;
u8 ce_scan_info;
+ u8 color_enc_fmt;
u32 hdr_eotf;
bool hdr_metadata_type_one;
u32 hdr_max_luminance;
u32 hdr_avg_luminance;
u32 hdr_min_luminance;
bool hdr_supported;
+ bool rgb_qs;
+ bool yuv_qs;
/* Flag for raw EDID header corruption - used in Displayport
* compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
*/
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 69cb2ba37116..c0884e120041 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -214,6 +214,15 @@ struct detailed_timing {
#define DRM_EDID_YCBCR420_DC_36 (1 << 1)
#define DRM_EDID_YCBCR420_DC_30 (1 << 0)
+#define DRM_EDID_COLORIMETRY_xvYCC_601 (1 << 0)
+#define DRM_EDID_COLORIMETRY_xvYCC_709 (1 << 1)
+#define DRM_EDID_COLORIMETRY_sYCC_601 (1 << 2)
+#define DRM_EDID_COLORIMETRY_ADBYCC_601 (1 << 3)
+#define DRM_EDID_COLORIMETRY_ADB_RGB (1 << 4)
+#define DRM_EDID_COLORIMETRY_BT2020_CYCC (1 << 5)
+#define DRM_EDID_COLORIMETRY_BT2020_YCC (1 << 6)
+#define DRM_EDID_COLORIMETRY_BT2020_RGB (1 << 7)
+
/* ELD Header Block */
#define DRM_ELD_HEADER_BLOCK_SIZE 4
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 2043af2141c1..01448e01b40d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -616,5 +616,11 @@ static inline struct inode *d_real_inode(struct dentry *dentry)
return d_backing_inode(d_real(dentry));
}
+struct name_snapshot {
+ const char *name;
+ char inline_name[DNAME_INLINE_LEN];
+};
+void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
+void release_dentry_name_snapshot(struct name_snapshot *);
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index 4b4e2d5ce6e7..30c52d70c86d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -368,6 +368,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
+ * @shutdown: Called at shut-down time to quiesce the device.
* @ns_type: Callbacks so sysfs can detemine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -396,6 +397,7 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*shutdown)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 76ce329e656d..1b48d9c9a561 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
+/* list of all valid flags for the open/openat flags argument: */
+#define VALID_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
+ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 7ee1774edee5..a7789559078b 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -310,35 +310,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
}
}
-#if defined(CONFIG_FSNOTIFY) /* notify helpers */
-
-/*
- * fsnotify_oldname_init - save off the old filename before we change it
- */
-static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
-{
- return kstrdup(name, GFP_KERNEL);
-}
-
-/*
- * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
- */
-static inline void fsnotify_oldname_free(const unsigned char *old_name)
-{
- kfree(old_name);
-}
-
-#else /* CONFIG_FSNOTIFY */
-
-static inline const char *fsnotify_oldname_init(const unsigned char *name)
-{
- return NULL;
-}
-
-static inline void fsnotify_oldname_free(const unsigned char *old_name)
-{
-}
-
-#endif /* CONFIG_FSNOTIFY */
-
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index c11a5c4afece..a4b817c5e4fc 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -40,6 +40,14 @@ enum ipa_nat_en_type {
};
/**
+ * enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point
+ */
+enum ipa_ipv6ct_en_type {
+ IPA_BYPASS_IPV6CT,
+ IPA_ENABLE_IPV6CT,
+};
+
+/**
* enum ipa_mode_type - mode setting type in IPA end-point
* @BASIC: basic mode
* @ENABLE_FRAMING_HDLC: not currently supported
@@ -119,6 +127,19 @@ struct ipa_ep_cfg_nat {
};
/**
+ * struct ipa_ep_cfg_conn_track - IPv6 Connection tracking configuration in
+ * IPA end-point
+ * @conn_track_en: Defines the speculative conn_track action, i.e. whether a
+ * specific pipe needs UL/DL IPv6 Connection Tracking or bypasses
+ * it. 0: Bypass IPv6 Connection Tracking
+ * 1: IPv6 UL/DL Connection Tracking.
+ * Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_conn_track {
+ enum ipa_ipv6ct_en_type conn_track_en;
+};
+
+/**
* struct ipa_ep_cfg_hdr - header configuration in IPA end-point
*
* @hdr_len:Header length in bytes to be added/removed. Assuming
@@ -386,7 +407,8 @@ struct ipa_ep_cfg_seq {
/**
* struct ipa_ep_cfg - configuration of IPA end-point
- * @nat: NAT parmeters
+ * @nat: NAT parameters
+ * @conn_track: IPv6CT parameters
* @hdr: Header parameters
* @hdr_ext: Extended header parameters
* @mode: Mode parameters
@@ -400,6 +422,7 @@ struct ipa_ep_cfg_seq {
*/
struct ipa_ep_cfg {
struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_conn_track conn_track;
struct ipa_ep_cfg_hdr hdr;
struct ipa_ep_cfg_hdr_ext hdr_ext;
struct ipa_ep_cfg_mode mode;
diff --git a/include/linux/msm_audio_ion.h b/include/linux/msm_audio_ion.h
index 0761b880ca88..ff2fd04a3b6c 100644
--- a/include/linux/msm_audio_ion.h
+++ b/include/linux/msm_audio_ion.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,12 @@
#include <sound/pcm.h>
#include <linux/msm_ion.h>
+enum {
+ HLOS_TO_ADSP = 1,
+ ADSP_TO_HLOS,
+};
+
+#define VMID_CP_ADSP_SHARED 33
int msm_audio_ion_alloc(const char *name, struct ion_client **client,
struct ion_handle **handle, size_t bufsz,
@@ -26,6 +32,7 @@ int msm_audio_ion_import(const char *name, struct ion_client **client,
struct ion_handle **handle, int fd,
unsigned long *ionflag, size_t bufsz,
ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+
int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle);
int msm_audio_ion_mmap(struct audio_buffer *substream,
struct vm_area_struct *vma);
@@ -42,4 +49,7 @@ int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
int msm_audio_ion_free_legacy(struct ion_client *client,
struct ion_handle *handle);
u32 msm_audio_populate_upper_32_bits(ion_phys_addr_t pa);
+
+int msm_audio_ion_phys_assign(const char *name, int fd, ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type);
#endif /* _LINUX_MSM_AUDIO_ION_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 05fde31b6dc6..b64825d6ad26 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -785,6 +785,10 @@ int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_soft_reset(struct phy_device *phydev);
+static inline int genphy_no_soft_reset(struct phy_device *phydev)
+{
+ return 0;
+}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b584e353306d..982b93ccfbe4 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -249,6 +249,12 @@ enum power_supply_property {
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
POWER_SUPPLY_PROP_REAL_TYPE,
POWER_SUPPLY_PROP_PR_SWAP,
+ POWER_SUPPLY_PROP_CC_STEP,
+ POWER_SUPPLY_PROP_CC_STEP_SEL,
+ POWER_SUPPLY_PROP_SW_JEITA_ENABLED,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 45ac5a0d29ee..7097a45dbc25 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -24,6 +24,13 @@
#include <linux/list.h>
#include <linux/types.h>
+/*
+ * Choose whether access to the RAM zone requires locking or not. If a zone
+ * can be written to from different CPUs like with ftrace for example, then
+ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
+ */
+#define PRZ_FLAG_NO_LOCK BIT(0)
+
struct persistent_ram_buffer;
struct rs_control;
@@ -40,6 +47,8 @@ struct persistent_ram_zone {
void *vaddr;
struct persistent_ram_buffer *buffer;
size_t buffer_size;
+ u32 flags;
+ raw_spinlock_t buffer_lock;
/* ECC correction */
char *par_buffer;
@@ -55,7 +64,7 @@ struct persistent_ram_zone {
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype);
+ unsigned int memtype, u32 flags);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index b025df568259..bbc4625a2d39 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -252,6 +252,7 @@ struct pmic_revid_data {
u8 pmic_subtype;
const char *pmic_name;
int fab_id;
+ int tp_rev;
};
#ifdef CONFIG_QPNP_REVID
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dde00defbaa5..f3d45dd42695 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,13 +7,10 @@
#include <linux/mutex.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
+#include <linux/fs.h>
+#include <linux/cred.h>
struct seq_operations;
-struct file;
-struct path;
-struct inode;
-struct dentry;
-struct user_namespace;
struct seq_file {
char *buf;
@@ -27,9 +24,7 @@ struct seq_file {
struct mutex lock;
const struct seq_operations *op;
int poll_event;
-#ifdef CONFIG_USER_NS
- struct user_namespace *user_ns;
-#endif
+ const struct file *file;
void *private;
};
@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *);
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
- return seq->user_ns;
+ return seq->file->f_cred->user_ns;
#else
extern struct user_namespace init_user_ns;
return &init_user_ns;
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 1732697ea419..d1162e9b7a36 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -105,6 +105,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
+extern unsigned long tick_nohz_get_idle_calls(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index e888eb9a2eb9..dff7adbc60bb 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -579,9 +579,9 @@ extern void usb_ep0_reinit(struct usb_device *);
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
#define EndpointRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
#define EndpointOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
/* class requests from the USB 2.0 hub spec, table 11-15 */
/* GetBusState and SetHubDescriptor are optional, omitted */
diff --git a/include/linux/usb_bam.h b/include/linux/usb_bam.h
index e65fb12c1410..b5b9edaab783 100644
--- a/include/linux/usb_bam.h
+++ b/include/linux/usb_bam.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -191,7 +191,7 @@ struct usb_bam_pipe_connect {
};
/**
- * struct msm_usb_bam_platform_data: pipe connection information
+ * struct msm_usb_bam_data: pipe connection information
* between USB/HSIC BAM and another BAM. USB/HSIC BAM can be
* either src BAM or dst BAM
* @usb_bam_num_pipes: max number of pipes to use.
@@ -211,7 +211,7 @@ struct usb_bam_pipe_connect {
* can work at in bam2bam mode when connected to SS host.
* @enable_hsusb_bam_on_boot: Enable HSUSB BAM (non-NDP) on bootup itself
*/
-struct msm_usb_bam_platform_data {
+struct msm_usb_bam_data {
u8 max_connections;
int usb_bam_num_pipes;
phys_addr_t usb_bam_fifo_baseaddr;
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index ddb440975382..34851bf2e2c8 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -85,6 +85,8 @@ extern void vfio_unregister_iommu_driver(
*/
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep);
extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index c93364b861d9..bc838936d2c9 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,8 @@ enum {
#define HAVE_WCNSS_CAL_DOWNLOAD 1
#define HAVE_CBC_DONE 1
#define HAVE_WCNSS_RX_BUFF_COUNT 1
+#define HAVE_WCNSS_SNOC_HIGH_FREQ_VOTING 1
+#define HAVE_WCNSS_5G_DISABLE 1
#define WLAN_MAC_ADDR_SIZE (6)
#define WLAN_RF_REG_ADDR_START_OFFSET 0x3
#define WLAN_RF_REG_DATA_START_OFFSET 0xf
@@ -132,12 +134,16 @@ void wcnss_riva_dump_pmic_regs(void);
int wcnss_xo_auto_detect_enabled(void);
u32 wcnss_get_wlan_rx_buff_count(void);
int wcnss_wlan_iris_xo_mode(void);
+int wcnss_wlan_dual_band_disabled(void);
void wcnss_flush_work(struct work_struct *work);
void wcnss_flush_delayed_work(struct delayed_work *dwork);
void wcnss_init_work(struct work_struct *work , void *callbackptr);
void wcnss_init_delayed_work(struct delayed_work *dwork , void *callbackptr);
int wcnss_get_iris_name(char *iris_version);
void wcnss_dump_stack(struct task_struct *task);
+void wcnss_snoc_vote(bool clk_chk_en);
+int wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
+ struct device *dev);
#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
void wcnss_log_debug_regs_on_bite(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 814a13d22df6..f9bdfb096579 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -21,6 +21,7 @@ struct route_info {
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
+#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
@@ -209,4 +210,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
return daddr;
}
+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+ return a->dst.dev == b->dst.dev &&
+ a->rt6i_idev == b->rt6i_idev &&
+ ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+ !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
#endif
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index af389305207f..f0a3124dae00 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -121,9 +121,9 @@ extern s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
extern u32 scm_get_version(void);
extern int scm_is_call_available(u32 svc_id, u32 cmd_id);
-extern int scm_get_feat_version(u32 feat);
+extern int scm_get_feat_version(u32 feat, u64 *scm_ret);
extern bool is_scm_armv8(void);
-extern int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret);
+extern int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret);
extern u32 scm_io_read(phys_addr_t address);
extern int scm_io_write(phys_addr_t address, u32 val);
extern bool scm_is_secure_device(void);
@@ -205,7 +205,7 @@ static inline int scm_is_call_available(u32 svc_id, u32 cmd_id)
return 0;
}
-static inline int scm_get_feat_version(u32 feat)
+static inline int scm_get_feat_version(u32 feat, u64 *scm_ret)
{
return 0;
}
@@ -215,7 +215,7 @@ static inline bool is_scm_armv8(void)
return true;
}
-static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret)
{
return 0;
}
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 2680a13721c2..21b30bf7c74c 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -4012,7 +4012,7 @@ struct asm_stream_pan_ctrl_params {
uint16_t num_input_channels;
uint16_t output_channel_map[8];
uint16_t input_channel_map[8];
- uint16_t gain[64];
+ uint32_t gain[64];
} __packed;
#define ASM_END_POINT_DEVICE_MATRIX 0
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 177c2f4da32e..2ff0a2b02d4a 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -99,7 +99,7 @@
#define SOFT_PAUSE_ENABLE 1
#define SOFT_PAUSE_DISABLE 0
-#define ASM_ACTIVE_STREAMS_ALLOWED 0x8
+#define ASM_ACTIVE_STREAMS_ALLOWED 0x9
/* Control session is used for mapping calibration memory */
#define ASM_CONTROL_SESSION (ASM_ACTIVE_STREAMS_ALLOWED + 1)
@@ -639,7 +639,7 @@ int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
int q6asm_send_stream_cmd(struct audio_client *ac,
struct msm_adsp_event_data *data);
-int q6asm_send_ion_fd(struct audio_client *ac, int fd);
+int q6asm_audio_map_shm_fd(struct audio_client *ac, int fd);
int q6asm_send_rtic_event_ack(struct audio_client *ac,
void *param, uint32_t params_length);
diff --git a/include/sound/q6core.h b/include/sound/q6core.h
index 1d81bda4b513..4f55880d410f 100644
--- a/include/sound/q6core.h
+++ b/include/sound/q6core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
#define AVCS_CMDRSP_ADSP_EVENT_GET_STATE 0x0001290D
bool q6core_is_adsp_ready(void);
+int q6core_add_remove_pool_pages(phys_addr_t buf_add, uint32_t bufsz,
+ uint32_t mempool_id, bool add_pages);
#define ADSP_CMD_SET_DTS_EAGLE_DATA_ID 0x00012919
#define DTS_EAGLE_LICENSE_ID 0x00028346
@@ -153,4 +155,16 @@ struct avcs_cmd_deregister_topologies {
int32_t core_set_license(uint32_t key, uint32_t module_id);
int32_t core_get_license_status(uint32_t module_id);
+#define ADSP_MEMORY_MAP_HLOS_PHYSPOOL 4
+#define AVCS_CMD_ADD_POOL_PAGES 0x0001292E
+#define AVCS_CMD_REMOVE_POOL_PAGES 0x0001292F
+
+struct avs_mem_assign_region {
+ struct apr_hdr hdr;
+ u32 pool_id;
+ u32 size;
+ u32 addr_lsw;
+ u32 addr_msw;
+} __packed;
+
#endif /* __Q6CORE_H__ */
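
The new AVCS add/remove pool-pages command carries the buffer's physical address as two 32-bit words. The sketch below shows only the payload packing; the APR header routing fields and the actual send are assumed to be handled elsewhere, and the helper name is an assumption for illustration.

/* Illustrative payload packing for the new command: the 64-bit physical
 * address is split into lower/upper 32-bit words as the wire format
 * expects.
 */
static void fill_pool_pages_cmd(struct avs_mem_assign_region *cmd,
                                phys_addr_t buf_add, uint32_t bufsz,
                                bool add_pages)
{
        cmd->hdr.opcode = add_pages ? AVCS_CMD_ADD_POOL_PAGES :
                                      AVCS_CMD_REMOVE_POOL_PAGES;
        cmd->pool_id = ADSP_MEMORY_MAP_HLOS_PHYSPOOL;
        cmd->size = bufsz;
        cmd->addr_lsw = lower_32_bits(buf_add);
        cmd->addr_msw = upper_32_bits(buf_add);
}
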
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 373d3342002b..e0efe3fcf739 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -64,6 +64,14 @@
#define TA_DEFAULT_FABRIC_PROT_TYPE 0
/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
+/*
+ * Used to control the sending of keys with the optional-to-respond state bit,
+ * as a workaround for non-RFC-compliant initiators that neither propose
+ * nor respond to specific keys required for login to complete.

+ *
+ * See iscsi_check_proposer_for_optional_reply() for more details.
+ */
+#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
#define ISCSI_IOV_DATA_BUFFER 5
@@ -765,6 +773,7 @@ struct iscsi_tpg_attrib {
u8 t10_pi;
u32 fabric_prot_type;
u32 tpg_enabled_sendtargets;
+ u32 login_keys_workaround;
struct iscsi_portal_group *tpg;
};
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 84f7f2139a91..feffec7b489a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1698,6 +1698,48 @@ TRACE_EVENT(sched_boost_task,
);
/*
+ * Tracepoint for find_best_target
+ */
+TRACE_EVENT(sched_find_best_target,
+
+ TP_PROTO(struct task_struct *tsk, bool prefer_idle,
+ unsigned long min_util, int start_cpu,
+ int best_idle, int best_active, int target),
+
+ TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
+ best_idle, best_active, target),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( unsigned long, min_util )
+ __field( bool, prefer_idle )
+ __field( int, start_cpu )
+ __field( int, best_idle )
+ __field( int, best_active )
+ __field( int, target )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->min_util = min_util;
+ __entry->prefer_idle = prefer_idle;
+ __entry->start_cpu = start_cpu;
+ __entry->best_idle = best_idle;
+ __entry->best_active = best_active;
+ __entry->target = target;
+ ),
+
+ TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d "
+ "best_idle=%d best_active=%d target=%d",
+ __entry->pid, __entry->comm,
+ __entry->prefer_idle, __entry->start_cpu,
+ __entry->best_idle, __entry->best_active,
+ __entry->target)
+);
+
+/*
* Tracepoint for accounting sched group energy
*/
TRACE_EVENT(sched_energy_diff,
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 8b51873e7b08..a852f2a3701f 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -234,7 +234,7 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
- __u64 __user relocs; /* in, ptr to array of submit_reloc's */
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -274,8 +274,8 @@ struct drm_msm_gem_submit {
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
- __u64 __user bos; /* in, ptr to array of submit_bo's */
- __u64 __user cmds; /* in, ptr to array of submit_cmd's */
+ __u64 bos; /* in, ptr to array of submit_bo's */
+ __u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* gap for the fence_fd which is upstream */
__u32 queueid; /* in, submitqueue id */
};
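
Dropping __user from these __u64 fields reflects that __user is a kernel-internal sparse annotation with no meaning in a UAPI struct; the fields are plain 64-bit integers that happen to carry userspace pointers. A userspace-side sketch of filling them (casting through uintptr_t) follows; the helper and the include path (the libdrm-installed copy of this header) are assumptions for illustration.

/* Illustrative userspace helper: pointers are stored into the plain
 * __u64 fields by casting through uintptr_t.
 */
#include <stdint.h>
#include <drm/msm_drm.h>

static void fill_submit(struct drm_msm_gem_submit *submit,
                        struct drm_msm_gem_submit_bo *bos, uint32_t nr_bos,
                        struct drm_msm_gem_submit_cmd *cmds, uint32_t nr_cmds)
{
        submit->nr_bos = nr_bos;
        submit->nr_cmds = nr_cmds;
        submit->bos = (uintptr_t)bos;
        submit->cmds = (uintptr_t)cmds;
}
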
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 7668b5791c91..5539933b3491 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -37,9 +37,56 @@ enum {
BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
-enum {
+/**
+ * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
+ *
+ */
+enum flat_binder_object_shifts {
+ FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
+enum flat_binder_object_flags {
+ /**
+ * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+ *
+ * These bits can be used to set the minimum scheduler priority
+ * at which transactions into this node should run. Valid values
+ * in these bits depend on the scheduler policy encoded in
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+ *
+ * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
+ * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
+ */
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ /**
+ * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+ */
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+ /**
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+ *
+ * These two bits can be used to set the min scheduling policy at which
+ * transactions on this node should run. These match the UAPI
+ * scheduler policy values, eg:
+ * 00b: SCHED_NORMAL
+ * 01b: SCHED_FIFO
+ * 10b: SCHED_RR
+ * 11b: SCHED_BATCH
+ */
+ FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+ 3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
+
+ /**
+ * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+ *
+ * Only when set, calls into this node will inherit a real-time
+ * scheduling policy from the caller (for synchronous transactions).
+ */
+ FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
};
#ifdef BINDER_IPC_32BIT
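
The scheduling policy now occupies the two bits at FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT while the minimum priority stays in the low byte. The helpers below are a small illustrative sketch of packing and unpacking those fields; they are not part of this header.

/* Illustrative helpers for the bit layout described above. */
static inline __u32 fbo_pack_sched(__u32 policy, __u32 min_prio)
{
        return ((policy << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) &
                FLAT_BINDER_FLAG_SCHED_POLICY_MASK) |
               (min_prio & FLAT_BINDER_FLAG_PRIORITY_MASK);
}

static inline __u32 fbo_sched_policy(__u32 flags)
{
        return (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
                FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
}
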
@@ -186,6 +233,19 @@ struct binder_version {
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
+ * Set ptr to NULL for the first call to get the info for the first node, and
+ * then repeat the call passing the previously returned value to get the next
+ * nodes. ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+ __u32 has_strong_ref;
+ __u32 has_weak_ref;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -193,6 +253,7 @@ struct binder_version {
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
/*
* NOTE: Two special error codes you should check for when calling
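
The binder_node_debug_info comment above describes a cursor-style iteration over the driver's nodes. A userspace-side sketch of that loop is given below, assuming an already opened binder fd and the UAPI header installed as <linux/android/binder.h>; error handling is kept minimal.

/* Illustrative iteration: start with ptr == 0, feed the returned ptr back
 * in, and stop once the driver reports ptr == 0 again.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int dump_binder_nodes(int binder_fd)
{
        struct binder_node_debug_info info = { .ptr = 0 };

        do {
                if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
                        return -1;
                if (info.ptr)
                        printf("node %llx strong=%u weak=%u\n",
                               (unsigned long long)info.ptr,
                               info.has_strong_ref, info.has_weak_ref);
        } while (info.ptr);

        return 0;
}
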
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index 60867630e1a1..dc46ee0f29a2 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -47,6 +47,12 @@
#define QMI_IPA_MAX_FILTERS_EX_V01 128
#define QMI_IPA_MAX_PIPES_V01 20
#define QMI_IPA_MAX_APN_V01 8
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
+/* Currently the maximum we can use is only 1, but for scalability
+ * purposes the maximum value is set to 8.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
#define IPA_INT_MAX ((int)(~0U>>1))
#define IPA_INT_MIN (-IPA_INT_MAX - 1)
@@ -984,6 +990,16 @@ struct ipa_fltr_installed_notif_req_msg_v01 {
* failure, the Rule Ids in this list must be set to a reserved
* index (255).
*/
+
+ /* Optional */
+ /* List of destination pipe IDs. */
+ uint8_t dst_pipe_id_valid;
+ /* Must be set to true if dst_pipe_id is being passed. */
+ uint32_t dst_pipe_id_len;
+ /* Must be set to # of elements in dst_pipe_id. */
+ uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
+ /* Provides the list of destination pipe IDs for a source pipe. */
+
}; /* Message */
/* Response Message; This is the message that is exchanged between the
@@ -1622,6 +1638,273 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
*/
}; /* Message */
+/*
+ * Request Message; Requests the modem IPA driver to enable or
+ * disable collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_req_msg_v01 {
+
+ /* Mandatory */
+ /* Collect statistics per client; */
+ uint8_t enable_per_client_stats;
+ /*
+ * Indicates whether to start or stop collecting
+ * per client statistics.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to enable or disable
+ * collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type. */
+}; /* Message */
+
+struct ipa_per_client_stats_info_type_v01 {
+
+ uint32_t client_id;
+ /*
+ * Id of the client on APPS processor side for which Modem processor
+ * needs to send uplink/downlink statistics.
+ */
+
+ uint32_t src_pipe_id;
+ /*
+ * IPA consumer pipe on which client on APPS side sent uplink
+ * data to modem.
+ */
+
+ uint64_t num_ul_ipv4_bytes;
+ /*
+ * Accumulated number of uplink IPv4 bytes for a client.
+ */
+
+ uint64_t num_ul_ipv6_bytes;
+ /*
+ * Accumulated number of uplink IPv6 bytes for a client.
+ */
+
+ uint64_t num_dl_ipv4_bytes;
+ /*
+ * Accumulated number of downlink IPv4 bytes for a client.
+ */
+
+ uint64_t num_dl_ipv6_bytes;
+ /*
+ * Accumulated number of downlink IPv6 bytes for a client.
+ */
+
+
+ uint32_t num_ul_ipv4_pkts;
+ /*
+ * Accumulated number of uplink IPv4 packets for a client.
+ */
+
+ uint32_t num_ul_ipv6_pkts;
+ /*
+ * Accumulated number of uplink IPv6 packets for a client.
+ */
+
+ uint32_t num_dl_ipv4_pkts;
+ /*
+ * Accumulated number of downlink IPv4 packets for a client.
+ */
+
+ uint32_t num_dl_ipv6_pkts;
+ /*
+ * Accumulated number of downlink IPv6 packets for a client.
+ */
+}; /* Type */
+
+/*
+ * Request Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_req_msg_v01 {
+
+ /* Mandatory */
+ /* Client id */
+ uint32_t client_id;
+ /*
+ * Id of the client on APPS processor side for which Modem processor
+ * needs to send uplink/downlink statistics. If client id is specified
+ * as 0xffffffff, then Q6 will send the stats for all the clients of
+ * the specified source pipe.
+ */
+
+ /* Mandatory */
+ /* Source pipe id */
+ uint32_t src_pipe_id;
+ /*
+ * IPA consumer pipe on which client on APPS side sent uplink
+ * data to modem. In future, this implementation can be extended
+ * to provide 0xffffffff as the source pipe id, where Q6 will send
+ * the stats of all the clients across all different tethered-pipes.
+ */
+
+ /* Optional */
+ /* Reset client statistics. */
+ uint8_t reset_stats_valid;
+ /* Must be set to true if reset_stats is being passed. */
+ uint8_t reset_stats;
+ /*
+ * Option to reset the statistics currently collected by modem for this
+ * particular client.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type. */
+
+ /* Optional */
+ /* Per clients Statistics List */
+ uint8_t per_client_stats_list_valid;
+ /* Must be set to true if per_client_stats_list is being passed. */
+ uint32_t per_client_stats_list_len;
+ /* Must be set to # of elements in per_client_stats_list. */
+ struct ipa_per_client_stats_info_type_v01
+ per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01];
+ /*
+ * List of all per client statistics that are retrieved.
+ */
+}; /* Message */
+
+struct ipa_ul_firewall_rule_type_v01 {
+
+ enum ipa_ip_type_enum_v01 ip_type;
+ /*
+ * IP type for which this rule is applicable.
+ * The driver must identify the filter table (v6 or v4), and this
+ * field is essential for that. Values:
+ * - QMI_IPA_IP_TYPE_INVALID (0) -- Invalid IP type identifier
+ * - QMI_IPA_IP_TYPE_V4 (1) -- IPv4 type
+ * - QMI_IPA_IP_TYPE_V6 (2) -- IPv6 type
+ */
+
+ struct ipa_filter_rule_type_v01 filter_rule;
+ /*
+ * Rules in the filter specification. These rules are the
+ * ones that are matched against fields in the packet.
+ * Currently we only send IPv6 whitelist rules to Q6.
+ */
+}; /* Type */
+
+/*
+ * Request Message; Requests the remote IPA driver to install uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_req_msg_v01 {
+
+ /* Optional */
+ /* Uplink Firewall Specification */
+ uint32_t firewall_rules_list_len;
+ /* Must be set to # of elements in firewall_rules_list. */
+ struct ipa_ul_firewall_rule_type_v01
+ firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01];
+ /*
+ * List of uplink firewall specifications of filters that must be
+ * installed.
+ */
+
+ uint32_t mux_id;
+ /*
+ * QMAP Mux ID. As a part of the QMAP protocol,
+ * several data calls may be multiplexed over the same physical
+ * transport channel. This identifier is used to identify one
+ * such data call. The maximum value for this identifier is 255.
+ */
+
+ /* Optional */
+ uint8_t disable_valid;
+ /* Must be set to true if disable is being passed. */
+ uint8_t disable;
+ /*
+ * Indicates whether uplink firewall needs to be enabled or disabled.
+ */
+
+ /* Optional */
+ uint8_t are_blacklist_filters_valid;
+ /* Must be set to true if are_blacklist_filters is being passed. */
+ uint8_t are_blacklist_filters;
+ /*
+ * Indicates whether the filters received as part of this message are
+ * blacklist filters, i.e. uplink packets matching these rules are dropped.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /*
+ * Standard response type. Contains the following data members:
+ * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+ * qmi_error_type -- Error code. Possible error code values are
+ * described in the error codes section of each message definition.
+ */
+}; /* Message */
+
+enum ipa_ul_firewall_status_enum_v01 {
+ IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32-bit signed enum. Do not change or use. */
+ QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0,
+ /* Indicates that the uplink firewall rules
+ * are configured successfully.
+ */
+ QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1,
+ /* Indicates that the uplink firewall rules
+ * are not configured successfully.
+ */
+ IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32-bit signed enum. Do not change or use. */
+};
+
+struct ipa_ul_firewall_config_result_type_v01 {
+
+ enum ipa_ul_firewall_status_enum_v01 is_success;
+ /*
+ * Indicates whether the uplink firewall rules are configured
+ * successfully.
+ */
+
+ uint32_t mux_id;
+ /*
+ * QMAP Mux ID. As a part of the QMAP protocol,
+ * several data calls may be multiplexed over the same physical
+ * transport channel. This identifier is used to identify one
+ * such data call. The maximum value for this identifier is 255.
+ */
+};
+
+/*
+ * Indication Message; Requests the remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_ind_msg_v01 {
+
+ struct ipa_ul_firewall_config_result_type_v01 result;
+}; /* Message */
+
+
/*Service Message Definition*/
#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@@ -1655,6 +1938,13 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037
#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A
/* add for max length*/
#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
@@ -1663,7 +1953,7 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
-#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
@@ -1696,6 +1986,15 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685
#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595
+
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11
/* Service Object Accessor */
#endif/* IPA_QMI_SERVICE_V01_H */
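
The new per-client statistics messages follow the usual QMI optional-TLV convention (a *_valid flag plus a length and payload), and the request comment above reserves 0xffffffff as a wildcard client_id. A hedged sketch of building such a request is shown below; the helper name is an assumption for illustration.

/* Illustrative request packing: 0xffffffff selects every client on the
 * given source pipe, and the optional reset TLV is only marked valid when
 * it is actually sent.
 */
static void fill_stats_per_client_req(
                struct ipa_get_stats_per_client_req_msg_v01 *req,
                uint32_t src_pipe_id, bool reset)
{
        memset(req, 0, sizeof(*req));
        req->client_id = 0xffffffff;    /* wildcard: all clients */
        req->src_pipe_id = src_pipe_id;
        if (reset) {
                req->reset_stats_valid = 1;
                req->reset_stats = 1;
        }
}
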
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 866ec3d2af69..7845fdd556fa 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -138,6 +138,11 @@ struct snd_compr_audio_info {
#define SNDRV_COMPRESS_CLK_REC_MODE_NONE 0
#define SNDRV_COMPRESS_CLK_REC_MODE_AUTO 1
+enum sndrv_compress_latency_mode {
+ SNDRV_COMPRESS_LEGACY_LATENCY_MODE = 0,
+ SNDRV_COMPRESS_LOW_LATENCY_MODE = 1,
+};
+
/**
* enum sndrv_compress_encoder
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
@@ -164,6 +169,7 @@ enum sndrv_compress_encoder {
SNDRV_COMPRESS_START_DELAY = 9,
SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK = 10,
SNDRV_COMPRESS_ADJUST_SESSION_CLOCK = 11,
+ SNDRV_COMPRESS_LATENCY_MODE = 12,
};
#define SNDRV_COMPRESS_PATH_DELAY SNDRV_COMPRESS_PATH_DELAY
@@ -174,6 +180,7 @@ enum sndrv_compress_encoder {
#define SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK \
SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK
#define SNDRV_COMPRESS_ADJUST_SESSION_CLOCK SNDRV_COMPRESS_ADJUST_SESSION_CLOCK
+#define SNDRV_COMPRESS_LATENCY_MODE SNDRV_COMPRESS_LATENCY_MODE
/**
* struct snd_compr_metadata - compressed stream metadata
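
SNDRV_COMPRESS_LATENCY_MODE is a new metadata key, so userspace would select low-latency mode through the existing set-metadata path. The sketch below assumes the standard SNDRV_COMPRESS_SET_METADATA ioctl and an already opened compress device; whether a given driver honours the key is device specific.

/* Illustrative userspace call: request low-latency mode via the new
 * metadata key.
 */
#include <sys/ioctl.h>
#include <sound/compress_offload.h>

static int set_low_latency(int compr_fd)
{
        struct snd_compr_metadata metadata = {
                .key = SNDRV_COMPRESS_LATENCY_MODE,
                .value = { SNDRV_COMPRESS_LOW_LATENCY_MODE },
        };

        return ioctl(compr_fd, SNDRV_COMPRESS_SET_METADATA, &metadata);
}
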
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 25b7a678f9ef..46436543ad28 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1251,8 +1251,10 @@ retry:
timeo = MAX_SCHEDULE_TIMEOUT;
ret = netlink_attachskb(sock, nc, &timeo, NULL);
- if (ret == 1)
+ if (ret == 1) {
+ sock = NULL;
goto retry;
+ }
if (ret) {
sock = NULL;
nc = NULL;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 85de5094b936..c97bce6a0e0e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -765,6 +765,11 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
+ if (is_pointer_value(env, insn->src_reg)) {
+ verbose("R%d leaks addr into mem\n", insn->src_reg);
+ return -EACCES;
+ }
+
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7fee87daac56..87f5d841f796 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1693,7 +1693,33 @@ static int __perf_remove_from_context(void *info)
return 0;
}
-/*
+
+#ifdef CONFIG_SMP
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+ int up_ret;
+ /*
+ * CPU was offline. Bring it online so we can
+ * gracefully exit a perf context.
+ */
+ up_ret = cpu_up(event->cpu);
+ if (!up_ret)
+ /* Try the remove call once again. */
+ cpu_function_call(event->cpu, __perf_remove_from_context,
+ rep);
+ else
+ pr_err("Failed to bring up CPU: %d, ret: %d\n",
+ event->cpu, up_ret);
+}
+#else
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+}
+#endif
+
+ /*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
@@ -1728,6 +1754,9 @@ static void __ref perf_remove_from_context(struct perf_event *event,
*/
ret = cpu_function_call(event->cpu, __perf_remove_from_context,
&re);
+ if (ret == -ENXIO)
+ perf_retry_remove(event, &re);
+
return;
}
@@ -3408,22 +3437,27 @@ u64 perf_event_read_local(struct perf_event *event)
static int perf_event_read(struct perf_event *event, bool group)
{
- int ret = 0;
+ int event_cpu, ret = 0;
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
+ event_cpu = READ_ONCE(event->oncpu);
+
if (event->state == PERF_EVENT_STATE_ACTIVE &&
- !cpu_isolated(event->oncpu)) {
+ !cpu_isolated(event_cpu)) {
struct perf_read_data data = {
.event = event,
.group = group,
.ret = 0,
};
+
+ if ((unsigned int)event_cpu >= nr_cpu_ids)
+ return 0;
if (!event->attr.exclude_idle ||
- !per_cpu(is_idle, event->oncpu)) {
- smp_call_function_single(event->oncpu,
+ !per_cpu(is_idle, event_cpu)) {
+ smp_call_function_single(event_cpu,
__perf_event_read, &data, 1);
ret = data.ret;
}
@@ -6566,21 +6600,6 @@ static void perf_log_itrace_start(struct perf_event *event)
perf_output_end(&handle);
}
-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
-{
- /*
- * Due to interrupt latency (AKA "skid"), we may enter the
- * kernel before taking an overflow, even if the PMU is only
- * counting user events.
- * To avoid leaking information to userspace, we must always
- * reject kernel samples when exclude_kernel is set.
- */
- if (event->attr.exclude_kernel && !user_mode(regs))
- return false;
-
- return true;
-}
-
/*
* Generic event overflow handling, sampling.
*/
@@ -6628,12 +6647,6 @@ static int __perf_event_overflow(struct perf_event *event,
}
/*
- * For security, drop the skid kernel samples if necessary.
- */
- if (!sample_is_allowed(event, regs))
- return ret;
-
- /*
* XXX event_limit might not quite work as expected on inherited
* events
*/
@@ -7109,6 +7122,8 @@ static struct pmu perf_swevent = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
#ifdef CONFIG_EVENT_TRACING
@@ -7230,6 +7245,8 @@ static struct pmu perf_tracepoint = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
static inline void perf_tp_register(void)
@@ -7517,6 +7534,8 @@ static struct pmu perf_cpu_clock = {
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
+
+ .events_across_hotplug = 1,
};
/*
@@ -7598,6 +7617,8 @@ static struct pmu perf_task_clock = {
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
+
+ .events_across_hotplug = 1,
};
static void perf_pmu_nop_void(struct pmu *pmu)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 92ce5f4ccc26..7da5b674d16e 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -614,6 +614,8 @@ static struct pmu perf_breakpoint = {
.start = hw_breakpoint_start,
.stop = hw_breakpoint_stop,
.read = hw_breakpoint_pmu_read,
+
+ .events_across_hotplug = 1,
};
int __init init_hw_breakpoint(void)
diff --git a/kernel/extable.c b/kernel/extable.c
index e820ccee9846..4f06fc34313f 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
return 0;
}
-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext)
diff --git a/kernel/fork.c b/kernel/fork.c
index 246b8a57a32d..fef4df444f47 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -832,8 +832,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
mm = get_task_mm(task);
if (mm && mm != current->mm &&
- !ptrace_may_access(task, mode) &&
- !capable(CAP_SYS_RESOURCE)) {
+ !ptrace_may_access(task, mode)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
diff --git a/kernel/resource.c b/kernel/resource.c
index 4c9835c09dcd..c09d484f7b5f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -105,16 +105,25 @@ static int r_show(struct seq_file *m, void *v)
{
struct resource *root = m->private;
struct resource *r = v, *p;
+ unsigned long long start, end;
int width = root->end < 0x10000 ? 4 : 8;
int depth;
for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
if (p->parent == root)
break;
+
+ if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
+ start = r->start;
+ end = r->end;
+ } else {
+ start = end = 0;
+ }
+
seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
depth * 2, "",
- width, (unsigned long long) r->start,
- width, (unsigned long long) r->end,
+ width, start,
+ width, end,
r->name ? r->name : "<BAD>");
return 0;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0071785e698b..4ecca604e64b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6924,6 +6924,9 @@ enum s_alloc {
* Build an iteration mask that can exclude certain CPUs from the upwards
* domain traversal.
*
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
* Asymmetric node setups can result in situations where the domain tree is of
* unequal depth, make sure to skip domains that already cover the entire
* range.
@@ -6935,18 +6938,31 @@ enum s_alloc {
*/
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
- const struct cpumask *span = sched_domain_span(sd);
+ const struct cpumask *sg_span = sched_group_cpus(sg);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
- for_each_cpu(i, span) {
+ for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+ /*
+ * Can happen in the asymmetric case, where these siblings are
+ * unused. The mask will not be empty because those CPUs that
+ * do have the top domain _should_ span the domain.
+ */
+ if (!sibling->child)
+ continue;
+
+ /* If we would not end up here, we can't continue from here */
+ if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
continue;
cpumask_set_cpu(i, sched_group_mask(sg));
}
+
+ /* We must not have empty masks here */
+ WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}
/*
@@ -9216,11 +9232,20 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
- sched_online_group(tg, parent);
-
return &tg->css;
}
+/* Expose task group only after completing cgroup initialization */
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+ struct task_group *parent = css_tg(css->parent);
+
+ if (parent)
+ sched_online_group(tg, parent);
+ return 0;
+}
+
static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
@@ -9602,6 +9627,7 @@ static struct cftype cpu_files[] = {
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
+ .css_online = cpu_cgroup_css_online,
.css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
.fork = cpu_cgroup_fork,
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 75bfbb336722..e12309c1b07b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -47,6 +47,7 @@ struct sugov_policy {
s64 up_rate_delay_ns;
s64 down_rate_delay_ns;
unsigned int next_freq;
+ unsigned int cached_raw_freq;
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
@@ -63,7 +64,6 @@ struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned int cached_raw_freq;
unsigned long iowait_boost;
unsigned long iowait_boost_max;
u64 last_update;
@@ -72,6 +72,11 @@ struct sugov_cpu {
unsigned long util;
unsigned long max;
unsigned int flags;
+
+ /* The field below is for single-CPU policies only. */
+#ifdef CONFIG_NO_HZ_COMMON
+ unsigned long saved_idle_calls;
+#endif
};
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -127,22 +132,20 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
return;
+ if (sg_policy->next_freq == next_freq)
+ return;
+
+ sg_policy->next_freq = next_freq;
+ sg_policy->last_freq_update_time = time;
+
if (policy->fast_switch_enabled) {
- if (sg_policy->next_freq == next_freq) {
- trace_cpu_frequency(policy->cur, smp_processor_id());
- return;
- }
- sg_policy->next_freq = next_freq;
- sg_policy->last_freq_update_time = time;
next_freq = cpufreq_driver_fast_switch(policy, next_freq);
if (next_freq == CPUFREQ_ENTRY_INVALID)
return;
policy->cur = next_freq;
trace_cpu_frequency(next_freq, smp_processor_id());
- } else if (sg_policy->next_freq != next_freq) {
- sg_policy->next_freq = next_freq;
- sg_policy->last_freq_update_time = time;
+ } else {
sg_policy->work_in_progress = true;
irq_work_queue(&sg_policy->irq_work);
}
@@ -150,7 +153,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
* @util: Current CPU utilization.
* @max: CPU capacity.
*
@@ -170,19 +173,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
* next_freq (as calculated above) is returned, subject to policy min/max and
* cpufreq driver limitations.
*/
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
- unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ unsigned long util, unsigned long max)
{
- struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
freq = (freq + (freq >> 2)) * util / max;
- if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
return sg_policy->next_freq;
- sg_cpu->cached_raw_freq = freq;
+ sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
@@ -248,6 +250,19 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
sg_cpu->iowait_boost >>= 1;
}
+#ifdef CONFIG_NO_HZ_COMMON
+static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+{
+ unsigned long idle_calls = tick_nohz_get_idle_calls();
+ bool ret = idle_calls == sg_cpu->saved_idle_calls;
+
+ sg_cpu->saved_idle_calls = idle_calls;
+ return ret;
+}
+#else
+static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+#endif /* CONFIG_NO_HZ_COMMON */
+
static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int flags)
{
@@ -256,6 +271,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
struct cpufreq_policy *policy = sg_policy->policy;
unsigned long util, max;
unsigned int next_f;
+ bool busy;
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
@@ -263,40 +279,37 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
if (!sugov_should_update_freq(sg_policy, time))
return;
+ busy = sugov_cpu_is_busy(sg_cpu);
+
if (flags & SCHED_CPUFREQ_DL) {
next_f = policy->cpuinfo.max_freq;
} else {
sugov_get_util(&util, &max, time);
sugov_iowait_boost(sg_cpu, &util, &max);
- next_f = get_next_freq(sg_cpu, util, max);
+ next_f = get_next_freq(sg_policy, util, max);
+ /*
+ * Do not reduce the frequency if the CPU has not been idle
+ * recently, as the reduction is likely to be premature then.
+ */
+ if (busy && next_f < sg_policy->next_freq)
+ next_f = sg_policy->next_freq;
}
sugov_update_commit(sg_policy, time, next_f);
}
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
- unsigned long util, unsigned long max,
- unsigned int flags)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- unsigned int max_f = policy->cpuinfo.max_freq;
u64 last_freq_update_time = sg_policy->last_freq_update_time;
+ unsigned long util = 0, max = 1;
unsigned int j;
- if (flags & SCHED_CPUFREQ_DL)
- return max_f;
-
- sugov_iowait_boost(sg_cpu, &util, &max);
-
for_each_cpu(j, policy->cpus) {
- struct sugov_cpu *j_sg_cpu;
+ struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
s64 delta_ns;
- if (j == smp_processor_id())
- continue;
-
- j_sg_cpu = &per_cpu(sugov_cpu, j);
/*
* If the CPU utilization was last updated before the previous
* frequency update and the time elapsed between the last update
@@ -310,7 +323,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
continue;
}
if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
- return max_f;
+ return policy->cpuinfo.max_freq;
j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
@@ -322,7 +335,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
sugov_iowait_boost(j_sg_cpu, &util, &max);
}
- return get_next_freq(sg_cpu, util, max);
+ return get_next_freq(sg_policy, util, max);
}
static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -345,7 +358,11 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
sg_cpu->last_update = time;
if (sugov_should_update_freq(sg_policy, time)) {
- next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
+ if (flags & SCHED_CPUFREQ_DL)
+ next_f = sg_policy->policy->cpuinfo.max_freq;
+ else
+ next_f = sugov_next_freq_shared(sg_cpu);
+
sugov_update_commit(sg_policy, time, next_f);
}
@@ -371,15 +388,15 @@ static void sugov_irq_work(struct irq_work *irq_work)
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
/*
- * For Real Time and Deadline tasks, schedutil governor shoots the
- * frequency to maximum. And special care must be taken to ensure that
- * this kthread doesn't result in that.
+ * For RT and deadline tasks, the schedutil governor shoots the
+ * frequency to maximum. Special care must be taken to ensure that this
+ * kthread doesn't result in the same behavior.
*
* This is (mostly) guaranteed by the work_in_progress flag. The flag is
- * updated only at the end of the sugov_work() and before that schedutil
- * rejects all other frequency scaling requests.
+ * updated only at the end of the sugov_work() function and before that
+ * the schedutil governor rejects all other frequency scaling requests.
*
- * Though there is a very rare case where the RT thread yields right
+ * There is a very rare case though, where the RT thread yields right
* after the work_in_progress flag is cleared. The effects of that are
* neglected for now.
*/
@@ -489,15 +506,12 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
return NULL;
sg_policy->policy = policy;
- init_irq_work(&sg_policy->irq_work, sugov_irq_work);
- mutex_init(&sg_policy->work_lock);
raw_spin_lock_init(&sg_policy->update_lock);
return sg_policy;
}
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
- mutex_destroy(&sg_policy->work_lock);
kfree(sg_policy);
}
@@ -531,6 +545,9 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
sg_policy->thread = thread;
kthread_bind_mask(thread, policy->related_cpus);
+ init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+ mutex_init(&sg_policy->work_lock);
+
wake_up_process(thread);
return 0;
@@ -544,6 +561,7 @@ static void sugov_kthread_stop(struct sugov_policy *sg_policy)
flush_kthread_worker(&sg_policy->worker);
kthread_stop(sg_policy->thread);
+ mutex_destroy(&sg_policy->work_lock);
}
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
@@ -578,9 +596,13 @@ static int sugov_init(struct cpufreq_policy *policy)
if (policy->governor_data)
return -EBUSY;
+ cpufreq_enable_fast_switch(policy);
+
sg_policy = sugov_policy_alloc(policy);
- if (!sg_policy)
- return -ENOMEM;
+ if (!sg_policy) {
+ ret = -ENOMEM;
+ goto disable_fast_switch;
+ }
ret = sugov_kthread_create(sg_policy);
if (ret)
@@ -623,13 +645,11 @@ static int sugov_init(struct cpufreq_policy *policy)
if (ret)
goto fail;
- out:
+out:
mutex_unlock(&global_tunables_lock);
-
- cpufreq_enable_fast_switch(policy);
return 0;
- fail:
+fail:
policy->governor_data = NULL;
sugov_tunables_free(tunables);
@@ -640,6 +660,10 @@ free_sg_policy:
mutex_unlock(&global_tunables_lock);
sugov_policy_free(sg_policy);
+
+disable_fast_switch:
+ cpufreq_disable_fast_switch(policy);
+
pr_err("initialization failed (error %d)\n", ret);
return ret;
}
@@ -650,8 +674,6 @@ static int sugov_exit(struct cpufreq_policy *policy)
struct sugov_tunables *tunables = sg_policy->tunables;
unsigned int count;
- cpufreq_disable_fast_switch(policy);
-
mutex_lock(&global_tunables_lock);
count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
@@ -664,6 +686,7 @@ static int sugov_exit(struct cpufreq_policy *policy)
sugov_kthread_stop(sg_policy);
sugov_policy_free(sg_policy);
+ cpufreq_disable_fast_switch(policy);
return 0;
}
@@ -681,25 +704,19 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->next_freq = UINT_MAX;
sg_policy->work_in_progress = false;
sg_policy->need_freq_update = false;
+ sg_policy->cached_raw_freq = 0;
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+ memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->sg_policy = sg_policy;
- if (policy_is_shared(policy)) {
- sg_cpu->util = 0;
- sg_cpu->max = 0;
- sg_cpu->flags = SCHED_CPUFREQ_DL;
- sg_cpu->last_update = 0;
- sg_cpu->cached_raw_freq = 0;
- sg_cpu->iowait_boost = 0;
- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_shared);
- } else {
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_single);
- }
+ sg_cpu->flags = SCHED_CPUFREQ_DL;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ policy_is_shared(policy) ?
+ sugov_update_shared :
+ sugov_update_single);
}
return 0;
}
@@ -714,9 +731,10 @@ static int sugov_stop(struct cpufreq_policy *policy)
synchronize_sched();
- irq_work_sync(&sg_policy->irq_work);
- kthread_cancel_work_sync(&sg_policy->work);
-
+ if (!policy->fast_switch_enabled) {
+ irq_work_sync(&sg_policy->irq_work);
+ kthread_cancel_work_sync(&sg_policy->work);
+ }
return 0;
}
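
With cached_raw_freq moved into sugov_policy, all CPUs of a shared policy now reuse one cached value, and get_next_freq() keeps the same 1.25x headroom formula: next_freq = 1.25 * max_freq * util / max. A small worked example of that arithmetic follows; the numbers are purely illustrative.

/* Worked example of the get_next_freq() arithmetic: freq + (freq >> 2)
 * is a 1.25x factor, so with util = 512, max = 1024 and a 2 GHz
 * cpuinfo.max_freq (2000000 kHz):
 *
 *   raw = (2000000 + 500000) * 512 / 1024 = 1250000 kHz
 *
 * which cpufreq_driver_resolve_freq() then maps onto a real OPP.
 */
static unsigned int example_raw_freq(unsigned int max_freq,
                                     unsigned long util, unsigned long max)
{
        return (max_freq + (max_freq >> 2)) * util / max;
}
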
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 422438d43d90..853064319b0d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -35,6 +35,8 @@
#include "sched.h"
#include <trace/events/sched.h>
#include "tune.h"
+#include "walt.h"
+
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -6548,9 +6550,11 @@ static int find_new_capacity(struct energy_env *eenv,
return idx;
}
-static int group_idle_state(struct sched_group *sg)
+static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
{
int i, state = INT_MAX;
+ int src_in_grp, dst_in_grp;
+ long grp_util = 0;
/* Find the shallowest idle state in the sched group. */
for_each_cpu(i, sched_group_cpus(sg))
@@ -6559,6 +6563,54 @@ static int group_idle_state(struct sched_group *sg)
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;
+ /*
+ * Try to estimate if a deeper idle state is
+ * achievable when we move the task.
+ */
+ for_each_cpu(i, sched_group_cpus(sg))
+ grp_util += cpu_util(i);
+
+ src_in_grp = cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg));
+ dst_in_grp = cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg));
+ if (src_in_grp == dst_in_grp) {
+ /* both CPUs under consideration are in the same group or not in
+ * either group; migration should leave the idle state the same.
+ */
+ goto end;
+ }
+ /* add or remove util as appropriate to indicate what group util
+ * will be (worst case - no concurrent execution) after moving the task
+ */
+ grp_util += src_in_grp ? -eenv->util_delta : eenv->util_delta;
+
+ if (grp_util <=
+ ((long)sg->sgc->max_capacity * (int)sg->group_weight)) {
+ /* after moving, this group is at most partly
+ * occupied, so it should have some idle time.
+ */
+ int max_idle_state_idx = sg->sge->nr_idle_states - 2;
+ int new_state = grp_util * max_idle_state_idx;
+ if (grp_util <= 0)
+ /* group will have no util, use lowest state */
+ new_state = max_idle_state_idx + 1;
+ else {
+ /* for partially idle, linearly map util to idle
+ * states, excluding the lowest one. This does not
+ * correspond to the state we expect to enter in
+ * reality, but is an indication of what might happen.
+ */
+ new_state = min(max_idle_state_idx, (int)
+ (new_state / sg->sgc->max_capacity));
+ new_state = max_idle_state_idx - new_state;
+ }
+ state = new_state;
+ } else {
+ /* After moving, the group will be fully occupied
+ * so assume it will not be idle at all.
+ */
+ state = 0;
+ }
+end:
return state;
}
@@ -6631,8 +6683,9 @@ static int sched_group_energy(struct energy_env *eenv)
}
}
- idle_idx = group_idle_state(sg);
+ idle_idx = group_idle_state(eenv, sg);
group_util = group_norm_util(eenv, sg);
+
sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
>> SCHED_CAPACITY_SHIFT;
sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
@@ -7337,48 +7390,59 @@ static int start_cpu(bool boosted)
return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
}
-static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
+static inline int find_best_target(struct task_struct *p, int *backup_cpu,
+ bool boosted, bool prefer_idle)
{
- int target_cpu = -1;
- unsigned long target_util = prefer_idle ? ULONG_MAX : 0;
- unsigned long backup_capacity = ULONG_MAX;
- int best_idle_cpu = -1;
- int best_idle_cstate = INT_MAX;
- int backup_cpu = -1;
+ unsigned long best_idle_min_cap_orig = ULONG_MAX;
unsigned long min_util = boosted_task_util(p);
+ unsigned long target_capacity = ULONG_MAX;
+ unsigned long min_wake_util = ULONG_MAX;
+ unsigned long target_max_spare_cap = 0;
+ unsigned long target_util = ULONG_MAX;
+ unsigned long best_active_util = ULONG_MAX;
+ int best_idle_cstate = INT_MAX;
struct sched_domain *sd;
struct sched_group *sg;
- int cpu = start_cpu(boosted);
+ int best_active_cpu = -1;
+ int best_idle_cpu = -1;
+ int target_cpu = -1;
+ int cpu, i;
+
+ *backup_cpu = -1;
schedstat_inc(p, se.statistics.nr_wakeups_fbt_attempts);
schedstat_inc(this_rq(), eas_stats.fbt_attempts);
+ /* Find start CPU based on boost value */
+ cpu = start_cpu(boosted);
if (cpu < 0) {
schedstat_inc(p, se.statistics.nr_wakeups_fbt_no_cpu);
schedstat_inc(this_rq(), eas_stats.fbt_no_cpu);
- return target_cpu;
+ return -1;
}
+ /* Find SD for the start CPU */
sd = rcu_dereference(per_cpu(sd_ea, cpu));
-
if (!sd) {
schedstat_inc(p, se.statistics.nr_wakeups_fbt_no_sd);
schedstat_inc(this_rq(), eas_stats.fbt_no_sd);
- return target_cpu;
+ return -1;
}
+ /* Scan CPUs in all SDs */
sg = sd->groups;
-
do {
- int i;
-
for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
- unsigned long cur_capacity, new_util, wake_util;
- unsigned long min_wake_util = ULONG_MAX;
+ unsigned long capacity_curr = capacity_curr_of(i);
+ unsigned long capacity_orig = capacity_orig_of(i);
+ unsigned long wake_util, new_util;
if (!cpu_online(i))
continue;
+ if (walt_cpu_high_irqload(i))
+ continue;
+
/*
* p's blocked utilization is still accounted for on prev_cpu
* so prev_cpu will receive a negative bias due to the double
@@ -7393,65 +7457,204 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
* than the one required to boost the task.
*/
new_util = max(min_util, new_util);
-
- if (new_util > capacity_orig_of(i))
+ if (new_util > capacity_orig)
continue;
/*
- * Unconditionally favoring tasks that prefer idle cpus to
+ * Case A) Latency sensitive tasks
+ *
+ * Unconditionally favoring tasks that prefer idle CPU to
* improve latency.
+ *
+ * Looking for:
+ * - an idle CPU, whatever its idle_state is, since
+ * the first CPUs we explore are more likely to be
+ * reserved for latency sensitive tasks.
+ * - a non idle CPU where the task fits in its current
+ * capacity and has the maximum spare capacity.
+ * - a non idle CPU with lower contention from other
+ * tasks and running at the lowest possible OPP.
+ *
+ * The last two goals try to favor a non-idle CPU
+ * where the task can run as if it is "almost alone".
+ * A maximum spare capacity CPU is favoured since
+ * the task already fits into that CPU's capacity
+ * without waiting for an OPP change.
+ *
+ * The following code path is the only one in the CPUs
+ * exploration loop which is always used by
+ * prefer_idle tasks. It exits the loop with either a
+ * best_active_cpu or a target_cpu which should
+ * represent an optimal choice for latency sensitive
+ * tasks.
*/
- if (idle_cpu(i) && prefer_idle) {
- schedstat_inc(p, se.statistics.nr_wakeups_fbt_pref_idle);
- schedstat_inc(this_rq(), eas_stats.fbt_pref_idle);
- return i;
- }
+ if (prefer_idle) {
- cur_capacity = capacity_curr_of(i);
-
- if (new_util < cur_capacity) {
- if (cpu_rq(i)->nr_running) {
- /*
- * Find a target cpu with the lowest/highest
- * utilization if prefer_idle/!prefer_idle.
- */
- if (prefer_idle) {
- /* Favor the CPU that last ran the task */
- if (new_util > target_util ||
- wake_util > min_wake_util)
- continue;
- min_wake_util = wake_util;
- target_util = new_util;
- target_cpu = i;
- } else if (target_util < new_util) {
- target_util = new_util;
- target_cpu = i;
- }
- } else if (!prefer_idle) {
- int idle_idx = idle_get_state_idx(cpu_rq(i));
+ /*
+ * Case A.1: IDLE CPU
+ * Return the first IDLE CPU we find.
+ */
+ if (idle_cpu(i)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_fbt_pref_idle);
+ schedstat_inc(this_rq(), eas_stats.fbt_pref_idle);
- if (best_idle_cpu < 0 ||
- (sysctl_sched_cstate_aware &&
- best_idle_cstate > idle_idx)) {
- best_idle_cstate = idle_idx;
- best_idle_cpu = i;
- }
+ trace_sched_find_best_target(p,
+ prefer_idle, min_util,
+ cpu, best_idle_cpu,
+ best_active_cpu, i);
+
+ return i;
}
- } else if (backup_capacity > cur_capacity) {
- /* Find a backup cpu with least capacity. */
- backup_capacity = cur_capacity;
- backup_cpu = i;
+
+ /*
+ * Case A.2: Target ACTIVE CPU
+ * Favor CPUs with max spare capacity.
+ */
+ if ((capacity_curr > new_util) &&
+ (capacity_orig - new_util > target_max_spare_cap)) {
+ target_max_spare_cap = capacity_orig - new_util;
+ target_cpu = i;
+ continue;
+ }
+ if (target_cpu != -1)
+ continue;
+
+
+ /*
+ * Case A.3: Backup ACTIVE CPU
+ * Favor CPUs with:
+ * - lower utilization due to other tasks
+ * - lower utilization with the task in
+ */
+ if (wake_util > min_wake_util)
+ continue;
+ if (new_util > best_active_util)
+ continue;
+ min_wake_util = wake_util;
+ best_active_util = new_util;
+ best_active_cpu = i;
+ continue;
}
+
+ /*
+ * Case B) Non latency sensitive tasks on IDLE CPUs.
+ *
+ * Find an optimal backup IDLE CPU for non latency
+ * sensitive tasks.
+ *
+ * Looking for:
+ * - minimizing the capacity_orig,
+ * i.e. preferring LITTLE CPUs
+ * - favoring shallowest idle states
+ * i.e. avoid to wakeup deep-idle CPUs
+ *
+ * The following code path is used by non latency
+ * sensitive tasks if IDLE CPUs are available. If at
+ * least one such CPU is available, it sets the
+ * best_idle_cpu to the most suitable idle CPU to be
+ * selected.
+ *
+ * If idle CPUs are available, favour these CPUs to
+ * improve performance by spreading tasks.
+ * Indeed, the energy_diff() computed by the caller
+ * will take care of minimizing energy
+ * consumption without affecting performance.
+ */
+ if (idle_cpu(i)) {
+ int idle_idx = idle_get_state_idx(cpu_rq(i));
+
+ /* Select idle CPU with lower cap_orig */
+ if (capacity_orig > best_idle_min_cap_orig)
+ continue;
+
+ /*
+ * Skip CPUs in deeper idle state, but only
+ * if they are also less energy efficient.
+ * IOW, prefer a deep IDLE LITTLE CPU vs a
+ * shallow idle big CPU.
+ */
+ if (sysctl_sched_cstate_aware &&
+ best_idle_cstate <= idle_idx)
+ continue;
+
+ /* Keep track of best idle CPU */
+ best_idle_min_cap_orig = capacity_orig;
+ best_idle_cstate = idle_idx;
+ best_idle_cpu = i;
+ continue;
+ }
+
+ /*
+ * Case C) Non latency sensitive tasks on ACTIVE CPUs.
+ *
+ * Pack tasks in the most energy efficient capacities.
+ *
+ * This task packing strategy prefers more energy
+ * efficient CPUs (i.e. pack on smaller maximum
+ * capacity CPUs) while also trying to spread tasks to
+ * run them all at the lower OPP.
+ *
+ * This assumes for example that it's more energy
+ * efficient to run two tasks on two CPUs at a lower
+ * OPP than packing both on a single CPU but running
+ * that CPU at a higher OPP.
+ *
+ * Thus, this case keeps track of the CPU with the
+ * smallest maximum capacity and highest spare maximum
+ * capacity.
+ */
+
+ /* Favor CPUs with smaller capacity */
+ if (capacity_orig > target_capacity)
+ continue;
+
+ /* Favor CPUs with maximum spare capacity */
+ if ((capacity_orig - new_util) < target_max_spare_cap)
+ continue;
+
+ target_max_spare_cap = capacity_orig - new_util;
+ target_capacity = capacity_orig;
+ target_util = new_util;
+ target_cpu = i;
}
+
} while (sg = sg->next, sg != sd->groups);
- if (target_cpu < 0)
- target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
+ /*
+ * For non latency sensitive tasks, cases B and C in the previous loop,
+ * we pick the best IDLE CPU only if we were not able to find a target
+ * ACTIVE CPU.
+ *
+ * Policies priorities:
+ *
+ * - prefer_idle tasks:
+ *
+ * a) IDLE CPU available, we return immediately
+ * b) ACTIVE CPU where task fits and has the bigger maximum spare
+ * capacity (i.e. target_cpu)
+ * c) ACTIVE CPU with less contention due to other tasks
+ * (i.e. best_active_cpu)
+ *
+ * - NON prefer_idle tasks:
+ *
+ * a) ACTIVE CPU: target_cpu
+ * b) IDLE CPU: best_idle_cpu
+ */
+ if (target_cpu == -1)
+ target_cpu = prefer_idle
+ ? best_active_cpu
+ : best_idle_cpu;
+ else
+ *backup_cpu = prefer_idle
+ ? best_active_cpu
+ : best_idle_cpu;
- if (target_cpu >= 0) {
- schedstat_inc(p, se.statistics.nr_wakeups_fbt_count);
- schedstat_inc(this_rq(), eas_stats.fbt_count);
- }
+ trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
+ best_idle_cpu, best_active_cpu,
+ target_cpu);
+
+ schedstat_inc(p, se.statistics.nr_wakeups_fbt_count);
+ schedstat_inc(this_rq(), eas_stats.fbt_count);
return target_cpu;
}
@@ -7483,7 +7686,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
{
struct sched_domain *sd;
- int target_cpu = prev_cpu, tmp_target;
+ int target_cpu = prev_cpu, tmp_target, tmp_backup;
bool boosted, prefer_idle;
schedstat_inc(p, se.statistics.nr_wakeups_secb_attempts);
@@ -7508,9 +7711,11 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
prefer_idle = 0;
#endif
+ sync_entity_load_avg(&p->se);
+
sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
/* Find a cpu with sufficient capacity */
- tmp_target = find_best_target(p, boosted, prefer_idle);
+ tmp_target = find_best_target(p, &tmp_backup, boosted, prefer_idle);
if (!sd)
goto unlock;
@@ -7539,10 +7744,15 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
}
if (energy_diff(&eenv) >= 0) {
- schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
- schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
- target_cpu = prev_cpu;
- goto unlock;
+ /* No energy saving for target_cpu, try backup */
+ target_cpu = tmp_backup;
+ eenv.dst_cpu = target_cpu;
+ if (tmp_backup < 0 || energy_diff(&eenv) >= 0) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
+ schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
+ target_cpu = prev_cpu;
+ goto unlock;
+ }
}
schedstat_inc(p, se.statistics.nr_wakeups_secb_nrg_sav);
@@ -7584,16 +7794,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
return select_best_cpu(p, prev_cpu, 0, sync);
#endif
- if (sd_flag & SD_BALANCE_WAKE) {
- /*
- * do wake_cap unconditionally as it causes task and cpu
- * utilization to be synced, and we need that for energy
- * aware wakeups
- */
- int _wake_cap = wake_cap(p, cpu, prev_cpu);
- want_affine = !wake_wide(p) && !_wake_cap
+ if (sd_flag & SD_BALANCE_WAKE)
+ want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
&& cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
- }
if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
return select_energy_cpu_brute(p, prev_cpu, sync);
@@ -9189,6 +9392,38 @@ group_type group_classify(struct sched_group *group,
return group_other;
}
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * idle load balancing data
+ * - used by the nohz balance, but we want it available here
+ * so that we can see which CPUs have no tick.
+ */
+static struct {
+ cpumask_var_t idle_cpus_mask;
+ atomic_t nr_cpus;
+ unsigned long next_balance; /* in jiffy units */
+} nohz ____cacheline_aligned;
+
+static inline void update_cpu_stats_if_tickless(struct rq *rq)
+{
+ /* only called from update_sg_lb_stats when irqs are disabled */
+ if (cpumask_test_cpu(rq->cpu, nohz.idle_cpus_mask)) {
+ /* rate limit updates to once per jiffy at most */
+ if (READ_ONCE(jiffies) <= rq->last_load_update_tick)
+ return;
+
+ raw_spin_lock(&rq->lock);
+ update_rq_clock(rq);
+ update_idle_cpu_load(rq);
+ update_cfs_rq_load_avg(rq->clock_task, &rq->cfs, false);
+ raw_spin_unlock(&rq->lock);
+ }
+}
+
+#else
+static inline void update_cpu_stats_if_tickless(struct rq *rq) { }
+#endif
+
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
@@ -9220,6 +9455,12 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if (cpu_isolated(i))
continue;
+ /* if we are entering idle and there are CPUs with
+ * their tick stopped, do an update for them
+ */
+ if (env->idle == CPU_NEWLY_IDLE)
+ update_cpu_stats_if_tickless(rq);
+
/* Bias balancing toward cpus of our domain */
if (local_group)
load = target_load(i, load_idx);
@@ -10680,11 +10921,6 @@ static inline int on_null_domain(struct rq *rq)
* needed, they will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
*/
-static struct {
- cpumask_var_t idle_cpus_mask;
- atomic_t nr_cpus;
- unsigned long next_balance; /* in jiffy units */
-} nohz ____cacheline_aligned;
#ifdef CONFIG_SCHED_HMP
static inline int find_new_hmp_ilb(int type)
@@ -11111,6 +11347,10 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
(!energy_aware() || cpu_overutilized(cpu)))
return true;
+ /* Do idle load balance if there is a misfit task */
+ if (energy_aware() && rq->misfit_task)
+ return 1;
+
return (rq->nr_running >= 2);
}
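The fair.c hunks above boil down to one behavioural change in select_energy_cpu_brute(): find_best_target() now also reports a backup CPU, and when the energy model sees no saving for the primary target the backup is evaluated before falling back to prev_cpu. A minimal standalone sketch of that decision order, with energy_delta() and pick_energy_cpu() as illustrative stand-ins rather than kernel functions:

#include <stdio.h>

/* Illustrative stand-in for energy_diff(): negative means "saves energy". */
static int energy_delta(int dst_cpu)
{
	/* pretend only CPU 2 saves energy in this example */
	return dst_cpu == 2 ? -5 : 3;
}

static int pick_energy_cpu(int prev_cpu, int target_cpu, int backup_cpu)
{
	if (target_cpu >= 0 && energy_delta(target_cpu) < 0)
		return target_cpu;
	/* no saving on the primary target: try the backup before giving up */
	if (backup_cpu >= 0 && energy_delta(backup_cpu) < 0)
		return backup_cpu;
	return prev_cpu;
}

int main(void)
{
	printf("chosen cpu: %d\n", pick_energy_cpu(0, 1, 2)); /* prints 2 */
	return 0;
}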
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c03d51a017bf..ee095f4e7230 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1823,6 +1823,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
* the best one based on our affinity and topology.
*/
+retry:
for_each_sched_cluster(cluster) {
if (boost_on_big && cluster->capacity != max_possible_capacity)
continue;
@@ -1830,6 +1831,15 @@ static int find_lowest_rq_hmp(struct task_struct *task)
cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
cpumask_andnot(&candidate_mask, &candidate_mask,
cpu_isolated_mask);
+ /*
+ * When placement boost is active, if there is no eligible CPU
+ * in the highest capacity cluster, we fall back to the other
+ * clusters. So clear the CPUs of the traversed cluster from
+ * the lowest_mask.
+ */
+ if (unlikely(boost_on_big))
+ cpumask_andnot(lowest_mask, lowest_mask,
+ &cluster->cpus);
if (cpumask_empty(&candidate_mask))
continue;
@@ -1869,6 +1879,11 @@ static int find_lowest_rq_hmp(struct task_struct *task)
break;
}
+ if (unlikely(boost_on_big && best_cpu == -1)) {
+ boost_on_big = 0;
+ goto retry;
+ }
+
return best_cpu;
}
#endif /* CONFIG_SCHED_HMP */
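The rt.c hunk is a retry-with-relaxed-constraint pattern: while placement boost is active the search is limited to the highest-capacity cluster, and only if that yields nothing is boost_on_big cleared and the scan repeated over all clusters. A self-contained sketch of the same control flow (the CPU/cluster data below is invented for illustration):

#include <stdio.h>

#define NCPU 4

static int eligible[NCPU] = { 0, 0, 0, 0 };   /* nothing eligible -> forces the relaxed retry */
static int is_big[NCPU]   = { 0, 0, 1, 1 };

static int find_cpu(int boost_on_big)
{
retry:
	for (int cpu = 0; cpu < NCPU; cpu++) {
		if (boost_on_big && !is_big[cpu])
			continue;            /* only big CPUs while boosted */
		if (eligible[cpu])
			return cpu;
	}
	if (boost_on_big) {                  /* nothing found: relax the constraint and rescan */
		boost_on_big = 0;
		goto retry;
	}
	return -1;
}

int main(void)
{
	printf("best cpu: %d\n", find_cpu(1)); /* -1 here; mark a CPU eligible to see the retry succeed */
	return 0;
}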
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 6e053bd9830c..92c3aae8e056 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -72,7 +72,15 @@ static cpumask_t mpc_mask = CPU_MASK_ALL;
__read_mostly unsigned int walt_ravg_window = 20000000;
/* Min window size (in ns) = 10ms */
+#ifdef CONFIG_HZ_300
+/*
+ * The tick interval becomes 3333333 ns due to
+ * a rounding error when HZ=300.
+ */
+#define MIN_SCHED_RAVG_WINDOW (3333333 * 6)
+#else
#define MIN_SCHED_RAVG_WINDOW 10000000
+#endif
/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000
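The HZ=300 case follows from the arithmetic: one tick is NSEC_PER_SEC / HZ = 1000000000 / 300, which truncates to 3333333 ns, so a 10 ms window is no longer an exact multiple of the tick (three ticks are 9999999 ns). The replacement minimum, 3333333 * 6, is exactly six ticks, i.e. 19999998 ns or roughly 20 ms. A quick check of those numbers:

#include <stdio.h>

int main(void)
{
	long nsec_per_sec = 1000000000L;
	long hz = 300;
	long tick = nsec_per_sec / hz;          /* 3333333, truncated */

	printf("tick       = %ld ns\n", tick);
	printf("3 ticks    = %ld ns (vs 10ms = 10000000 ns)\n", 3 * tick);
	printf("min window = %ld ns (3333333 * 6)\n", 3333333L * 6);
	return 0;
}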
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index e181c87a928d..f56c4da16d0b 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -55,6 +55,8 @@ static inline void walt_migrate_sync_cpu(int cpu) { }
static inline void walt_init_cpu_efficiency(void) { }
static inline u64 walt_ktime_clock(void) { return 0; }
+#define walt_cpu_high_irqload(cpu) false
+
#endif /* CONFIG_SCHED_WALT */
extern unsigned int walt_disabled;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f27d2ba78d14..8576e6385d63 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2380,9 +2380,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
if (write) {
if (*negp)
return -EINVAL;
+ if (*lvalp > UINT_MAX)
+ return -EINVAL;
*valp = *lvalp;
} else {
unsigned int val = *valp;
+ *negp = false;
*lvalp = (unsigned long)val;
}
return 0;
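The sysctl fix guards the narrowing store from unsigned long into unsigned int: on 64-bit kernels a written value above UINT_MAX would otherwise be truncated silently instead of rejected. The same check in a standalone form (parse_uint() below is an illustrative helper, not the kernel routine):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Reject negative input and anything that will not fit in an unsigned int. */
static int parse_uint(const char *s, unsigned int *out)
{
	char *end;
	unsigned long v;

	if (s[0] == '-')
		return -EINVAL;
	errno = 0;
	v = strtoul(s, &end, 10);
	if (errno || *end != '\0' || v > UINT_MAX)
		return -EINVAL;
	*out = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int val;

	printf("4294967295 -> %d\n", parse_uint("4294967295", &val)); /* accepted */
	printf("4294967296 -> %d\n", parse_uint("4294967296", &val)); /* rejected: does not fit */
	return 0;
}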
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 1a4de0022cc5..ceec77c652b5 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -848,7 +848,8 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
* Rate limit to the tick as a hot fix to prevent DOS. Will be
* mopped up later.
*/
- if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
+ if (timr->it.alarm.interval.tv64 &&
+ ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
exp = timespec_to_ktime(new_setting->it_value);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ec2102104cb8..333f627a3a3b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -896,6 +896,18 @@ ktime_t tick_nohz_get_sleep_length(void)
return ts->sleep_length;
}
+/**
+ * tick_nohz_get_idle_calls - return the current idle calls counter value
+ *
+ * Called from the schedutil frequency scaling governor in scheduler context.
+ */
+unsigned long tick_nohz_get_idle_calls(void)
+{
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+ return ts->idle_calls;
+}
+
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
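tick_nohz_get_idle_calls() exposes the per-CPU idle-entry counter so a caller in scheduler context can tell whether the CPU has gone idle since it last looked; the schedutil governor mentioned in the kernel-doc compares successive readings for exactly that purpose. A minimal sketch of the idea, with the governor structure and the fake counter below being illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct gov_cpu {
	unsigned long saved_idle_calls;   /* snapshot taken at the previous update */
};

/* Stand-in for tick_nohz_get_idle_calls(); a real counter would come from the tick code. */
static unsigned long fake_idle_calls;

static bool cpu_was_idle_since_last_update(struct gov_cpu *gc)
{
	unsigned long now = fake_idle_calls;
	bool idled = (now != gc->saved_idle_calls);

	gc->saved_idle_calls = now;
	return idled;
}

int main(void)
{
	struct gov_cpu gc = { 0 };

	printf("%d\n", cpu_was_idle_since_last_update(&gc)); /* 0: counter unchanged */
	fake_idle_calls++;                                    /* CPU entered idle once */
	printf("%d\n", cpu_was_idle_since_last_update(&gc)); /* 1 */
	return 0;
}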
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 34b2a0d5cf1a..eba904bae48c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3535,7 +3535,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
int exclude_mod = 0;
int found = 0;
int ret;
- int clear_filter;
+ int clear_filter = 0;
if (func) {
func_g.type = filter_parse_regex(func, len, &func_g.search,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 60d246c4eefa..a579a874045b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1362,7 +1362,7 @@ static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
unsigned *map_cmdline_to_pid;
- unsigned *saved_tgids;
+ unsigned *map_cmdline_to_tgid;
unsigned cmdline_num;
int cmdline_idx;
char *saved_cmdlines;
@@ -1396,9 +1396,10 @@ static int allocate_cmdlines_buffer(unsigned int val,
return -ENOMEM;
}
- s->saved_tgids = kmalloc_array(val, sizeof(*s->saved_tgids),
- GFP_KERNEL);
- if (!s->saved_tgids) {
+ s->map_cmdline_to_tgid = kmalloc_array(val,
+ sizeof(*s->map_cmdline_to_tgid),
+ GFP_KERNEL);
+ if (!s->map_cmdline_to_tgid) {
kfree(s->map_cmdline_to_pid);
kfree(s->saved_cmdlines);
return -ENOMEM;
@@ -1410,8 +1411,8 @@ static int allocate_cmdlines_buffer(unsigned int val,
sizeof(s->map_pid_to_cmdline));
memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
val * sizeof(*s->map_cmdline_to_pid));
- memset(s->saved_tgids, 0,
- val * sizeof(*s->saved_tgids));
+ memset(s->map_cmdline_to_tgid, NO_CMDLINE_MAP,
+ val * sizeof(*s->map_cmdline_to_tgid));
return 0;
}
@@ -1577,14 +1578,17 @@ static int trace_save_cmdline(struct task_struct *tsk)
if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
return 0;
+ preempt_disable();
/*
* It's not the end of the world if we don't get
* the lock, but we also don't want to spin
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!arch_spin_trylock(&trace_cmdline_lock))
+ if (!arch_spin_trylock(&trace_cmdline_lock)) {
+ preempt_enable();
return 0;
+ }
idx = savedcmd->map_pid_to_cmdline[tsk->pid];
if (idx == NO_CMDLINE_MAP) {
@@ -1607,8 +1611,9 @@ static int trace_save_cmdline(struct task_struct *tsk)
}
set_cmdline(idx, tsk->comm);
- savedcmd->saved_tgids[idx] = tsk->tgid;
+ savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
return 1;
}
@@ -1650,19 +1655,29 @@ void trace_find_cmdline(int pid, char comm[])
preempt_enable();
}
-int trace_find_tgid(int pid)
+static int __find_tgid_locked(int pid)
{
unsigned map;
int tgid;
- preempt_disable();
- arch_spin_lock(&trace_cmdline_lock);
map = savedcmd->map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
- tgid = savedcmd->saved_tgids[map];
+ tgid = savedcmd->map_cmdline_to_tgid[map];
else
tgid = -1;
+ return tgid;
+}
+
+int trace_find_tgid(int pid)
+{
+ int tgid;
+
+ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
+
+ tgid = __find_tgid_locked(pid);
+
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
@@ -3979,10 +3994,15 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
{
char buf[64];
int r;
+ unsigned int n;
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
- r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
+ n = savedcmd->cmdline_num;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
+
+ r = scnprintf(buf, sizeof(buf), "%u\n", n);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
@@ -3991,7 +4011,7 @@ static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
kfree(s->saved_cmdlines);
kfree(s->map_cmdline_to_pid);
- kfree(s->saved_tgids);
+ kfree(s->map_cmdline_to_tgid);
kfree(s);
}
@@ -4008,10 +4028,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
return -ENOMEM;
}
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
savedcmd_temp = savedcmd;
savedcmd = s;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
free_saved_cmdlines_buffer(savedcmd_temp);
return 0;
@@ -4230,33 +4252,61 @@ tracing_saved_tgids_read(struct file *file, char __user *ubuf,
char *file_buf;
char *buf;
int len = 0;
- int pid;
int i;
+ int *pids;
+ int n = 0;
- file_buf = kmalloc(savedcmd->cmdline_num*(16+1+16), GFP_KERNEL);
- if (!file_buf)
- return -ENOMEM;
+ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
- buf = file_buf;
+ pids = kmalloc_array(savedcmd->cmdline_num, 2*sizeof(int), GFP_KERNEL);
+ if (!pids) {
+ arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
+ return -ENOMEM;
+ }
for (i = 0; i < savedcmd->cmdline_num; i++) {
- int tgid;
- int r;
+ int pid;
pid = savedcmd->map_cmdline_to_pid[i];
if (pid == -1 || pid == NO_CMDLINE_MAP)
continue;
- tgid = trace_find_tgid(pid);
- r = sprintf(buf, "%d %d\n", pid, tgid);
+ pids[n] = pid;
+ pids[n+1] = __find_tgid_locked(pid);
+ n += 2;
+ }
+ arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
+
+ if (n == 0) {
+ kfree(pids);
+ return 0;
+ }
+
+ /* enough to hold max pair of pids + space, newline and nul */
+ len = n * 12;
+ file_buf = kmalloc(len, GFP_KERNEL);
+ if (!file_buf) {
+ kfree(pids);
+ return -ENOMEM;
+ }
+
+ buf = file_buf;
+ for (i = 0; i < n && len > 0; i += 2) {
+ int r;
+
+ r = snprintf(buf, len, "%d %d\n", pids[i], pids[i+1]);
buf += r;
- len += r;
+ len -= r;
}
len = simple_read_from_buffer(ubuf, cnt, ppos,
- file_buf, len);
+ file_buf, buf - file_buf);
kfree(file_buf);
+ kfree(pids);
return len;
}
@@ -6847,6 +6897,7 @@ static int instance_rmdir(const char *name)
}
kfree(tr->topts);
+ free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
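The tracing_saved_tgids_read() rewrite follows a snapshot-then-format pattern: the pid/tgid pairs are copied into a private array while trace_cmdline_lock is held with preemption disabled, and the slower string formatting happens only after both are released. The shape of that pattern in userspace form, with lock()/unlock() standing in for the spinlock/preemption pair:

#include <stdio.h>
#include <stdlib.h>

static int table[4][2] = { {100, 10}, {101, 10}, {102, 11}, {103, 11} };

/* Stand-ins for arch_spin_lock()+preempt_disable() and the matching unlock pair. */
static void lock(void)   { }
static void unlock(void) { }

static char *dump_pairs(void)
{
	int snap[4][2];
	char *buf;
	int len = 0;

	lock();                               /* copy quickly under the lock... */
	for (int i = 0; i < 4; i++) {
		snap[i][0] = table[i][0];
		snap[i][1] = table[i][1];
	}
	unlock();                             /* ...then format without it */

	buf = malloc(4 * 32);
	if (!buf)
		return NULL;
	for (int i = 0; i < 4; i++)
		len += sprintf(buf + len, "%d %d\n", snap[i][0], snap[i][1]);
	return buf;
}

int main(void)
{
	char *s = dump_pairs();

	if (s) {
		fputs(s, stdout);
		free(s);
	}
	return 0;
}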
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 12ea4ea619ee..e9092a0247bf 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -659,30 +659,25 @@ static int create_trace_kprobe(int argc, char **argv)
pr_info("Probe point is not specified.\n");
return -EINVAL;
}
- if (isdigit(argv[1][0])) {
- if (is_return) {
- pr_info("Return probe point must be a symbol.\n");
- return -EINVAL;
- }
- /* an address specified */
- ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
- if (ret) {
- pr_info("Failed to parse address.\n");
- return ret;
- }
- } else {
+
+ /* Try to parse an address. If that fails, try to read the
+ * input as a symbol. */
+ if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
/* a symbol specified */
symbol = argv[1];
/* TODO: support .init module functions */
ret = traceprobe_split_symbol_offset(symbol, &offset);
if (ret) {
- pr_info("Failed to parse symbol.\n");
+ pr_info("Failed to parse either an address or a symbol.\n");
return ret;
}
if (offset && is_return) {
pr_info("Return probe must be used without offset.\n");
return -EINVAL;
}
+ } else if (is_return) {
+ pr_info("Return probe point must be a symbol.\n");
+ return -EINVAL;
}
argc -= 2; argv += 2;
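The create_trace_kprobe() change drops the isdigit() guess: the argument is first run through the numeric parser and is treated as a symbol only when that parse fails, with the error message updated to cover both cases. The same number-first-then-symbol flow in a standalone form (parse_probe_point() is an illustrative helper):

#include <stdio.h>
#include <stdlib.h>

/* Returns 1 and fills *addr for a numeric probe point, 0 for a symbol. */
static int parse_probe_point(const char *arg, unsigned long *addr, const char **symbol)
{
	char *end;
	unsigned long v = strtoul(arg, &end, 0);   /* base 0: decimal, octal or 0x... */

	if (end != arg && *end == '\0') {
		*addr = v;
		return 1;
	}
	*symbol = arg;
	return 0;
}

int main(void)
{
	unsigned long addr;
	const char *sym = NULL;

	if (parse_probe_point("0xffffffff81000000", &addr, &sym))
		printf("address: %#lx\n", addr);
	if (!parse_probe_point("do_sys_open+0x4", &addr, &sym))
		printf("symbol: %s\n", sym);
	return 0;
}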
diff --git a/mm/mmap.c b/mm/mmap.c
index 092729c2cb72..16743bf76a88 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2206,7 +2206,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
/* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
- if (address >= TASK_SIZE)
+ if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
address += PAGE_SIZE;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 94fecacf0ddc..5f6e29f25af9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2676,7 +2676,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
if (!populated_zone(zone))
continue;
- classzone_idx = requested_highidx;
+ classzone_idx = gfp_zone(sc->gfp_mask);
while (!populated_zone(zone->zone_pgdat->node_zones +
classzone_idx))
classzone_idx--;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index e20ae2d3c498..5e4199d5a388 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -292,6 +292,10 @@ static void vlan_sync_address(struct net_device *dev,
if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
return;
+ /* vlan continues to inherit address of lower device */
+ if (vlan_dev_inherit_address(vlandev, dev))
+ goto out;
+
/* vlan address was different from the old address and is equal to
* the new address */
if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -304,6 +308,7 @@ static void vlan_sync_address(struct net_device *dev,
!ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_add(dev, vlandev->dev_addr);
+out:
ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9d010a09ab98..cc1557978066 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev);
static inline u32 vlan_get_ingress_priority(struct net_device *dev,
u16 vlan_tci)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index fded86508117..ca4dc9031073 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -244,6 +244,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
}
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev)
+{
+ if (dev->addr_assign_type != NET_ADDR_STOLEN)
+ return false;
+
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return true;
+}
+
static int vlan_dev_open(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -254,7 +265,8 @@ static int vlan_dev_open(struct net_device *dev)
!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
- if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+ !vlan_dev_inherit_address(dev, real_dev)) {
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
@@ -558,8 +570,10 @@ static int vlan_dev_init(struct net_device *dev)
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
- if (is_zero_ether_addr(dev->dev_addr))
- eth_hw_addr_inherit(dev, real_dev);
+ if (is_zero_ether_addr(dev->dev_addr)) {
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ dev->addr_assign_type = NET_ADDR_STOLEN;
+ }
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6f2c704e41ab..b166b3e441a5 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <net/bluetooth/bluetooth.h>
@@ -524,7 +525,7 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
if (err)
return false;
- return !memcmp(bdaddr->b, hash, 3);
+ return !crypto_memneq(bdaddr->b, hash, 3);
}
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
@@ -577,7 +578,7 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
/* This is unlikely, but we need to check that
* we didn't accidentally generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
smp->debug_key = false;
@@ -991,7 +992,7 @@ static u8 smp_random(struct smp_chan *smp)
if (ret)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+ if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) {
BT_ERR("Pairing failed (confirmation values mismatch)");
return SMP_CONFIRM_FAILED;
}
@@ -1491,7 +1492,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
smp->rrnd, r, cfm))
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
smp->passkey_round++;
@@ -1875,7 +1876,7 @@ static u8 sc_send_public_key(struct smp_chan *smp)
/* This is unlikely, but we need to check that
* we didn't accidentally generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
}
@@ -2140,7 +2141,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
} else {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -2621,7 +2622,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+ if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16))
return SMP_CONFIRM_FAILED;
}
@@ -2654,7 +2655,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
else
hcon->pending_sec_level = BT_SECURITY_FIPS;
- if (!memcmp(debug_pk, smp->remote_pk, 64))
+ if (!crypto_memneq(debug_pk, smp->remote_pk, 64))
set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
if (smp->method == DSP_PASSKEY) {
@@ -2753,7 +2754,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(check->e, e, 16))
+ if (crypto_memneq(check->e, e, 16))
return SMP_DHKEY_CHECK_FAILED;
if (!hcon->out) {
@@ -3463,7 +3464,7 @@ static int __init test_ah(struct crypto_blkcipher *tfm_aes)
if (err)
return err;
- if (memcmp(res, exp, 3))
+ if (crypto_memneq(res, exp, 3))
return -EINVAL;
return 0;
@@ -3493,7 +3494,7 @@ static int __init test_c1(struct crypto_blkcipher *tfm_aes)
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3518,7 +3519,7 @@ static int __init test_s1(struct crypto_blkcipher *tfm_aes)
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3550,7 +3551,7 @@ static int __init test_f4(struct crypto_hash *tfm_cmac)
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3584,10 +3585,10 @@ static int __init test_f5(struct crypto_hash *tfm_cmac)
if (err)
return err;
- if (memcmp(mackey, exp_mackey, 16))
+ if (crypto_memneq(mackey, exp_mackey, 16))
return -EINVAL;
- if (memcmp(ltk, exp_ltk, 16))
+ if (crypto_memneq(ltk, exp_ltk, 16))
return -EINVAL;
return 0;
@@ -3620,7 +3621,7 @@ static int __init test_f6(struct crypto_hash *tfm_cmac)
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3674,7 +3675,7 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
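Every comparison in smp.c that involves secret or attacker-influenced material is switched from memcmp() to crypto_memneq(): memcmp() may return as soon as the first differing byte is found, which leaks timing information, while crypto_memneq() is built to take the same time regardless of where the difference lies. A userspace illustration of a constant-time inequality test in the same spirit (not the kernel implementation):

#include <stddef.h>
#include <stdio.h>

/* Returns nonzero if the buffers differ; runtime does not depend on where. */
static unsigned char ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;

	for (size_t i = 0; i < n; i++)
		diff |= pa[i] ^ pb[i];   /* accumulate differences, never branch early */
	return diff;
}

int main(void)
{
	unsigned char x[16] = { 0 }, y[16] = { 0 };

	printf("%u\n", ct_memneq(x, y, sizeof(x)));  /* 0: equal */
	y[0] = 1;
	printf("%u\n", ct_memneq(x, y, sizeof(x)));  /* nonzero: differ */
	return 0;
}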
diff --git a/net/core/dev.c b/net/core/dev.c
index 9a3aaba15c5a..1ce844e41eaf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2552,9 +2552,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
if (tx_path)
- return skb->ip_summed != CHECKSUM_PARTIAL;
- else
- return skb->ip_summed == CHECKSUM_NONE;
+ return skb->ip_summed != CHECKSUM_PARTIAL &&
+ skb->ip_summed != CHECKSUM_NONE;
+
+ return skb->ip_summed == CHECKSUM_NONE;
}
/**
@@ -2573,11 +2574,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path)
{
+ struct sk_buff *segs;
+
if (unlikely(skb_needs_check(skb, tx_path))) {
int err;
- skb_warn_bad_offload(skb);
-
+ /* We're going to init ->check field in TCP or UDP header */
err = skb_cow_head(skb, 0);
if (err < 0)
return ERR_PTR(err);
@@ -2592,7 +2594,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
- return skb_mac_gso_segment(skb, features);
+ segs = skb_mac_gso_segment(skb, features);
+
+ if (unlikely(skb_needs_check(skb, tx_path)))
+ skb_warn_bad_offload(skb);
+
+ return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
@@ -4383,6 +4390,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
}
EXPORT_SYMBOL(gro_find_complete_by_type);
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+ skb_dst_drop(skb);
+ kmem_cache_free(skbuff_head_cache, skb);
+}
+
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
switch (ret) {
@@ -4396,12 +4409,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
break;
case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
- skb_dst_drop(skb);
- kmem_cache_free(skbuff_head_cache, skb);
- } else {
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
__kfree_skb(skb);
- }
break;
case GRO_HELD:
@@ -4467,10 +4478,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
break;
case GRO_DROP:
- case GRO_MERGED_FREE:
napi_reuse_skb(napi, skb);
break;
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
+ napi_reuse_skb(napi, skb);
+ break;
+
case GRO_MERGED:
break;
}
@@ -7088,8 +7105,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
+ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+ storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 57e5938fd669..9bdd7847ef3a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2272,6 +2272,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
+ dst_release(sk->sk_rx_dst);
+ sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 710101376a76..69d52fee247e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2166,8 +2166,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int cnt, oldcnt;
- int err;
+ int cnt, oldcnt, lost;
unsigned int mss;
/* Use SACK to deduce losses of new sequences sent during recovery */
const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
@@ -2207,9 +2206,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
break;
mss = tcp_skb_mss(skb);
- err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
- mss, GFP_ATOMIC);
- if (err < 0)
+ /* If needed, chop off the prefix to mark as lost. */
+ lost = (packets - oldcnt) * mss;
+ if (lost < skb->len &&
+ tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
break;
cnt = packets;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 55e928819846..4b707ad4ffbd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1801,17 +1801,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
{
- if (ifp->flags&IFA_F_PERMANENT) {
- spin_lock_bh(&ifp->lock);
- addrconf_del_dad_work(ifp);
- ifp->flags |= IFA_F_TENTATIVE;
- if (dad_failed)
- ifp->flags |= IFA_F_DADFAILED;
- spin_unlock_bh(&ifp->lock);
- if (dad_failed)
- ipv6_ifa_notify(0, ifp);
- in6_ifa_put(ifp);
- } else if (ifp->flags&IFA_F_TEMPORARY) {
+ if (ifp->flags&IFA_F_TEMPORARY) {
struct inet6_ifaddr *ifpub;
spin_lock_bh(&ifp->lock);
ifpub = ifp->ifpub;
@@ -1824,6 +1814,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
+ } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
+ spin_lock_bh(&ifp->lock);
+ addrconf_del_dad_work(ifp);
+ ifp->flags |= IFA_F_TENTATIVE;
+ if (dad_failed)
+ ifp->flags |= IFA_F_DADFAILED;
+ spin_unlock_bh(&ifp->lock);
+ if (dad_failed)
+ ipv6_ifa_notify(0, ifp);
+ in6_ifa_put(ifp);
} else {
ipv6_del_addr(ifp);
}
@@ -3212,6 +3212,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct inet6_dev *idev = __in6_dev_get(dev);
+ struct net *net = dev_net(dev);
int run_pending = 0;
int err;
@@ -3227,7 +3228,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGEMTU:
/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
if (dev->mtu < IPV6_MIN_MTU) {
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
break;
}
@@ -3340,7 +3341,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
* IPV6_MIN_MTU stop IPv6 on this interface.
*/
if (dev->mtu < IPV6_MIN_MTU)
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
}
break;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1ac06723f0d7..f60e8caea767 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -767,10 +767,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
goto next_iter;
}
- if (iter->dst.dev == rt->dst.dev &&
- iter->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&iter->rt6i_gateway,
- &rt->rt6i_gateway)) {
+ if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->rt6i_nsiblings)
rt->rt6i_nsiblings = 0;
if (!(iter->rt6i_flags & RTF_EXPIRES))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 72b53b83a720..6177acdf60ac 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1360,7 +1360,7 @@ emsgsize:
*/
cork->length += length;
- if (((length > mtu) ||
+ if ((((length + fragheaderlen) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) &&
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e98613d2f34f..dd37fe0b6a49 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2822,17 +2822,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
struct rt6_info *rt, struct fib6_config *r_cfg)
{
struct rt6_nh *nh;
- struct rt6_info *rtnh;
int err = -EEXIST;
list_for_each_entry(nh, rt6_nh_list, next) {
/* check if rt6_info already exists */
- rtnh = nh->rt6_info;
-
- if (rtnh->dst.dev == rt->dst.dev &&
- rtnh->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&rtnh->rt6i_gateway,
- &rt->rt6i_gateway))
+ if (rt6_duplicate_nexthop(nh->rt6_info, rt))
return err;
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index e67c28e614b9..2e1050ec2cf0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -63,8 +63,13 @@ struct pfkey_sock {
} u;
struct sk_buff *skb;
} dump;
+ struct mutex dump_lock;
};
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ u16 *family);
+
static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
{
return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
+ struct pfkey_sock *pfk;
int err;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
if (sk == NULL)
goto out;
+ pfk = pfkey_sk(sk);
+ mutex_init(&pfk->dump_lock);
+
sock->ops = &pfkey_ops;
sock_init_data(sock, sk);
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
struct sadb_msg *hdr;
int rc;
+ mutex_lock(&pfk->dump_lock);
+ if (!pfk->dump.dump) {
+ rc = 0;
+ goto out;
+ }
+
rc = pfk->dump.dump(pfk);
- if (rc == -ENOBUFS)
- return 0;
+ if (rc == -ENOBUFS) {
+ rc = 0;
+ goto out;
+ }
if (pfk->dump.skb) {
- if (!pfkey_can_dump(&pfk->sk))
- return 0;
+ if (!pfkey_can_dump(&pfk->sk)) {
+ rc = 0;
+ goto out;
+ }
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
}
pfkey_terminate_dump(pfk);
+
+out:
+ mutex_unlock(&pfk->dump_lock);
return rc;
}
@@ -1802,19 +1824,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
struct xfrm_address_filter *filter = NULL;
struct pfkey_sock *pfk = pfkey_sk(sk);
- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
- if (proto == 0)
+ if (proto == 0) {
+ mutex_unlock(&pfk->dump_lock);
return -EINVAL;
+ }
if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
- if (filter == NULL)
+ if (filter == NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -ENOMEM;
+ }
memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
sizeof(xfrm_address_t));
@@ -1830,6 +1859,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
pfk->dump.dump = pfkey_dump_sa;
pfk->dump.done = pfkey_dump_sa_done;
xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
+ mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
@@ -1922,19 +1952,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
/* addresses present only in tunnel mode */
if (t->mode == XFRM_MODE_TUNNEL) {
- u8 *sa = (u8 *) (rq + 1);
- int family, socklen;
+ int err;
- family = pfkey_sockaddr_extract((struct sockaddr *)sa,
- &t->saddr);
- if (!family)
- return -EINVAL;
-
- socklen = pfkey_sockaddr_len(family);
- if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
- &t->id.daddr) != family)
- return -EINVAL;
- t->encap_family = family;
+ err = parse_sockaddr_pair(
+ (struct sockaddr *)(rq + 1),
+ rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+ &t->saddr, &t->id.daddr, &t->encap_family);
+ if (err)
+ return err;
} else
t->encap_family = xp->family;
@@ -1954,7 +1979,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
return -EINVAL;
- while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+ while (len >= sizeof(*rq)) {
+ if (len < rq->sadb_x_ipsecrequest_len ||
+ rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+ return -EINVAL;
+
if ((err = parse_ipsecrequest(xp, rq)) < 0)
return err;
len -= rq->sadb_x_ipsecrequest_len;
@@ -2417,7 +2446,6 @@ out:
return err;
}
-#ifdef CONFIG_NET_KEY_MIGRATE
static int pfkey_sockaddr_pair_size(sa_family_t family)
{
return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2429,7 +2457,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
{
int af, socklen;
- if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+ if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
return -EINVAL;
af = pfkey_sockaddr_extract(sa, saddr);
@@ -2445,6 +2473,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
return 0;
}
+#ifdef CONFIG_NET_KEY_MIGRATE
static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
struct xfrm_migrate *m)
{
@@ -2452,13 +2481,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
struct sadb_x_ipsecrequest *rq2;
int mode;
- if (len <= sizeof(struct sadb_x_ipsecrequest) ||
- len < rq1->sadb_x_ipsecrequest_len)
+ if (len < sizeof(*rq1) ||
+ len < rq1->sadb_x_ipsecrequest_len ||
+ rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
return -EINVAL;
/* old endpoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
- rq1->sadb_x_ipsecrequest_len,
+ rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
&m->old_saddr, &m->old_daddr,
&m->old_family);
if (err)
@@ -2467,13 +2497,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
len -= rq1->sadb_x_ipsecrequest_len;
- if (len <= sizeof(struct sadb_x_ipsecrequest) ||
- len < rq2->sadb_x_ipsecrequest_len)
+ if (len <= sizeof(*rq2) ||
+ len < rq2->sadb_x_ipsecrequest_len ||
+ rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
return -EINVAL;
/* new endpoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
- rq2->sadb_x_ipsecrequest_len,
+ rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
&m->new_saddr, &m->new_daddr,
&m->new_family);
if (err)
@@ -2688,14 +2719,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
{
struct pfkey_sock *pfk = pfkey_sk(sk);
- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_portid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sp;
pfk->dump.done = pfkey_dump_sp_done;
xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+ mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
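The af_key changes tighten the walk over variable-length sadb_x_ipsecrequest records: before trusting a record's self-declared length, the parser now checks both that the declared length is at least the header size and that it does not exceed the remaining buffer, so a malformed length can neither overrun the message nor stall the loop. The generic shape of that loop (the record layout below is invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_hdr {
	uint16_t len;    /* total record length in bytes, header included */
	uint16_t type;
};

static int walk_records(const uint8_t *buf, size_t buflen)
{
	while (buflen >= sizeof(struct rec_hdr)) {
		struct rec_hdr h;

		memcpy(&h, buf, sizeof(h));
		/* reject lengths that undershoot the header or overrun the buffer */
		if (h.len < sizeof(h) || h.len > buflen)
			return -1;
		printf("record type %u, %u bytes\n", (unsigned)h.type, (unsigned)h.len);
		buf += h.len;
		buflen -= h.len;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	struct rec_hdr h = { .len = 8, .type = 1 };

	memcpy(buf, &h, sizeof(h));               /* one 8-byte record */
	return walk_records(buf, sizeof(buf)) ? 1 : 0;
}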
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4da560005b0e..dd1649caa2b2 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -845,10 +845,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
{
unsigned int verdict = NF_DROP;
- if (IP_VS_FWD_METHOD(cp) != 0) {
- pr_err("shouldn't reach here, because the box is on the "
- "half connection in the tun/dr module.\n");
- }
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ goto ignore_cp;
/* Ensure the checksum is correct */
if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -882,6 +880,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 0);
+
+ignore_cp:
verdict = NF_ACCEPT;
out:
@@ -1242,8 +1242,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
*/
cp = pp->conn_out_get(ipvs, af, skb, &iph);
- if (likely(cp))
+ if (likely(cp)) {
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ goto ignore_cp;
return handle_response(af, skb, pd, cp, &iph, hooknum);
+ }
if (sysctl_nat_icmp_send(ipvs) &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
@@ -1285,9 +1288,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
}
}
}
+
+out:
IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
"ip_vs_out: packet continues traversal as normal");
return NF_ACCEPT;
+
+ignore_cp:
+ __ip_vs_conn_put(cp);
+ goto out;
}
/*
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 1fe3d3b362c0..c5a2c7e733b3 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -969,6 +969,8 @@ static void nfc_release(struct device *d)
kfree(se);
}
+ ida_simple_remove(&nfc_index_ida, dev->idx);
+
kfree(dev);
}
@@ -1043,6 +1045,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
int tx_headroom, int tx_tailroom)
{
struct nfc_dev *dev;
+ int rc;
if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
!ops->deactivate_target || !ops->im_transceive)
@@ -1055,6 +1058,15 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
if (!dev)
return NULL;
+ rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+ if (rc < 0)
+ goto err_free_dev;
+ dev->idx = rc;
+
+ dev->dev.class = &nfc_class;
+ dev_set_name(&dev->dev, "nfc%d", dev->idx);
+ device_initialize(&dev->dev);
+
dev->ops = ops;
dev->supported_protocols = supported_protocols;
dev->tx_headroom = tx_headroom;
@@ -1077,6 +1089,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
}
return dev;
+
+err_free_dev:
+ kfree(dev);
+
+ return ERR_PTR(rc);
}
EXPORT_SYMBOL(nfc_allocate_device);
@@ -1091,14 +1108,6 @@ int nfc_register_device(struct nfc_dev *dev)
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
- dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
- if (dev->idx < 0)
- return dev->idx;
-
- dev->dev.class = &nfc_class;
- dev_set_name(&dev->dev, "nfc%d", dev->idx);
- device_initialize(&dev->dev);
-
mutex_lock(&nfc_devlist_mutex);
nfc_devlist_generation++;
rc = device_add(&dev->dev);
@@ -1136,12 +1145,10 @@ EXPORT_SYMBOL(nfc_register_device);
*/
void nfc_unregister_device(struct nfc_dev *dev)
{
- int rc, id;
+ int rc;
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
- id = dev->idx;
-
if (dev->rfkill) {
rfkill_unregister(dev->rfkill);
rfkill_destroy(dev->rfkill);
@@ -1166,8 +1173,6 @@ void nfc_unregister_device(struct nfc_dev *dev)
nfc_devlist_generation++;
device_del(&dev->dev);
mutex_unlock(&nfc_devlist_mutex);
-
- ida_simple_remove(&nfc_index_ida, id);
}
EXPORT_SYMBOL(nfc_unregister_device);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ecf0a0196f18..9c222a106c7f 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -76,7 +76,8 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- if (!addr || addr->sa_family != AF_NFC)
+ if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -150,7 +151,8 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- if (!addr || addr->sa_family != AF_NFC)
+ if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -655,8 +657,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
- if (!addr || len < sizeof(struct sockaddr_nfc) ||
- addr->sa_family != AF_NFC)
+ if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC)
return -EINVAL;
if (addr->service_name_len == 0 && addr->dsap == 0)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 10c99a578421..67583ad7f610 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1084,8 +1084,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
return ndev;
free_nfc:
- kfree(ndev->nfc_dev);
-
+ nfc_free_device(ndev->nfc_dev);
free_nci:
kfree(ndev);
return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f58c1fba1026..12dfb457275d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -873,7 +873,9 @@ static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info)
u32 device_idx, target_idx, protocol;
int rc;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_TARGET_INDEX] ||
+ !info->attrs[NFC_ATTR_PROTOCOLS])
return -EINVAL;
device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0936a4a32b47..e353e3255206 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -78,7 +78,7 @@ int rds_tcp_accept_one(struct socket *sock)
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp;
- ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+ ret = sock_create_lite(sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
&new_sock);
if (ret)
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index ae60f35b363d..b17556c346ce 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -476,10 +476,12 @@ static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
if (likely((ckresult == RMNET_MAP_CHECKSUM_OK)
|| (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
skb->ip_summed |= CHECKSUM_UNNECESSARY;
- else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION
- && ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT
- && ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET
- && ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
+ else if (ckresult !=
+ RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
+ ckresult != RMNET_MAP_CHECKSUM_VALIDATION_FAILED &&
+ ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
+ ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
+ ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
rmnet_kfree_skb(skb,
RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
return RX_HANDLER_CONSUMED;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 5474dc7c125a..ca4ecc246347 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1004,6 +1004,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
return sch;
}
+ /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
+ if (ops->destroy)
+ ops->destroy(sch);
err_out3:
dev_put(dev);
kfree((char *) sch - sch->padded);
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 13d6f83ec491..45d4b2f22f62 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -636,7 +636,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
sizeof(u32));
if (!q->hhf_arrays[i]) {
- hhf_destroy(sch);
+ /* Note: hhf_destroy() will be called
+ * by our caller.
+ */
return -ENOMEM;
}
}
@@ -647,7 +649,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
BITS_PER_BYTE);
if (!q->hhf_valid_bits[i]) {
- hhf_destroy(sch);
+ /* Note: hhf_destroy() will be called
+ * by our caller.
+ */
return -ENOMEM;
}
}
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 3e82f047caaf..d9c84328e7eb 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
/* pre-allocate qdiscs, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
- if (priv->qdiscs == NULL)
+ if (!priv->qdiscs)
return -ENOMEM;
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)));
- if (qdisc == NULL)
- goto err;
+ if (!qdisc)
+ return -ENOMEM;
priv->qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
return 0;
-
-err:
- mq_destroy(sch);
- return -ENOMEM;
}
static void mq_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index ad70ecf57ce7..66bccc5ff4ea 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -117,20 +117,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
/* pre-allocate qdisc, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
- if (priv->qdiscs == NULL) {
- err = -ENOMEM;
- goto err;
- }
+ if (!priv->qdiscs)
+ return -ENOMEM;
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)));
- if (qdisc == NULL) {
- err = -ENOMEM;
- goto err;
- }
+ if (!qdisc)
+ return -ENOMEM;
+
priv->qdiscs[i] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
@@ -143,7 +140,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
priv->hw_owned = 1;
err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
if (err)
- goto err;
+ return err;
} else {
netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++)
@@ -157,10 +154,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
sch->flags |= TCQ_F_MQROOT;
return 0;
-
-err:
- mqprio_destroy(sch);
- return err;
}
static void mqprio_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 498f0a2cb47f..4431e2833e45 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
if (!q->ht || !q->slots) {
- sfq_destroy(sch);
+ /* Note: sfq_destroy() will be called by our caller */
return -ENOMEM;
}
+
for (i = 0; i < q->divisor; i++)
q->ht[i] = SFQ_EMPTY_SLOT;
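The sch_api, sch_hhf, sch_mq, sch_mqprio and sch_sfq hunks enforce a single ownership rule: when ops->init() fails, qdisc_create() now calls ops->destroy() itself, so the individual init functions only return the error and no longer tear down their own partial state, which would otherwise free it twice. A tiny sketch of that contract (obj_create()/obj_init()/obj_destroy() are illustrative names):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int *a;
	int *b;
};

/* Allocates piece by piece; on failure it only reports, the caller cleans up. */
static int obj_init(struct obj *o, int fail_second)
{
	o->a = calloc(4, sizeof(int));
	if (!o->a)
		return -1;
	o->b = fail_second ? NULL : calloc(4, sizeof(int));
	if (!o->b)
		return -1;          /* note: no free(o->a) here */
	return 0;
}

/* Must cope with partially initialised state, and runs exactly once. */
static void obj_destroy(struct obj *o)
{
	free(o->a);
	free(o->b);
}

static struct obj *obj_create(int fail_second)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	if (obj_init(o, fail_second)) {
		obj_destroy(o);     /* single owner of the error path */
		free(o);
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_create(1);

	printf("create failed cleanly: %s\n", o ? "no" : "yes");
	if (o) {
		obj_destroy(o);
		free(o);
	}
	return 0;
}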
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 1afd88a87e0f..0727a6e9f780 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -539,6 +539,12 @@ country GH: DFS-FCC
(5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
+country GI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
country GL: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
@@ -679,10 +685,6 @@ country IN:
(5170 - 5330 @ 160), (23)
(5735 - 5835 @ 80), (30)
-country IR:
- (2402 - 2482 @ 40), (20)
- (5735 - 5835 @ 80), (30)
-
country IS: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1417,7 +1419,7 @@ country UG: DFS-FCC
country US: DFS-FCC
(2402 - 2472 @ 40), (30)
- (5170 - 5250 @ 80), (24), AUTO-BW
+ (5170 - 5250 @ 80), (30), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
(5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d0d09c290ff8..2a9ec3e05c73 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -302,8 +302,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
- .len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -359,6 +358,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -6124,6 +6124,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
struct nlattr *attr1, *attr2;
int n_channels = 0, tmp1, tmp2;
+ nla_for_each_nested(attr1, freqs, tmp1)
+ if (nla_len(attr1) != sizeof(u32))
+ return 0;
+
nla_for_each_nested(attr1, freqs, tmp1) {
n_channels++;
/*
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0f45e1a3a7d1..c8700399d7fd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1214,7 +1214,7 @@ static inline int policy_to_flow_dir(int dir)
}
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
- const struct flowi *fl)
+ const struct flowi *fl, u16 family)
{
struct xfrm_policy *pol;
struct net *net = sock_net(sk);
@@ -1223,8 +1223,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
read_lock_bh(&net->xfrm.xfrm_policy_lock);
pol = rcu_dereference(sk->sk_policy[dir]);
if (pol != NULL) {
- bool match = xfrm_selector_match(&pol->selector, fl,
- sk->sk_family);
+ bool match = xfrm_selector_match(&pol->selector, fl, family);
int err = 0;
if (match) {
@@ -2171,7 +2170,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
sk = sk_const_to_full_sk(sk);
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
num_pols = 1;
- pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
@@ -2450,7 +2449,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
pol = NULL;
sk = sk_to_full_sk(sk);
if (sk && sk->sk_policy[dir]) {
- pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+ pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index c72bcb33496a..02996be239bc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3410,7 +3410,7 @@ sub process {
$fixedline =~ s/\s*=\s*$/ = {/;
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $line;
- $fixedline =~ s/^(.\s*){\s*/$1/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1/;
fix_insert_line($fixlinenr, $fixedline);
}
}
@@ -3760,7 +3760,7 @@ sub process {
my $fixedline = rtrim($prevrawline) . " {";
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $rawline;
- $fixedline =~ s/^(.\s*){\s*/$1\t/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
if ($fixedline !~ /^\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
@@ -4249,7 +4249,7 @@ sub process {
if (ERROR("SPACING",
"space required before the open brace '{'\n" . $herecurr) &&
$fix) {
- $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
+ $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
}
}
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 696ccfa08d10..31898856682e 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -428,7 +428,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
static struct key *request_master_key(struct encrypted_key_payload *epayload,
const u8 **master_key, size_t *master_keylen)
{
- struct key *mkey = NULL;
+ struct key *mkey = ERR_PTR(-EINVAL);
if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
KEY_TRUSTED_PREFIX_LEN)) {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index c1b87c5800b1..b3fddba4c084 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -936,7 +936,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
/* FLL pre-scaler */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
- NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
+ NAU8825_FLL_REF_DIV_MASK,
+ fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
/* select divided VCO input */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
NAU8825_FLL_FILTER_SW_MASK, 0x0000);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index dff8edb83bfd..a0b220726a63 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -114,7 +114,8 @@
#define NAU8825_FLL_INTEGER_MASK (0x3ff << 0)
/* FLL4 (0x07) */
-#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10)
+#define NAU8825_FLL_REF_DIV_SFT 10
+#define NAU8825_FLL_REF_DIV_MASK (0x3 << NAU8825_FLL_REF_DIV_SFT)
/* FLL5 (0x08) */
#define NAU8825_FLL_FILTER_SW_MASK (0x1 << 14)
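The nau8825 fix shifts the divider selector into its register field before writing: NAU8825_FLL_REF_DIV_MASK covers bits 11:10, so a raw 0..3 value written without the shift would land in the low bits and be masked away. A standalone illustration of the mask-and-shift update pattern (update_bits() below mimics what a regmap_update_bits() style helper does; it is not the regmap API):

#include <stdint.h>
#include <stdio.h>

#define FLL_REF_DIV_SFT  10
#define FLL_REF_DIV_MASK (0x3u << FLL_REF_DIV_SFT)

/* Replace the masked field of reg with val, leaving the other bits untouched. */
static uint16_t update_bits(uint16_t reg, uint16_t mask, uint16_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x00ff;
	uint16_t div = 2;     /* raw divider selector, 0..3 */

	reg = update_bits(reg, FLL_REF_DIV_MASK, div << FLL_REF_DIV_SFT);
	printf("reg = 0x%04x\n", (unsigned)reg);   /* 0x08ff: the field now holds 2 */
	return 0;
}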
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index a564759845f9..5a3f544bb3a8 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = {
{ 108, 0x00 }, { 109, 0x00 },
};
+static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AIC3X_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config aic3x_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = {
.max_register = DAC_ICC_ADJ,
.reg_defaults = aic3x_reg,
.num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+
+ .volatile_reg = aic3x_volatile_reg,
+
.cache_type = REGCACHE_RBTREE,
};
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 9b1c8c98946c..1613c5baa9c7 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -415,22 +415,24 @@ static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp,
/* Go through the list of segments and download one by one */
list_for_each_entry(seg, wdsp->seg_list, list) {
ret = wdsp_load_each_segment(wdsp, seg);
- if (IS_ERR_VALUE(ret)) {
- wdsp_broadcast_event_downseq(wdsp,
- WDSP_EVENT_DLOAD_FAILED,
- NULL);
+ if (ret)
goto dload_error;
- }
}
+ /* Flush the list before setting status and notifying components */
+ wdsp_flush_segment_list(wdsp->seg_list);
+
WDSP_SET_STATUS(wdsp, status);
/* Notify all components that image is downloaded */
wdsp_broadcast_event_downseq(wdsp, post, NULL);
+done:
+ return ret;
dload_error:
wdsp_flush_segment_list(wdsp->seg_list);
-done:
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_DLOAD_FAILED, NULL);
+
return ret;
}
@@ -484,10 +486,14 @@ static int wdsp_enable_dsp(struct wdsp_mgr_priv *wdsp)
/* Make sure wdsp is in good state */
if (!WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_CODE_DLOADED)) {
WDSP_ERR(wdsp, "WDSP in invalid state 0x%x", wdsp->status);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
+ /*
+ * Acquire SSR mutex lock to make sure enablement of DSP
+ * does not race with SSR handling.
+ */
+ WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
/* Download the read-write sections of image */
ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_WRITE);
if (IS_ERR_VALUE(ret)) {
@@ -508,6 +514,7 @@ static int wdsp_enable_dsp(struct wdsp_mgr_priv *wdsp)
wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_BOOTUP, NULL);
WDSP_SET_STATUS(wdsp, WDSP_STATUS_BOOTED);
done:
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
return ret;
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index e791bf07ec67..29c218013a07 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -763,10 +763,6 @@ static int wcd_control_handler(struct device *dev, void *priv_data,
case WDSP_EVENT_DLOAD_FAILED:
case WDSP_EVENT_POST_SHUTDOWN:
- if (event == WDSP_EVENT_POST_DLOAD_CODE)
- /* Mark DSP online since code download is complete */
- wcd_cntl_change_online_state(cntl, 1);
-
/* Disable CPAR */
wcd_cntl_cpar_ctrl(cntl, false);
/* Disable all the clocks */
@@ -775,6 +771,11 @@ static int wcd_control_handler(struct device *dev, void *priv_data,
dev_err(codec->dev,
"%s: Failed to disable clocks, err = %d\n",
__func__, ret);
+
+ if (event == WDSP_EVENT_POST_DLOAD_CODE)
+ /* Mark DSP online since code download is complete */
+ wcd_cntl_change_online_state(cntl, 1);
+
break;
case WDSP_EVENT_PRE_DLOAD_DATA:
diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c
index 2ae78f75c340..b1dff8764618 100644
--- a/sound/soc/msm/apq8096-auto.c
+++ b/sound/soc/msm/apq8096-auto.c
@@ -3481,12 +3481,13 @@ static struct snd_soc_dai_link apq8096_common_dai_links[] = {
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = "MSM8996 Compress9",
- .stream_name = "Compress9",
+ .name = "MSM8996 ULL NOIRQ 2",
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 9be628cc2351..5facafdc7729 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -2496,8 +2496,21 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.rate_min = 8000,
.rate_max = 384000,
},
+ .capture = {
+ .stream_name = "MultiMedia16 Capture",
+ .aif_name = "MM_UL16",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
.ops = &msm_fe_Multimedia_dai_ops,
- .compress_new = snd_soc_new_compress,
.name = "MultiMedia16",
.probe = fe_dai_probe,
},
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index 4eafd322d50d..010dfa3322a0 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2910,12 +2910,13 @@ static struct snd_soc_dai_link msm8996_common_dai_links[] = {
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = "MSM8996 Compress9",
- .stream_name = "Compress9",
+ .name = "MSM8996 ULL NOIRQ_2",
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 87ad98ec1913..9159ea642816 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -434,7 +434,8 @@ static char const *tdm_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_32",
static const char *const auxpcm_rate_text[] = {"KHZ_8", "KHZ_16"};
static char const *mi2s_rate_text[] = {"KHZ_8", "KHZ_16",
"KHZ_32", "KHZ_44P1", "KHZ_48",
- "KHZ_96", "KHZ_192"};
+ "KHZ_88P2", "KHZ_96", "KHZ_176P4",
+ "KHZ_192"};
static const char *const mi2s_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven",
"Eight"};
@@ -2228,12 +2229,18 @@ static int mi2s_get_sample_rate_val(int sample_rate)
case SAMPLING_RATE_48KHZ:
sample_rate_val = 4;
break;
- case SAMPLING_RATE_96KHZ:
+ case SAMPLING_RATE_88P2KHZ:
sample_rate_val = 5;
break;
- case SAMPLING_RATE_192KHZ:
+ case SAMPLING_RATE_96KHZ:
sample_rate_val = 6;
break;
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 7;
+ break;
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 8;
+ break;
default:
sample_rate_val = 4;
break;
@@ -2262,9 +2269,15 @@ static int mi2s_get_sample_rate(int value)
sample_rate = SAMPLING_RATE_48KHZ;
break;
case 5:
- sample_rate = SAMPLING_RATE_96KHZ;
+ sample_rate = SAMPLING_RATE_88P2KHZ;
break;
case 6:
+ sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 7:
+ sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 8:
sample_rate = SAMPLING_RATE_192KHZ;
break;
default:
@@ -5315,12 +5328,13 @@ static struct snd_soc_dai_link msm_common_dai_links[] = {
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
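
The msm8998 machine-driver hunks above insert 88.2 kHz and 176.4 kHz into mi2s_rate_text[] and renumber both switch statements to match. A small standalone sketch of the same value-to-rate mapping done with one table, which is one way to keep the get/put paths from drifting apart (illustrative only, not the driver's code):

    #include <stdio.h>

    /* Same ordering as the mi2s_rate_text[] control above; keeping value->rate
     * in one table avoids the paired switch statements going out of sync. */
    static const unsigned int mi2s_rates[] = {
            8000, 16000, 32000, 44100, 48000, 88200, 96000, 176400, 192000,
    };

    static unsigned int mi2s_rate_from_ctl(unsigned int val)
    {
            if (val >= sizeof(mi2s_rates) / sizeof(mi2s_rates[0]))
                    return 48000;   /* default, as in the switch fallback */
            return mi2s_rates[val];
    }

    static int mi2s_ctl_from_rate(unsigned int rate)
    {
            unsigned int i;

            for (i = 0; i < sizeof(mi2s_rates) / sizeof(mi2s_rates[0]); i++)
                    if (mi2s_rates[i] == rate)
                            return i;
            return 4;       /* index of 48 kHz */
    }

    int main(void)
    {
            printf("ctl 7 -> %u Hz, 176400 Hz -> ctl %d\n",
                   mi2s_rate_from_ctl(7), mi2s_ctl_from_rate(176400));
            return 0;
    }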
diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
index e312a879b86a..1286d3185780 100644
--- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
@@ -155,7 +155,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -183,7 +183,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT STRENGTH", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -211,7 +211,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT OUT_TYPE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -239,7 +239,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT GAIN_ADJUST", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -318,7 +318,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -346,7 +346,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_MODE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -374,7 +374,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_PRESET", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -402,7 +402,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_WET_MIX", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -430,7 +430,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_GAIN_ADJUST", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -458,7 +458,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_ROOM_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -486,7 +486,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_ROOM_HF_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -514,7 +514,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DECAY_TIME", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -542,7 +542,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DECAY_HF_RATIO", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -570,7 +570,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_REFLECTIONS_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -598,7 +598,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_REFLECTIONS_DELAY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -626,7 +626,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -654,7 +654,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DELAY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -682,7 +682,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DIFFUSION", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -710,7 +710,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DENSITY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -790,7 +790,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -818,7 +818,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_MODE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -846,7 +846,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_STRENGTH", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -924,7 +924,7 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"PBE_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_PBE;
*updt_params++ =
@@ -950,7 +950,7 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"PBE_PARAM", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_PBE;
*updt_params++ =
@@ -1035,7 +1035,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1103,7 +1103,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_CONFIG", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1154,7 +1154,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_BAND_INDEX", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1186,7 +1186,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_SINGLE_BAND_FREQ", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1276,7 +1276,7 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
"VOLUME/VOLUME2_GAIN_2CH",
rc);
if (rc != 0)
- break;
+ goto invalid_config;
if (instance == SOFT_VOLUME_INSTANCE_2)
*updt_params++ =
ASM_MODULE_ID_VOL_CTRL2;
@@ -1325,7 +1325,7 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
"VOLUME/VOLUME2_GAIN_MASTER",
rc);
if (rc != 0)
- break;
+ goto invalid_config;
if (instance == SOFT_VOLUME_INSTANCE_2)
*updt_params++ =
ASM_MODULE_ID_VOL_CTRL2;
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 0acf6e8ffe49..076dbed207a9 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -39,6 +39,7 @@
#include <sound/apr_audio-v2.h>
#include <sound/q6asm-v2.h>
+#include <sound/q6core.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
@@ -103,6 +104,7 @@ struct msm_compr_pdata {
bool use_legacy_api; /* indicates use older asm apis*/
struct msm_compr_dec_params *dec_params[MSM_FRONTEND_DAI_MAX];
struct msm_compr_ch_map *ch_map[MSM_FRONTEND_DAI_MAX];
+ int32_t ion_fd[MSM_FRONTEND_DAI_MAX];
};
struct msm_compr_audio {
@@ -156,6 +158,8 @@ struct msm_compr_audio {
uint32_t start_delay_lsw;
uint32_t start_delay_msw;
+ int32_t shm_ion_fd;
+
uint64_t marker_timestamp;
struct msm_compr_gapless_state gapless_state;
@@ -1506,6 +1510,40 @@ static int msm_compr_configure_dsp_for_capture(struct snd_compr_stream *cstream)
return ret;
}
+static int msm_compr_map_unmap_fd(int fd, bool add_pages)
+{
+ ion_phys_addr_t paddr;
+ size_t pa_len = 0;
+ int ret = 0;
+ u8 assign_type;
+
+ if (add_pages)
+ assign_type = HLOS_TO_ADSP;
+ else
+ assign_type = ADSP_TO_HLOS;
+
+ ret = msm_audio_ion_phys_assign("audio_lib_mem_client", fd,
+ &paddr, &pa_len, assign_type);
+ if (ret) {
+ pr_err("%s: audio lib ION phys failed, rc = %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ret = q6core_add_remove_pool_pages(paddr, pa_len,
+ ADSP_MEMORY_MAP_HLOS_PHYSPOOL, add_pages);
+ if (ret) {
+ pr_err("%s: add remove pages failed, rc = %d\n", __func__, ret);
+ /* Assign back to HLOS if add pages cmd failed */
+ if (add_pages)
+ msm_audio_ion_phys_assign("audio_lib_mem_client", fd,
+ &paddr, &pa_len, ADSP_TO_HLOS);
+ }
+
+done:
+ return ret;
+}
+
static int msm_compr_playback_open(struct snd_compr_stream *cstream)
{
struct snd_compr_runtime *runtime = cstream->runtime;
@@ -1513,6 +1551,7 @@ static int msm_compr_playback_open(struct snd_compr_stream *cstream)
struct msm_compr_audio *prtd;
struct msm_compr_pdata *pdata =
snd_soc_platform_get_drvdata(rtd->platform);
+ int ret = 0;
pr_debug("%s\n", __func__);
prtd = kzalloc(sizeof(struct msm_compr_audio), GFP_KERNEL);
@@ -1528,19 +1567,16 @@ static int msm_compr_playback_open(struct snd_compr_stream *cstream)
kzalloc(sizeof(struct msm_compr_audio_effects), GFP_KERNEL);
if (!pdata->audio_effects[rtd->dai_link->be_id]) {
pr_err("%s: Could not allocate memory for effects\n", __func__);
- pdata->cstream[rtd->dai_link->be_id] = NULL;
- kfree(prtd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto effect_err;
}
pdata->dec_params[rtd->dai_link->be_id] =
kzalloc(sizeof(struct msm_compr_dec_params), GFP_KERNEL);
if (!pdata->dec_params[rtd->dai_link->be_id]) {
pr_err("%s: Could not allocate memory for dec params\n",
__func__);
- kfree(pdata->audio_effects[rtd->dai_link->be_id]);
- pdata->cstream[rtd->dai_link->be_id] = NULL;
- kfree(prtd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto param_err;
}
prtd->codec = FORMAT_MP3;
prtd->bytes_received = 0;
@@ -1584,19 +1620,32 @@ static int msm_compr_playback_open(struct snd_compr_stream *cstream)
(app_cb)compr_event_handler, prtd);
if (!prtd->audio_client) {
pr_err("%s: Could not allocate memory for client\n", __func__);
- kfree(pdata->audio_effects[rtd->dai_link->be_id]);
- kfree(pdata->dec_params[rtd->dai_link->be_id]);
- pdata->cstream[rtd->dai_link->be_id] = NULL;
- runtime->private_data = NULL;
- kfree(prtd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto ac_err;
}
pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
prtd->audio_client->perf_mode = false;
prtd->session_id = prtd->audio_client->session;
msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
-
+ if (pdata->ion_fd[rtd->dai_link->be_id] > 0) {
+ ret = msm_compr_map_unmap_fd(
+ pdata->ion_fd[rtd->dai_link->be_id], true);
+ if (ret < 0)
+ goto map_err;
+ }
return 0;
+
+map_err:
+ q6asm_audio_client_free(prtd->audio_client);
+ac_err:
+ kfree(pdata->dec_params[rtd->dai_link->be_id]);
+param_err:
+ kfree(pdata->audio_effects[rtd->dai_link->be_id]);
+effect_err:
+ pdata->cstream[rtd->dai_link->be_id] = NULL;
+ runtime->private_data = NULL;
+ kfree(prtd);
+ return ret;
}
static int msm_compr_capture_open(struct snd_compr_stream *cstream)
@@ -1675,6 +1724,8 @@ static int msm_compr_playback_free(struct snd_compr_stream *cstream)
int dir = IN, ret = 0, stream_id;
unsigned long flags;
uint32_t stream_index;
+ ion_phys_addr_t paddr;
+ size_t pa_len = 0;
pr_debug("%s\n", __func__);
@@ -1748,6 +1799,15 @@ static int msm_compr_playback_free(struct snd_compr_stream *cstream)
}
q6asm_audio_client_buf_free_contiguous(dir, ac);
+ if (prtd->shm_ion_fd > 0)
+ msm_audio_ion_phys_assign("audio_shm_mem_client",
+ prtd->shm_ion_fd,
+ &paddr, &pa_len, ADSP_TO_HLOS);
+ if (pdata->ion_fd[soc_prtd->dai_link->be_id] > 0) {
+ msm_compr_map_unmap_fd(pdata->ion_fd[soc_prtd->dai_link->be_id],
+ false);
+ pdata->ion_fd[soc_prtd->dai_link->be_id] = 0;
+ }
q6asm_audio_client_free(ac);
msm_adsp_clean_mixer_ctl_pp_event_queue(soc_prtd);
@@ -3655,7 +3715,7 @@ done:
return ret;
}
-static int msm_compr_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+static int msm_compr_shm_ion_fd_map_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
@@ -3664,7 +3724,6 @@ static int msm_compr_ion_fd_map_put(struct snd_kcontrol *kcontrol,
snd_soc_component_get_drvdata(comp);
struct snd_compr_stream *cstream = NULL;
struct msm_compr_audio *prtd;
- int fd;
int ret = 0;
if (fe_id >= MSM_FRONTEND_DAI_MAX) {
@@ -3694,10 +3753,34 @@ static int msm_compr_ion_fd_map_put(struct snd_kcontrol *kcontrol,
goto done;
}
- memcpy(&fd, ucontrol->value.bytes.data, sizeof(fd));
- ret = q6asm_send_ion_fd(prtd->audio_client, fd);
+ memcpy(&prtd->shm_ion_fd, ucontrol->value.bytes.data,
+ sizeof(prtd->shm_ion_fd));
+ ret = q6asm_audio_map_shm_fd(prtd->audio_client, prtd->shm_ion_fd);
if (ret < 0)
- pr_err("%s: failed to register ion fd\n", __func__);
+ pr_err("%s: failed to map shm mem\n", __func__);
+done:
+ return ret;
+}
+
+static int msm_compr_lib_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ unsigned long fe_id = kcontrol->private_value;
+ struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+ snd_soc_component_get_drvdata(comp);
+ int ret = 0;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bounds invalid fe_id %lu\n",
+ __func__, fe_id);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ memcpy(&pdata->ion_fd[fe_id], ucontrol->value.bytes.data,
+ sizeof(pdata->ion_fd[fe_id]));
+
done:
return ret;
}
@@ -4329,7 +4412,7 @@ static int msm_compr_add_channel_map_control(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static int msm_compr_add_io_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
+static int msm_compr_add_shm_ion_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
{
const char *mixer_ctl_name = "Playback ION FD";
const char *deviceNo = "NN";
@@ -4341,7 +4424,52 @@ static int msm_compr_add_io_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
.name = "?",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = msm_adsp_stream_cmd_info,
- .put = msm_compr_ion_fd_map_put,
+ .put = msm_compr_shm_ion_fd_map_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+ fe_ion_fd_config_control[0].name = mixer_str;
+ fe_ion_fd_config_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ fe_ion_fd_config_control,
+ ARRAY_SIZE(fe_ion_fd_config_control));
+ if (ret < 0)
+ pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+ kfree(mixer_str);
+done:
+ return ret;
+}
+
+static int msm_compr_add_lib_ion_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = "Playback ION LIB FD";
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_adsp_stream_cmd_info,
+ .put = msm_compr_lib_ion_fd_map_put,
.private_value = 0,
}
};
@@ -4442,11 +4570,16 @@ static int msm_compr_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add Compr ADSP Stream Callback Control\n",
__func__);
- rc = msm_compr_add_io_fd_cmd_control(rtd);
+ rc = msm_compr_add_shm_ion_fd_cmd_control(rtd);
if (rc)
pr_err("%s: Could not add Compr ion fd Control\n",
__func__);
+ rc = msm_compr_add_lib_ion_fd_cmd_control(rtd);
+ if (rc)
+ pr_err("%s: Could not add Compr ion lib fd Control\n",
+ __func__);
+
rc = msm_compr_add_event_ack_cmd_control(rtd);
if (rc)
pr_err("%s: Could not add Compr event ack Control\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 3610901addea..292b3d04f7d5 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -4152,7 +4152,8 @@ static struct snd_soc_dai_driver msm_dai_q6_mi2s_dai[] = {
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rate_min = 8000,
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 3e72aa130c18..35270e3340ec 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1683,7 +1683,7 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s REG_SND_MODEL failed err %d\n",
__func__, err);
- return err;
+ goto done;
}
break;
case SNDRV_LSM_SET_PARAMS: {
@@ -1855,13 +1855,15 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Invalid params event_status_v3\n",
__func__);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&userarg, arg, sizeof(userarg))) {
dev_err(rtd->dev,
"%s: err copyuser event_status_v3\n",
__func__);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg.payload_size >
@@ -1869,7 +1871,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(struct snd_lsm_event_status_v3) +
@@ -1879,7 +1882,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
user->payload_size = userarg.payload_size;
err = msm_lsm_ioctl_shared(substream, cmd, user);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index de769e8b806c..b94eb6fbfeea 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -1668,7 +1668,7 @@ static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
for (i = 0; i < pan_param.num_output_channels *
pan_param.num_input_channels; i++) {
pan_param.gain[i] =
- !(ucontrol->value.integer.value[len++] > 0) ?
+ !(ucontrol->value.integer.value[len++] > 0) ?
0 : 2 << 13;
}
}
@@ -1679,8 +1679,12 @@ static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
pan_param.num_input_channels;
for (i = 0; i < pan_param.num_output_channels *
pan_param.num_input_channels; i++) {
+ /*
+ * The data userspace passes is already in Q14 format.
+ * For volume gain is in Q28.
+ */
pan_param.gain[i] =
- ucontrol->value.integer.value[len++];
+ ucontrol->value.integer.value[len++] << 14;
}
ret = q6asm_set_vol_ctrl_gain_pair(prtd->audio_client,
&pan_param);
@@ -1753,7 +1757,7 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
struct msm_audio *prtd;
struct asm_stream_pan_ctrl_params dnmix_param;
- int be_id = ucontrol->value.integer.value[len];
+ int be_id = ucontrol->value.integer.value[len++];
int stream_id = 0;
if (!usr_info) {
@@ -1809,7 +1813,7 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
for (i = 0; i < dnmix_param.num_output_channels *
dnmix_param.num_input_channels; i++) {
dnmix_param.gain[i] =
- ucontrol->value.integer.value[len++];
+ ucontrol->value.integer.value[len++];
}
}
msm_routing_set_downmix_control_data(be_id,
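
The pan/scale hunk above notes that userspace already supplies channel gains in Q14 while the volume module expects Q28, so each value is shifted left by 14 bits before being sent. A one-file illustration of that fixed-point conversion (the unity constants follow the usual Qm.n convention; the function name is made up for the sketch):

    #include <stdio.h>
    #include <stdint.h>

    #define Q14_UNITY  (1 << 14)    /* 1.0 in Q14, what userspace passes */
    #define Q28_UNITY  (1 << 28)    /* 1.0 in Q28, what the volume module expects */

    /* Promote a Q14 gain to Q28 by shifting left 14 bits, as the hunk above does. */
    static uint32_t q14_to_q28(uint32_t gain_q14)
    {
            return gain_q14 << 14;
    }

    int main(void)
    {
            uint32_t half_q14 = Q14_UNITY / 2;      /* 0.5 */

            printf("unity: 0x%x -> 0x%x\n", Q14_UNITY, q14_to_q28(Q14_UNITY));
            printf("half : 0x%x -> 0x%x (expected 0x%x)\n",
                   half_q14, q14_to_q28(half_q14), Q28_UNITY / 2);
            return 0;
    }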
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index e92065d0412d..7326e658c947 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -3675,6 +3675,11 @@ static const struct snd_kcontrol_new ext_ec_ref_mux_ul9 =
msm_route_ec_ref_rx_enum[0],
msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul16 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL16 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
static const struct snd_kcontrol_new ext_ec_ref_mux_ul17 =
SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL17 MUX Mux",
msm_route_ec_ref_rx_enum[0],
@@ -7370,6 +7375,114 @@ static const struct snd_kcontrol_new mmul8_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul16_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_AUX_PCM_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new mmul9_mixer_controls[] = {
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
@@ -11745,6 +11858,7 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL9", "MultiMedia9 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL16", "MultiMedia16 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL17", "MultiMedia17 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0),
@@ -12483,6 +12597,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia9 Mixer", SND_SOC_NOPM, 0, 0,
mmul9_mixer_controls, ARRAY_SIZE(mmul9_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia16 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul16_mixer_controls, ARRAY_SIZE(mmul16_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia17 Mixer", SND_SOC_NOPM, 0, 0,
mmul17_mixer_controls, ARRAY_SIZE(mmul17_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia18 Mixer", SND_SOC_NOPM, 0, 0,
@@ -12817,6 +12933,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
&ext_ec_ref_mux_ul8),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL9 MUX", SND_SOC_NOPM, 0, 0,
&ext_ec_ref_mux_ul9),
+ SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL16 MUX", SND_SOC_NOPM, 0, 0,
+ &ext_ec_ref_mux_ul16),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL17 MUX", SND_SOC_NOPM, 0, 0,
&ext_ec_ref_mux_ul17),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL18 MUX", SND_SOC_NOPM, 0, 0,
@@ -13099,6 +13217,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia16 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
{"MultiMedia5 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13711,6 +13830,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia3 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia5 Mixer", "MI2S_TX", "MI2S_TX"},
+ {"MultiMedia16 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
@@ -13731,12 +13851,15 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia1 Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia1 Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia2 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"},
{"MultiMedia2 Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
@@ -13752,9 +13875,11 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia6 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia3 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia5 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+ {"MultiMedia16 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia6 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+ {"MultiMedia16 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
@@ -13935,6 +14060,24 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia6 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia8 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -14017,8 +14160,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia19 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia16 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia4 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia16 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia17 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia18 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia19 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
@@ -14034,6 +14179,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia19 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia16 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MM_UL1", NULL, "MultiMedia1 Mixer"},
{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MM_UL2", NULL, "MultiMedia2 Mixer"},
@@ -14043,6 +14189,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL6", NULL, "MultiMedia6 Mixer"},
{"MM_UL8", NULL, "MultiMedia8 Mixer"},
{"MM_UL9", NULL, "MultiMedia9 Mixer"},
+ {"MM_UL16", NULL, "MultiMedia16 Mixer"},
{"MM_UL17", NULL, "MultiMedia17 Mixer"},
{"MM_UL18", NULL, "MultiMedia18 Mixer"},
{"MM_UL19", NULL, "MultiMedia19 Mixer"},
@@ -14461,6 +14608,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL6", NULL, "AUDIO_REF_EC_UL6 MUX"},
{"MM_UL8", NULL, "AUDIO_REF_EC_UL8 MUX"},
{"MM_UL9", NULL, "AUDIO_REF_EC_UL9 MUX"},
+ {"MM_UL16", NULL, "AUDIO_REF_EC_UL16 MUX"},
{"MM_UL17", NULL, "AUDIO_REF_EC_UL17 MUX"},
{"MM_UL18", NULL, "AUDIO_REF_EC_UL18 MUX"},
{"MM_UL19", NULL, "AUDIO_REF_EC_UL19 MUX"},
@@ -15511,10 +15659,6 @@ static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
bedai->active = 0;
bedai->sample_rate = 0;
bedai->channel = 0;
- for (i = 0; i < MSM_FRONTEND_DAI_MAX; i++) {
- if (bedai->passthr_mode[i] != LISTEN)
- bedai->passthr_mode[i] = LEGACY_PCM;
- }
mutex_unlock(&routing_lock);
return 0;
@@ -16053,6 +16197,8 @@ int msm_routing_set_downmix_control_data(int be_id, int session_id,
char *adm_params = NULL;
int port_id, copp_idx = 0;
uint32_t params_length = 0;
+ uint16_t ii;
+ uint16_t *dst_gain_ptr = NULL;
if (be_id >= MSM_BACKEND_DAI_MAX) {
rc = -EINVAL;
@@ -16065,7 +16211,7 @@ int msm_routing_set_downmix_control_data(int be_id, int session_id,
variable_payload = dnmix_param->num_output_channels * sizeof(uint16_t)+
dnmix_param->num_input_channels * sizeof(uint16_t) +
- dnmix_param->num_output_channels * sizeof(uint16_t) *
+ dnmix_param->num_output_channels *
dnmix_param->num_input_channels * sizeof(uint16_t);
i = (variable_payload % sizeof(uint32_t));
variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i;
@@ -16104,14 +16250,15 @@ int msm_routing_set_downmix_control_data(int be_id, int session_id,
dnmix_param->num_output_channels * sizeof(uint16_t)),
dnmix_param->input_channel_map,
dnmix_param->num_input_channels * sizeof(uint16_t));
- memcpy(((u8 *)adm_params +
+
+ dst_gain_ptr = (uint16_t *) ((u8 *)adm_params +
sizeof(struct adm_pspd_param_data_t) +
sizeof(struct audproc_chmixer_param_coeff) +
(dnmix_param->num_output_channels * sizeof(uint16_t)) +
- (dnmix_param->num_input_channels * sizeof(uint16_t))),
- dnmix_param->gain,
- (dnmix_param->num_output_channels * sizeof(uint16_t)) *
(dnmix_param->num_input_channels * sizeof(uint16_t)));
+ for (ii = 0; ii < dnmix_param->num_output_channels *
+ dnmix_param->num_input_channels; ii++)
+ dst_gain_ptr[ii] = (uint16_t) dnmix_param->gain[ii];
if (params_length) {
rc = adm_set_pspd_matrix_params(port_id,
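
In msm_routing_set_downmix_control_data() the patch drops the extra sizeof(uint16_t) factor from the gain payload size and replaces the raw memcpy of dnmix_param->gain with a per-element cast through dst_gain_ptr, which only makes sense if the in-memory gain array is wider than the 16-bit wire format. The sketch below illustrates that narrowing under exactly that assumption (the values are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Gains as the control layer stores them (assumed wider than u16). */
            int gain[4] = { 0x2000, 0x4000, 0, 0x2000 };
            uint16_t wire[4];
            size_t i;

            /*
             * A memcpy of sizeof(wire) bytes would copy only the first two ints
             * and scramble the layout; narrowing element by element keeps each
             * coefficient intact, which is what the dst_gain_ptr loop above does.
             */
            for (i = 0; i < 4; i++)
                    wire[i] = (uint16_t)gain[i];

            for (i = 0; i < 4; i++)
                    printf("wire[%zu] = 0x%04x\n", i, wire[i]);
            return 0;
    }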
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index c640be343e10..21dfa48308c3 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -56,8 +56,8 @@
#define LPASS_BE_SEC_MI2S_TX "SEC_MI2S_TX"
#define LPASS_BE_PRI_MI2S_RX "PRI_MI2S_RX"
#define LPASS_BE_PRI_MI2S_TX "PRI_MI2S_TX"
-#define LPASS_BE_TERT_MI2S_RX "TERTIARY_MI2S_RX"
-#define LPASS_BE_TERT_MI2S_TX "TERTIARY_MI2S_TX"
+#define LPASS_BE_TERT_MI2S_RX "TERT_MI2S_RX"
+#define LPASS_BE_TERT_MI2S_TX "TERT_MI2S_TX"
#define LPASS_BE_AUDIO_I2S_RX "AUDIO_I2S_RX"
#define LPASS_BE_STUB_RX "STUB_RX"
#define LPASS_BE_STUB_TX "STUB_TX"
diff --git a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
index 5c5f7bc482c8..fdeb8a15ffee 100644
--- a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
@@ -27,6 +27,8 @@
#include <sound/initval.h>
#include <sound/control.h>
#include <sound/q6asm-v2.h>
+#include <sound/q6core.h>
+#include <sound/q6audio-v2.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>
#include <sound/tlv.h>
@@ -40,11 +42,21 @@
#include "msm-qti-pp-config.h"
#define LOOPBACK_SESSION_MAX_NUM_STREAMS 2
+/* Max volume corresponding to 24dB */
+#define TRANSCODE_LR_VOL_MAX_STEPS 0xFFFF
+
+#define APP_TYPE_CONFIG_IDX_APP_TYPE 0
+#define APP_TYPE_CONFIG_IDX_ACDB_ID 1
+#define APP_TYPE_CONFIG_IDX_SAMPLE_RATE 2
+#define APP_TYPE_CONFIG_IDX_BE_ID 3
static DEFINE_MUTEX(transcode_loopback_session_lock);
struct trans_loopback_pdata {
struct snd_compr_stream *cstream[MSM_FRONTEND_DAI_MAX];
+ int32_t ion_fd[MSM_FRONTEND_DAI_MAX];
+ uint32_t master_gain;
+ int perf_mode;
};
struct loopback_stream {
@@ -79,6 +91,7 @@ struct msm_transcode_loopback {
int session_id;
struct audio_client *audio_client;
+ int32_t shm_ion_fd;
};
/* Transcode loopback global info struct */
@@ -179,6 +192,40 @@ static void populate_codec_list(struct msm_transcode_loopback *trans,
}
}
+static int msm_transcode_map_unmap_fd(int fd, bool add_pages)
+{
+ ion_phys_addr_t paddr;
+ size_t pa_len = 0;
+ int ret = 0;
+ u8 assign_type;
+
+ if (add_pages)
+ assign_type = HLOS_TO_ADSP;
+ else
+ assign_type = ADSP_TO_HLOS;
+
+ ret = msm_audio_ion_phys_assign("audio_lib_mem_client", fd,
+ &paddr, &pa_len, assign_type);
+ if (ret) {
+ pr_err("%s: audio lib ION phys failed, rc = %d\n", __func__,
+ ret);
+ goto done;
+ }
+
+ ret = q6core_add_remove_pool_pages(paddr, pa_len,
+ ADSP_MEMORY_MAP_HLOS_PHYSPOOL, add_pages);
+ if (ret) {
+ pr_err("%s: add remove pages failed, rc = %d\n", __func__, ret);
+ /* Assign back to HLOS if add pages cmd failed */
+ if (add_pages)
+ msm_audio_ion_phys_assign("audio_lib_mem_client", fd,
+ &paddr, &pa_len, ADSP_TO_HLOS);
+ }
+
+done:
+ return ret;
+}
+
static int msm_transcode_loopback_open(struct snd_compr_stream *cstream)
{
int ret = 0;
@@ -224,6 +271,13 @@ static int msm_transcode_loopback_open(struct snd_compr_stream *cstream)
goto exit;
}
msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
+ if (pdata->ion_fd[rtd->dai_link->be_id] > 0) {
+ ret = msm_transcode_map_unmap_fd(
+ pdata->ion_fd[rtd->dai_link->be_id],
+ true);
+ if (ret < 0)
+ goto exit;
+ }
}
pr_debug("%s: num stream%d, stream name %s\n", __func__,
@@ -274,7 +328,11 @@ static int msm_transcode_loopback_free(struct snd_compr_stream *cstream)
struct snd_compr_runtime *runtime = cstream->runtime;
struct msm_transcode_loopback *trans = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(cstream);
+ struct trans_loopback_pdata *pdata = snd_soc_platform_get_drvdata(
+ rtd->platform);
int ret = 0;
+ ion_phys_addr_t paddr;
+ size_t pa_len = 0;
mutex_lock(&trans->lock);
@@ -286,6 +344,19 @@ static int msm_transcode_loopback_free(struct snd_compr_stream *cstream)
if (cstream->direction == SND_COMPRESS_PLAYBACK) {
memset(&trans->sink, 0, sizeof(struct loopback_stream));
msm_adsp_clean_mixer_ctl_pp_event_queue(rtd);
+ if (trans->shm_ion_fd > 0) {
+ msm_audio_ion_phys_assign("audio_shm_mem_client",
+ trans->shm_ion_fd,
+ &paddr, &pa_len,
+ ADSP_TO_HLOS);
+ trans->shm_ion_fd = 0;
+ }
+ if (pdata->ion_fd[rtd->dai_link->be_id] > 0) {
+ msm_transcode_map_unmap_fd(
+ pdata->ion_fd[rtd->dai_link->be_id],
+ false);
+ pdata->ion_fd[rtd->dai_link->be_id] = 0;
+ }
} else if (cstream->direction == SND_COMPRESS_CAPTURE) {
memset(&trans->source, 0, sizeof(struct loopback_stream));
}
@@ -337,6 +408,8 @@ static int msm_transcode_loopback_set_params(struct snd_compr_stream *cstream,
struct msm_transcode_loopback *trans = runtime->private_data;
struct snd_soc_pcm_runtime *soc_pcm_rx;
struct snd_soc_pcm_runtime *soc_pcm_tx;
+ struct snd_soc_pcm_runtime *rtd;
+ struct trans_loopback_pdata *pdata;
uint32_t bit_width = 16;
int ret = 0;
@@ -347,6 +420,9 @@ static int msm_transcode_loopback_set_params(struct snd_compr_stream *cstream,
mutex_lock(&trans->lock);
+ rtd = snd_pcm_substream_chip(cstream);
+ pdata = snd_soc_platform_get_drvdata(rtd->platform);
+
if (cstream->direction == SND_COMPRESS_PLAYBACK) {
if (codec_param->codec.id == SND_AUDIOCODEC_PCM) {
trans->sink.codec_format =
@@ -428,7 +504,7 @@ static int msm_transcode_loopback_set_params(struct snd_compr_stream *cstream,
pr_debug("%s: ASM client allocated, callback %pK\n", __func__,
loopback_event_handler);
trans->session_id = trans->audio_client->session;
- trans->audio_client->perf_mode = false;
+ trans->audio_client->perf_mode = pdata->perf_mode;
ret = q6asm_open_transcode_loopback(trans->audio_client,
bit_width,
trans->source.codec_format,
@@ -447,7 +523,7 @@ static int msm_transcode_loopback_set_params(struct snd_compr_stream *cstream,
if (trans->source.codec_format != FORMAT_LINEAR_PCM)
msm_pcm_routing_reg_phy_compr_stream(
soc_pcm_tx->dai_link->be_id,
- trans->audio_client->perf_mode,
+ false,
trans->session_id,
SNDRV_PCM_STREAM_CAPTURE,
COMPRESSED_PASSTHROUGH_GEN);
@@ -460,7 +536,7 @@ static int msm_transcode_loopback_set_params(struct snd_compr_stream *cstream,
/* Opening Rx ADM in LOW_LATENCY mode by default */
msm_pcm_routing_reg_phy_stream(
soc_pcm_rx->dai_link->be_id,
- true,
+ trans->audio_client->perf_mode,
trans->session_id,
SNDRV_PCM_STREAM_PLAYBACK);
pr_debug("%s: Successfully opened ADM sessions\n", __func__);
@@ -493,6 +569,46 @@ static int msm_transcode_loopback_get_caps(struct snd_compr_stream *cstream,
return 0;
}
+static int msm_transcode_loopback_set_metadata(struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct trans_loopback_pdata *pdata;
+
+ if (!metadata || !cstream) {
+ pr_err("%s: Invalid arguments\n", __func__);
+ return -EINVAL;
+ }
+
+ rtd = snd_pcm_substream_chip(cstream);
+ pdata = snd_soc_platform_get_drvdata(rtd->platform);
+
+ switch (metadata->key) {
+ case SNDRV_COMPRESS_LATENCY_MODE:
+ {
+ switch (metadata->value[0]) {
+ case SNDRV_COMPRESS_LEGACY_LATENCY_MODE:
+ pdata->perf_mode = LEGACY_PCM_MODE;
+ break;
+ case SNDRV_COMPRESS_LOW_LATENCY_MODE:
+ pdata->perf_mode = LOW_LATENCY_PCM_MODE;
+ break;
+ default:
+ pr_debug("%s: Unsupported latency mode %d, default to Legacy\n",
+ __func__, metadata->value[0]);
+ pdata->perf_mode = LEGACY_PCM_MODE;
+ break;
+ }
+ }
+ break;
+ default:
+ pr_debug("%s: Unsupported metadata %d\n",
+ __func__, metadata->key);
+ break;
+ }
+ return 0;
+}
+
static int msm_transcode_stream_cmd_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -557,7 +673,7 @@ done:
return ret;
}
-static int msm_transcode_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+static int msm_transcode_shm_ion_fd_map_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
@@ -566,7 +682,6 @@ static int msm_transcode_ion_fd_map_put(struct snd_kcontrol *kcontrol,
snd_soc_component_get_drvdata(comp);
struct snd_compr_stream *cstream = NULL;
struct msm_transcode_loopback *prtd;
- int fd;
int ret = 0;
if (fe_id >= MSM_FRONTEND_DAI_MAX) {
@@ -596,10 +711,34 @@ static int msm_transcode_ion_fd_map_put(struct snd_kcontrol *kcontrol,
goto done;
}
- memcpy(&fd, ucontrol->value.bytes.data, sizeof(fd));
- ret = q6asm_send_ion_fd(prtd->audio_client, fd);
+ memcpy(&prtd->shm_ion_fd, ucontrol->value.bytes.data,
+ sizeof(prtd->shm_ion_fd));
+ ret = q6asm_audio_map_shm_fd(prtd->audio_client, prtd->shm_ion_fd);
if (ret < 0)
- pr_err("%s: failed to register ion fd\n", __func__);
+ pr_err("%s: failed to map shm mem\n", __func__);
+done:
+ return ret;
+}
+
+
+static int msm_transcode_lib_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ unsigned long fe_id = kcontrol->private_value;
+ struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+ snd_soc_component_get_drvdata(comp);
+ int ret = 0;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bounds invalid fe_id %lu\n",
+ __func__, fe_id);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ memcpy(&pdata->ion_fd[fe_id], ucontrol->value.bytes.data,
+ sizeof(pdata->ion_fd[fe_id]));
done:
return ret;
}
@@ -663,6 +802,141 @@ done:
return ret;
}
+static int msm_transcode_playback_app_type_cfg_put(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_BE_ID];
+ struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+ int ret = 0;
+
+ cfg_data.app_type = ucontrol->value.integer.value[
+ APP_TYPE_CONFIG_IDX_APP_TYPE];
+ cfg_data.acdb_dev_id = ucontrol->value.integer.value[
+ APP_TYPE_CONFIG_IDX_ACDB_ID];
+ if (ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_SAMPLE_RATE] != 0)
+ cfg_data.sample_rate = ucontrol->value.integer.value[
+ APP_TYPE_CONFIG_IDX_SAMPLE_RATE];
+ pr_debug("%s: fe_id %llu session_type %d be_id %d app_type %d acdb_dev_id %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, &cfg_data);
+ if (ret < 0)
+ pr_err("%s: msm_transcode_playback_stream_app_type_cfg set failed returned %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int msm_transcode_playback_app_type_cfg_get(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = 0;
+ struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+ int ret = 0;
+
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ &be_id, &cfg_data);
+ if (ret < 0) {
+ pr_err("%s: msm_transcode_playback_stream_app_type_cfg get failed returned %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_APP_TYPE] =
+ cfg_data.app_type;
+ ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_ACDB_ID] =
+ cfg_data.acdb_dev_id;
+ ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_SAMPLE_RATE] =
+ cfg_data.sample_rate;
+ ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_BE_ID] = be_id;
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
+ cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+ return ret;
+}
+
+static int msm_transcode_set_volume(struct snd_compr_stream *cstream,
+ uint32_t master_gain)
+{
+ int rc = 0;
+ struct msm_transcode_loopback *prtd;
+ struct snd_soc_pcm_runtime *rtd;
+
+ pr_debug("%s: master_gain %d\n", __func__, master_gain);
+ if (!cstream || !cstream->runtime) {
+ pr_err("%s: session not active\n", __func__);
+ return -EPERM;
+ }
+ rtd = cstream->private_data;
+ prtd = cstream->runtime->private_data;
+
+ if (!rtd || !rtd->platform || !prtd || !prtd->audio_client) {
+ pr_err("%s: invalid rtd, prtd or audio client", __func__);
+ return -EINVAL;
+ }
+
+ rc = q6asm_set_volume(prtd->audio_client, master_gain);
+ if (rc < 0)
+ pr_err("%s: Send vol gain command failed rc=%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static int msm_transcode_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ unsigned long fe_id = kcontrol->private_value;
+ struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+ snd_soc_component_get_drvdata(comp);
+ struct snd_compr_stream *cstream = NULL;
+ uint32_t ret = 0;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bounds fe_id %lu\n",
+ __func__, fe_id);
+ return -EINVAL;
+ }
+
+ cstream = pdata->cstream[fe_id];
+ pdata->master_gain = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: fe_id %lu master_gain %d\n",
+ __func__, fe_id, pdata->master_gain);
+ if (cstream)
+ ret = msm_transcode_set_volume(cstream, pdata->master_gain);
+ return ret;
+}
+
+static int msm_transcode_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ unsigned long fe_id = kcontrol->private_value;
+
+ struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+ snd_soc_component_get_drvdata(comp);
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: fe_id %lu\n", __func__, fe_id);
+ ucontrol->value.integer.value[0] = pdata->master_gain;
+
+ return 0;
+}
+
static int msm_transcode_stream_cmd_control(
struct snd_soc_pcm_runtime *rtd)
{
@@ -773,7 +1047,8 @@ done:
return ret;
}
-static int msm_transcode_add_ion_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
+static int msm_transcode_add_shm_ion_fd_cmd_control(
+ struct snd_soc_pcm_runtime *rtd)
{
const char *mixer_ctl_name = "Playback ION FD";
const char *deviceNo = "NN";
@@ -785,7 +1060,53 @@ static int msm_transcode_add_ion_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
.name = "?",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = msm_adsp_stream_cmd_info,
- .put = msm_transcode_ion_fd_map_put,
+ .put = msm_transcode_shm_ion_fd_map_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+ fe_ion_fd_config_control[0].name = mixer_str;
+ fe_ion_fd_config_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ fe_ion_fd_config_control,
+ ARRAY_SIZE(fe_ion_fd_config_control));
+ if (ret < 0)
+ pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+ kfree(mixer_str);
+done:
+ return ret;
+}
+
+static int msm_transcode_add_lib_ion_fd_cmd_control(
+ struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = "Playback ION LIB FD";
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_adsp_stream_cmd_info,
+ .put = msm_transcode_lib_ion_fd_map_put,
.private_value = 0,
}
};
@@ -864,6 +1185,99 @@ done:
return ret;
}
+static int msm_transcode_app_type_cfg_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 5;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 0xFFFFFFFF;
+ return 0;
+}
+
+static int msm_transcode_add_app_type_cfg_control(
+ struct snd_soc_pcm_runtime *rtd)
+{
+ char mixer_str[32];
+ int rc = 0;
+ struct snd_kcontrol_new fe_app_type_cfg_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_transcode_app_type_cfg_info,
+ .put = msm_transcode_playback_app_type_cfg_put,
+ .get = msm_transcode_playback_app_type_cfg_get,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+
+ return -EINVAL;
+ }
+
+ if (rtd->compr->direction == SND_COMPRESS_PLAYBACK) {
+
+ snprintf(mixer_str, sizeof(mixer_str),
+ "Audio Stream %d App Type Cfg",
+ rtd->pcm->device);
+
+ fe_app_type_cfg_control[0].name = mixer_str;
+ fe_app_type_cfg_control[0].private_value = rtd->dai_link->be_id;
+
+ fe_app_type_cfg_control[0].put =
+ msm_transcode_playback_app_type_cfg_put;
+ fe_app_type_cfg_control[0].get =
+ msm_transcode_playback_app_type_cfg_get;
+
+ pr_debug("Registering new mixer ctl %s", mixer_str);
+ snd_soc_add_platform_controls(rtd->platform,
+ fe_app_type_cfg_control,
+ ARRAY_SIZE(fe_app_type_cfg_control));
+ }
+
+ return rc;
+}
+static int msm_transcode_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = TRANSCODE_LR_VOL_MAX_STEPS;
+ return 0;
+}
+
+static int msm_transcode_add_volume_control(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_kcontrol_new fe_volume_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Transcode Loopback Rx Volume",
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_transcode_volume_info,
+ .get = msm_transcode_volume_get,
+ .put = msm_transcode_volume_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+ return -EINVAL;
+ }
+ if (rtd->compr->direction == SND_COMPRESS_PLAYBACK) {
+ fe_volume_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("Registering new mixer ctl %s",
+ fe_volume_control[0].name);
+ snd_soc_add_platform_controls(rtd->platform, fe_volume_control,
+ ARRAY_SIZE(fe_volume_control));
+ }
+ return 0;
+}
+
static int msm_transcode_loopback_new(struct snd_soc_pcm_runtime *rtd)
{
int rc;
@@ -877,9 +1291,14 @@ static int msm_transcode_loopback_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: ADSP Stream callback Control open failed\n",
__func__);
- rc = msm_transcode_add_ion_fd_cmd_control(rtd);
+ rc = msm_transcode_add_shm_ion_fd_cmd_control(rtd);
+ if (rc)
+ pr_err("%s: Could not add transcode shm ion fd Control\n",
+ __func__);
+
+ rc = msm_transcode_add_lib_ion_fd_cmd_control(rtd);
if (rc)
- pr_err("%s: Could not add transcode ion fd Control\n",
+ pr_err("%s: Could not add transcode lib ion fd Control\n",
__func__);
rc = msm_transcode_add_event_ack_cmd_control(rtd);
@@ -887,6 +1306,16 @@ static int msm_transcode_loopback_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add transcode event ack Control\n",
__func__);
+ rc = msm_transcode_add_app_type_cfg_control(rtd);
+ if (rc)
+ pr_err("%s: Could not add Compr App Type Cfg Control\n",
+ __func__);
+
+ rc = msm_transcode_add_volume_control(rtd);
+ if (rc)
+ pr_err("%s: Could not add transcode volume Control\n",
+ __func__);
+
return 0;
}
@@ -896,6 +1325,7 @@ static struct snd_compr_ops msm_transcode_loopback_ops = {
.trigger = msm_transcode_loopback_trigger,
.set_params = msm_transcode_loopback_set_params,
.get_caps = msm_transcode_loopback_get_caps,
+ .set_metadata = msm_transcode_loopback_set_metadata,
};
@@ -910,6 +1340,7 @@ static int msm_transcode_loopback_probe(struct snd_soc_platform *platform)
if (!pdata)
return -ENOMEM;
+ pdata->perf_mode = LOW_LATENCY_PCM_MODE;
snd_soc_platform_set_drvdata(platform, pdata);
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index e1bfc950d0e3..018681309f2e 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -2364,7 +2364,8 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
struct adm_cmd_device_open_v5 open;
struct adm_cmd_device_open_v6 open_v6;
int ret = 0;
- int port_idx, copp_idx, flags;
+ int port_idx, flags;
+ int copp_idx = -1;
int tmp_port = q6audio_get_port_id(port_id);
pr_debug("%s:port %#x path:%d rate:%d mode:%d perf_mode:%d,topo_id %d\n",
@@ -2418,8 +2419,17 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
(topology == VPM_TX_DM_RFECNS_COPP_TOPOLOGY))
rate = 16000;
- copp_idx = adm_get_idx_if_copp_exists(port_idx, topology, perf_mode,
- rate, bit_width, app_type);
+ /*
+ * The routing driver reuses an existing ADM copp for streams that share
+ * app_type, sample rate, etc. The DSP interface does not allow such
+ * reuse for ULL streams, so skip the existing-copp lookup in that case.
+ */
+ if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE)
+ copp_idx = adm_get_idx_if_copp_exists(port_idx, topology,
+ perf_mode,
+ rate, bit_width,
+ app_type);
+
if (copp_idx < 0) {
copp_idx = adm_get_next_available_copp(port_idx);
if (copp_idx >= MAX_COPPS_PER_PORT) {
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index a44569530846..609ae8cd82ac 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -36,6 +36,7 @@
#include <sound/apr_audio-v2.h>
#include <sound/q6asm-v2.h>
+#include <sound/q6core.h>
#include <sound/q6audio-v2.h>
#include <sound/audio_cal_utils.h>
#include <sound/adsp_err.h>
@@ -7117,13 +7118,10 @@ fail_cmd:
return rc;
}
-int q6asm_send_ion_fd(struct audio_client *ac, int fd)
+int q6asm_audio_map_shm_fd(struct audio_client *ac, int fd)
{
- struct ion_client *client;
- struct ion_handle *handle;
ion_phys_addr_t paddr;
size_t pa_len = 0;
- void *vaddr;
int ret;
int sz = 0;
struct avs_rtic_shared_mem_addr shm;
@@ -7139,19 +7137,10 @@ int q6asm_send_ion_fd(struct audio_client *ac, int fd)
goto fail_cmd;
}
- ret = msm_audio_ion_import("audio_mem_client",
- &client,
- &handle,
- fd,
- NULL,
- 0,
- &paddr,
- &pa_len,
- &vaddr);
+ ret = msm_audio_ion_phys_assign("audio_shm_mem_client", fd,
+ &paddr, &pa_len, HLOS_TO_ADSP);
if (ret) {
- pr_err("%s: audio ION import failed, rc = %d\n",
- __func__, ret);
- ret = -ENOMEM;
+ pr_err("%s: shm ION phys failed, rc = %d\n", __func__, ret);
goto fail_cmd;
}
/* get payload length */
@@ -7554,7 +7543,9 @@ int q6asm_set_mfc_panning_params(struct audio_client *ac,
struct asm_stream_param_data_v2 data;
struct audproc_chmixer_param_coeff pan_cfg;
uint16_t variable_payload = 0;
- uint16_t *asm_params = NULL;
+ char *asm_params = NULL;
+ uint16_t ii;
+ uint16_t *dst_gain_ptr = NULL;
sz = rc = i = 0;
if (ac == NULL) {
@@ -7615,7 +7606,7 @@ int q6asm_set_mfc_panning_params(struct audio_client *ac,
variable_payload = pan_param->num_output_channels * sizeof(uint16_t)+
pan_param->num_input_channels * sizeof(uint16_t) +
- pan_param->num_output_channels * sizeof(uint16_t) *
+ pan_param->num_output_channels *
pan_param->num_input_channels * sizeof(uint16_t);
i = (variable_payload % sizeof(uint32_t));
variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i;
@@ -7675,15 +7666,16 @@ int q6asm_set_mfc_panning_params(struct audio_client *ac,
pan_param->num_output_channels * sizeof(uint16_t)),
pan_param->input_channel_map,
pan_param->num_input_channels * sizeof(uint16_t));
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+
+ dst_gain_ptr = (uint16_t *) ((u8 *)asm_params + sizeof(struct apr_hdr) +
sizeof(struct asm_stream_cmd_set_pp_params_v2) +
sizeof(struct asm_stream_param_data_v2) +
sizeof(struct audproc_chmixer_param_coeff) +
(pan_param->num_output_channels * sizeof(uint16_t)) +
- (pan_param->num_input_channels * sizeof(uint16_t))),
- pan_param->gain,
- (pan_param->num_output_channels * sizeof(uint16_t)) *
(pan_param->num_input_channels * sizeof(uint16_t)));
+ for (ii = 0; ii < pan_param->num_output_channels *
+ pan_param->num_input_channels; ii++)
+ dst_gain_ptr[ii] = (uint16_t) pan_param->gain[ii];
rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
if (rc < 0) {
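
The hunk above replaces a single memcpy() of the gain table with an explicit
per-element copy through dst_gain_ptr. The removed memcpy both mis-computed
the size (it multiplied two byte counts together) and, since the source gains
appear to be wider than 16 bits, would have copied raw bytes rather than
narrowed values. A minimal userspace sketch of the same narrowing-copy
pattern (pack_gains_u16 and the sample values are illustrative, not from the
driver):

#include <stdint.h>
#include <stdio.h>

/*
 * Narrow a table of 32-bit gain values into a packed 16-bit payload.
 * memcpy() cannot do this: it copies raw bytes, so half of every source
 * element would end up interleaved into the destination.
 */
static void pack_gains_u16(uint16_t *dst, const uint32_t *src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] = (uint16_t)src[i];	/* explicit per-element narrowing */
}

int main(void)
{
	const uint32_t gain[4] = { 0x2000, 0x1000, 0x0800, 0x0400 };
	uint16_t payload[4];
	size_t i;

	pack_gains_u16(payload, gain, 4);
	for (i = 0; i < 4; i++)
		printf("payload[%zu] = 0x%04x\n", i, payload[i]);
	return 0;
}
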
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index 4340d31c218c..6fed443186e5 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -119,6 +119,18 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
q6core_lcl.bus_bw_resp_received = 1;
wake_up(&q6core_lcl.bus_bw_req_wait);
break;
+ case AVCS_CMD_ADD_POOL_PAGES:
+ pr_debug("%s: Cmd = AVCS_CMD_ADD_POOL_PAGES status[0x%x]\n",
+ __func__, payload1[1]);
+ q6core_lcl.bus_bw_resp_received = 1;
+ wake_up(&q6core_lcl.bus_bw_req_wait);
+ break;
+ case AVCS_CMD_REMOVE_POOL_PAGES:
+ pr_debug("%s: Cmd = AVCS_CMD_REMOVE_POOL_PAGES status[0x%x]\n",
+ __func__, payload1[1]);
+ q6core_lcl.bus_bw_resp_received = 1;
+ wake_up(&q6core_lcl.bus_bw_req_wait);
+ break;
default:
pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
__func__,
@@ -542,6 +554,56 @@ done:
return ret;
}
+int q6core_add_remove_pool_pages(ion_phys_addr_t buf_add, uint32_t bufsz,
+ uint32_t mempool_id, bool add_pages)
+{
+ struct avs_mem_assign_region mem_pool;
+ int ret = 0, sz;
+
+ if (add_pages)
+ mem_pool.hdr.opcode = AVCS_CMD_ADD_POOL_PAGES;
+ else
+ mem_pool.hdr.opcode = AVCS_CMD_REMOVE_POOL_PAGES;
+
+ /* get payload length */
+ sz = sizeof(struct avs_mem_assign_region);
+ mem_pool.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(sizeof(struct apr_hdr)),
+ APR_PKT_VER);
+ mem_pool.hdr.src_port = 0;
+ mem_pool.hdr.dest_port = 0;
+ mem_pool.hdr.token = 0;
+ mem_pool.hdr.pkt_size = sz;
+ mem_pool.pool_id = mempool_id;
+ mem_pool.size = bufsz;
+ mem_pool.addr_lsw = lower_32_bits(buf_add);
+ mem_pool.addr_msw = msm_audio_populate_upper_32_bits(buf_add);
+ pr_debug("%s: sending memory map, size %d\n",
+ __func__, bufsz);
+
+ q6core_lcl.bus_bw_resp_received = 0;
+ ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&mem_pool);
+ if (ret < 0) {
+ pr_err("%s: library map region failed %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+ (q6core_lcl.bus_bw_resp_received == 1),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: timeout. waited for library memory map\n",
+ __func__);
+ ret = -ETIME;
+ goto done;
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
static int q6core_dereg_all_custom_topologies(void)
{
int ret = 0;
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index 1c03d8c9e797..30c3ffe2347d 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -1270,10 +1270,10 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = {
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{/* hw:x,33 */
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index f4219148e81c..0f28e100f535 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -2188,10 +2188,10 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{/* hw:x,33 */
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e63b4346e82e..fae57d18bbd7 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3063,7 +3063,8 @@ static int snd_soc_codec_set_bias_level(struct snd_soc_dapm_context *dapm,
*/
void snd_soc_card_change_online_state(struct snd_soc_card *soc_card, int online)
{
- snd_card_change_online_state(soc_card->snd_card, online);
+ if (soc_card && soc_card->snd_card)
+ snd_card_change_online_state(soc_card->snd_card, online);
}
EXPORT_SYMBOL(snd_soc_card_change_online_state);
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 2bba7dbc35f9..61f64dcd5374 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -384,6 +384,9 @@ static void snd_complete_urb(struct urb *urb)
if (unlikely(atomic_read(&ep->chip->shutdown)))
goto exit_clear;
+ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+ goto exit_clear;
+
if (usb_pipeout(ep->pipe)) {
retire_outbound_urb(ep, ctx);
/* can be stopped during retire callback */
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index fa7208a32d76..8a679b21f0c4 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -115,4 +115,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#define WRITE_ONCE(x, val) \
({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#ifndef __fallthrough
+# if defined(__GNUC__) && __GNUC__ >= 7
+# define __fallthrough __attribute__ ((fallthrough))
+# else
+# define __fallthrough
+# endif
+#endif
+
#endif /* _TOOLS_LINUX_COMPILER_H */
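
The __fallthrough macro added above expands to the GCC "fallthrough" statement
attribute on GCC 7 and later (where -Wimplicit-fallthrough can warn on silent
case fall-through) and to nothing elsewhere; the later hunks in builtin-top.c,
the intel-pt decoder, strfilter.c and string.c use it in place of bare
"/* Fall thru */" comments. A self-contained illustration of the idiom, with
the same macro definition and a made-up scale() helper:

#include <stdio.h>

#ifndef __fallthrough
# if defined(__GNUC__) && __GNUC__ >= 7
#  define __fallthrough __attribute__ ((fallthrough))
# else
#  define __fallthrough
# endif
#endif

/* Scale a value by a size suffix; 'm' deliberately falls through into 'k'. */
static long scale(long val, char suffix)
{
	switch (suffix) {
	case 'm':
		val *= 1024;	/* megabytes -> kilobytes ... */
		__fallthrough;	/* ... then reuse the kilobyte scaling below */
	case 'k':
		val *= 1024;
		break;
	default:
		break;
	}
	return val;
}

int main(void)
{
	printf("2k -> %ld, 2m -> %ld\n", scale(2, 'k'), scale(2, 'm'));
	return 0;
}
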
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
index c808c7d02d21..e69118b2077e 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -8,7 +8,7 @@
#include <linux/utsname.h>
#include <linux/compiler.h>
-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 255UL
#define asmlinkage
#define __visible
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index f1ce60065258..ec30c2fcbac0 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s,
trace_seq_printf(s, "%lld ", val);
if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
- trace_seq_printf(s, "[%lld] ", val);
+ trace_seq_printf(s, "[%d] ", (int) val);
if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
write_state(s, val);
@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s,
trace_seq_printf(s, "%lld", val);
if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
- trace_seq_printf(s, " [%lld]", val);
+ trace_seq_printf(s, " [%d]", (int) val);
return 0;
}
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 74c265e0ffa0..fb1c9ddc3478 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -566,9 +566,9 @@ install-tests: all install-gtk
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
-install-bin: install-tools install-tests
+install-bin: install-tools install-tests install-traceevent-plugins
-install: install-bin try-install-man install-traceevent-plugins
+install: install-bin try-install-man
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index d28c1b6a3b54..fa5d17af88b7 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -17,7 +17,7 @@ static pid_t spawn(void)
if (pid)
return pid;
- while(1);
+ while(1)
sleep(5);
return 0;
}
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index 9223c164e545..1f86ee8fb831 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -63,6 +63,8 @@ struct pt_regs_offset {
# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
#endif
+/* TODO: switching by dwarf address size */
+#ifndef __x86_64__
static const struct pt_regs_offset x86_32_regoffset_table[] = {
REG_OFFSET_NAME_32("%ax", eax),
REG_OFFSET_NAME_32("%cx", ecx),
@@ -75,6 +77,8 @@ static const struct pt_regs_offset x86_32_regoffset_table[] = {
REG_OFFSET_END,
};
+#define regoffset_table x86_32_regoffset_table
+#else
static const struct pt_regs_offset x86_64_regoffset_table[] = {
REG_OFFSET_NAME_64("%ax", rax),
REG_OFFSET_NAME_64("%dx", rdx),
@@ -95,11 +99,7 @@ static const struct pt_regs_offset x86_64_regoffset_table[] = {
REG_OFFSET_END,
};
-/* TODO: switching by dwarf address size */
-#ifdef __x86_64__
#define regoffset_table x86_64_regoffset_table
-#else
-#define regoffset_table x86_32_regoffset_table
#endif
/* Minus 1 for the ending REG_OFFSET_END */
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 492df2752a2d..b4eb5b679081 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1570,13 +1570,13 @@ static int __bench_numa(const char *name)
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
- char tname[32];
+ char tname[14 + 2 * 10 + 1];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
- memset(tname, 0, 32);
+ memset(tname, 0, sizeof(tname));
td = g->threads + p*g->p.nr_threads + t;
- snprintf(tname, 32, "process%d:thread%d", p, t);
+ snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
print_res(tname, td->speed_gbs,
"GB/sec", "thread-speed", "GB/sec/thread speed");
print_res(tname, td->system_time_ns / 1e9,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 368d1e1561f7..48840556bf2d 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1253,21 +1253,19 @@ static int is_directory(const char *base_path, const struct dirent *dent)
return S_ISDIR(st.st_mode);
}
-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\
- while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \
- lang_next) \
- if ((lang_dirent.d_type == DT_DIR || \
- (lang_dirent.d_type == DT_UNKNOWN && \
- is_directory(scripts_path, &lang_dirent))) && \
- (strcmp(lang_dirent.d_name, ".")) && \
- (strcmp(lang_dirent.d_name, "..")))
-
-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\
- while (!readdir_r(lang_dir, &script_dirent, &script_next) && \
- script_next) \
- if (script_dirent.d_type != DT_DIR && \
- (script_dirent.d_type != DT_UNKNOWN || \
- !is_directory(lang_path, &script_dirent)))
+#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
+ while ((lang_dirent = readdir(scripts_dir)) != NULL) \
+ if ((lang_dirent->d_type == DT_DIR || \
+ (lang_dirent->d_type == DT_UNKNOWN && \
+ is_directory(scripts_path, lang_dirent))) && \
+ (strcmp(lang_dirent->d_name, ".")) && \
+ (strcmp(lang_dirent->d_name, "..")))
+
+#define for_each_script(lang_path, lang_dir, script_dirent) \
+ while ((script_dirent = readdir(lang_dir)) != NULL) \
+ if (script_dirent->d_type != DT_DIR && \
+ (script_dirent->d_type != DT_UNKNOWN || \
+ !is_directory(lang_path, script_dirent)))
#define RECORD_SUFFIX "-record"
@@ -1413,7 +1411,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
const char *s __maybe_unused,
int unset __maybe_unused)
{
- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
char script_path[MAXPATHLEN];
@@ -1428,19 +1426,19 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
if (!scripts_dir)
return -1;
- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
- lang_dirent.d_name);
+ lang_dirent->d_name);
lang_dir = opendir(lang_path);
if (!lang_dir)
continue;
- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+ for_each_script(lang_path, lang_dir, script_dirent) {
+ script_root = get_script_root(script_dirent, REPORT_SUFFIX);
if (script_root) {
desc = script_desc__findnew(script_root);
snprintf(script_path, MAXPATHLEN, "%s/%s",
- lang_path, script_dirent.d_name);
+ lang_path, script_dirent->d_name);
read_script_info(desc, script_path);
free(script_root);
}
@@ -1528,7 +1526,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
*/
int find_scripts(char **scripts_array, char **scripts_path_array)
{
- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
@@ -1551,9 +1549,9 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
return -1;
}
- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
- lang_dirent.d_name);
+ lang_dirent->d_name);
#ifdef NO_LIBPERL
if (strstr(lang_path, "perl"))
continue;
@@ -1567,16 +1565,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
if (!lang_dir)
continue;
- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+ for_each_script(lang_path, lang_dir, script_dirent) {
/* Skip those real time scripts: xxxtop.p[yl] */
- if (strstr(script_dirent.d_name, "top."))
+ if (strstr(script_dirent->d_name, "top."))
continue;
sprintf(scripts_path_array[i], "%s/%s", lang_path,
- script_dirent.d_name);
- temp = strchr(script_dirent.d_name, '.');
+ script_dirent->d_name);
+ temp = strchr(script_dirent->d_name, '.');
snprintf(scripts_array[i],
- (temp - script_dirent.d_name) + 1,
- "%s", script_dirent.d_name);
+ (temp - script_dirent->d_name) + 1,
+ "%s", script_dirent->d_name);
if (check_ev_match(lang_path,
scripts_array[i], session))
@@ -1594,7 +1592,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
static char *get_script_path(const char *script_root, const char *suffix)
{
- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN];
char script_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
@@ -1607,21 +1605,21 @@ static char *get_script_path(const char *script_root, const char *suffix)
if (!scripts_dir)
return NULL;
- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
- lang_dirent.d_name);
+ lang_dirent->d_name);
lang_dir = opendir(lang_path);
if (!lang_dir)
continue;
- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- __script_root = get_script_root(&script_dirent, suffix);
+ for_each_script(lang_path, lang_dir, script_dirent) {
+ __script_root = get_script_root(script_dirent, suffix);
if (__script_root && !strcmp(script_root, __script_root)) {
free(__script_root);
closedir(lang_dir);
closedir(scripts_dir);
snprintf(script_path, MAXPATHLEN, "%s/%s",
- lang_path, script_dirent.d_name);
+ lang_path, script_dirent->d_name);
return strdup(script_path);
}
free(__script_root);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7e2e72e6d9d1..4a8a02c302d2 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -636,7 +636,7 @@ repeat:
case -1:
if (errno == EINTR)
continue;
- /* Fall trhu */
+ __fallthrough;
default:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index c783d8fd3a80..ebe7115c751a 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1617,6 +1617,7 @@ static int trace__process_event(struct trace *trace, struct machine *machine,
color_fprintf(trace->output, PERF_COLOR_RED,
"LOST %" PRIu64 " events!\n", event->lost.lost);
ret = machine__process_lost_event(machine, event, sample);
+ break;
default:
ret = machine__process_event(machine, event, sample);
break;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 636d7b42d844..54af2f2e2ee4 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1727,15 +1727,14 @@ static int test_pmu_events(void)
}
while (!ret && (ent = readdir(dir))) {
-#define MAX_NAME 100
struct evlist_test e;
- char name[MAX_NAME];
+ char name[2 * NAME_MAX + 1 + 12 + 3];
if (!strcmp(ent->d_name, ".") ||
!strcmp(ent->d_name, ".."))
continue;
- snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
+ snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events;
@@ -1743,11 +1742,10 @@ static int test_pmu_events(void)
ret = test_event(&e);
if (ret)
break;
- snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+ snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events_mix;
ret = test_event(&e);
-#undef MAX_NAME
}
closedir(dir);
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index e9703c0829f1..07b5f5951b25 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -702,7 +702,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
- if (row++ == 0)
+ if (++row == 0)
goto out;
} else
row = 0;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index d4d7cc27252f..718bd46d47fa 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
nd = browser->curr_hot;
break;
case K_UNTAB:
- if (nd != NULL)
+ if (nd != NULL) {
nd = rb_next(nd);
if (nd == NULL)
nd = rb_first(&browser->entries);
- else
+ } else
nd = browser->curr_hot;
break;
case K_F1:
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 956187bf1a85..26cba64345e3 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -416,7 +416,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
{
char filename[PATH_MAX];
DIR *tasks;
- struct dirent dirent, *next;
+ struct dirent *dirent;
pid_t tgid, ppid;
int rc = 0;
@@ -445,11 +445,11 @@ static int __event__synthesize_thread(union perf_event *comm_event,
return 0;
}
- while (!readdir_r(tasks, &dirent, &next) && next) {
+ while ((dirent = readdir(tasks)) != NULL) {
char *end;
pid_t _pid;
- _pid = strtol(dirent.d_name, &end, 10);
+ _pid = strtol(dirent->d_name, &end, 10);
if (*end)
continue;
@@ -558,7 +558,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
{
DIR *proc;
char proc_path[PATH_MAX];
- struct dirent dirent, *next;
+ struct dirent *dirent;
union perf_event *comm_event, *mmap_event, *fork_event;
int err = -1;
@@ -583,9 +583,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (proc == NULL)
goto out_free_fork;
- while (!readdir_r(proc, &dirent, &next) && next) {
+ while ((dirent = readdir(proc)) != NULL) {
char *end;
- pid_t pid = strtol(dirent.d_name, &end, 10);
+ pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
continue;
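
This hunk (together with the matching ones in builtin-script.c, parse-events.c
and thread_map.c) drops readdir_r(), which glibc has deprecated, in favour of
plain readdir(); the returned dirent is only read inside the loop, so no
caller-supplied buffer is needed. A standalone userspace version of the same
/proc scan, using only POSIX calls (it simply prints the numeric entries):

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	DIR *proc = opendir("/proc");
	struct dirent *dirent;

	if (!proc)
		return 1;

	/* readdir() replaces the deprecated readdir_r(); no caller buffer needed */
	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		long pid = strtol(dirent->d_name, &end, 10);

		if (*end)	/* only interested in proper numerical dirents */
			continue;
		printf("pid %ld\n", pid);
	}
	closedir(proc);
	return 0;
}
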
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 71df7acf8643..eeeae0629ad3 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
+#include <linux/compiler.h>
#include "../cache.h"
#include "../util.h"
@@ -63,6 +64,25 @@ enum intel_pt_pkt_state {
INTEL_PT_STATE_FUP_NO_TIP,
};
+static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
+{
+ switch (pkt_state) {
+ case INTEL_PT_STATE_NO_PSB:
+ case INTEL_PT_STATE_NO_IP:
+ case INTEL_PT_STATE_ERR_RESYNC:
+ case INTEL_PT_STATE_IN_SYNC:
+ case INTEL_PT_STATE_TNT:
+ return true;
+ case INTEL_PT_STATE_TIP:
+ case INTEL_PT_STATE_TIP_PGD:
+ case INTEL_PT_STATE_FUP:
+ case INTEL_PT_STATE_FUP_NO_TIP:
+ return false;
+ default:
+ return true;
+ }
+}
+
#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB
@@ -90,6 +110,7 @@ struct intel_pt_decoder {
bool have_tma;
bool have_cyc;
bool fixup_last_mtc;
+ bool have_last_ip;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
@@ -97,6 +118,7 @@ struct intel_pt_decoder {
uint64_t timestamp;
uint64_t tsc_timestamp;
uint64_t ref_timestamp;
+ uint64_t sample_timestamp;
uint64_t ret_addr;
uint64_t ctc_timestamp;
uint64_t ctc_delta;
@@ -124,8 +146,6 @@ struct intel_pt_decoder {
bool have_calc_cyc_to_tsc;
int exec_mode;
unsigned int insn_bytes;
- uint64_t sign_bit;
- uint64_t sign_bits;
uint64_t period;
enum intel_pt_period_type period_type;
uint64_t tot_insn_cnt;
@@ -139,6 +159,7 @@ struct intel_pt_decoder {
unsigned int fup_tx_flags;
unsigned int tx_flags;
uint64_t timestamp_insn_cnt;
+ uint64_t sample_insn_cnt;
uint64_t stuck_ip;
int no_progress;
int stuck_ip_prd;
@@ -192,9 +213,6 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->data = params->data;
decoder->return_compression = params->return_compression;
- decoder->sign_bit = (uint64_t)1 << 47;
- decoder->sign_bits = ~(((uint64_t)1 << 48) - 1);
-
decoder->period = params->period;
decoder->period_type = params->period_type;
@@ -363,21 +381,30 @@ int intel_pt__strerror(int code, char *buf, size_t buflen)
return 0;
}
-static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
- const struct intel_pt_pkt *packet,
+static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
uint64_t last_ip)
{
uint64_t ip;
switch (packet->count) {
- case 2:
+ case 1:
ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
packet->payload;
break;
- case 4:
+ case 2:
ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
packet->payload;
break;
+ case 3:
+ ip = packet->payload;
+ /* Sign-extend 6-byte ip */
+ if (ip & (uint64_t)0x800000000000ULL)
+ ip |= (uint64_t)0xffff000000000000ULL;
+ break;
+ case 4:
+ ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
+ packet->payload;
+ break;
case 6:
ip = packet->payload;
break;
@@ -385,16 +412,13 @@ static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
return 0;
}
- if (ip & decoder->sign_bit)
- return ip | decoder->sign_bits;
-
return ip;
}
static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
{
- decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet,
- decoder->last_ip);
+ decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
+ decoder->have_last_ip = true;
}
static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -895,6 +919,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
decoder->tot_insn_cnt += insn_cnt;
decoder->timestamp_insn_cnt += insn_cnt;
+ decoder->sample_insn_cnt += insn_cnt;
decoder->period_insn_cnt += insn_cnt;
if (err) {
@@ -1413,7 +1438,8 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
case INTEL_PT_FUP:
decoder->pge = true;
- intel_pt_set_last_ip(decoder);
+ if (decoder->packet.count)
+ intel_pt_set_last_ip(decoder);
break;
case INTEL_PT_MODE_TSX:
@@ -1617,6 +1643,8 @@ next:
break;
case INTEL_PT_PSB:
+ decoder->last_ip = 0;
+ decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psbend(decoder);
if (err == -EAGAIN)
@@ -1695,6 +1723,13 @@ next:
}
}
+static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
+{
+ return decoder->packet.count &&
+ (decoder->have_last_ip || decoder->packet.count == 3 ||
+ decoder->packet.count == 6);
+}
+
/* Walk PSB+ packets to get in sync. */
static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
{
@@ -1708,6 +1743,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1715,8 +1751,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
case INTEL_PT_FUP:
decoder->pge = true;
- if (decoder->last_ip || decoder->packet.count == 6 ||
- decoder->packet.count == 0) {
+ if (intel_pt_have_ip(decoder)) {
uint64_t current_ip = decoder->ip;
intel_pt_set_ip(decoder);
@@ -1762,6 +1797,8 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
decoder->pge = false;
decoder->continuous_period = false;
intel_pt_clear_tx_flags(decoder);
+ __fallthrough;
+
case INTEL_PT_TNT:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1802,27 +1839,21 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
- if (decoder->last_ip || decoder->packet.count == 6 ||
- decoder->packet.count == 0)
+ if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (decoder->ip)
return 0;
break;
case INTEL_PT_FUP:
- if (decoder->overflow) {
- if (decoder->last_ip ||
- decoder->packet.count == 6 ||
- decoder->packet.count == 0)
- intel_pt_set_ip(decoder);
- if (decoder->ip)
- return 0;
- }
- if (decoder->packet.count)
- intel_pt_set_last_ip(decoder);
+ if (intel_pt_have_ip(decoder))
+ intel_pt_set_ip(decoder);
+ if (decoder->ip)
+ return 0;
break;
case INTEL_PT_MTC:
@@ -1871,6 +1902,9 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_PSB:
+ decoder->last_ip = 0;
+ decoder->have_last_ip = true;
+ intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psb(decoder);
if (err)
return err;
@@ -1896,6 +1930,8 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
{
int err;
+ decoder->set_fup_tx_flags = false;
+
intel_pt_log("Scanning for full IP\n");
err = intel_pt_walk_to_ip(decoder);
if (err)
@@ -2004,6 +2040,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
decoder->pge = false;
decoder->continuous_period = false;
+ decoder->have_last_ip = false;
decoder->last_ip = 0;
decoder->ip = 0;
intel_pt_clear_stack(&decoder->stack);
@@ -2012,6 +2049,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
if (err)
return err;
+ decoder->have_last_ip = true;
decoder->pkt_state = INTEL_PT_STATE_NO_IP;
err = intel_pt_walk_psb(decoder);
@@ -2030,7 +2068,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
{
- uint64_t est = decoder->timestamp_insn_cnt << 1;
+ uint64_t est = decoder->sample_insn_cnt << 1;
if (!decoder->cbr || !decoder->max_non_turbo_ratio)
goto out;
@@ -2038,7 +2076,7 @@ static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
est *= decoder->max_non_turbo_ratio;
est /= decoder->cbr;
out:
- return decoder->timestamp + est;
+ return decoder->sample_timestamp + est;
}
const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
@@ -2054,7 +2092,9 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
err = intel_pt_sync(decoder);
break;
case INTEL_PT_STATE_NO_IP:
+ decoder->have_last_ip = false;
decoder->last_ip = 0;
+ decoder->ip = 0;
/* Fall through */
case INTEL_PT_STATE_ERR_RESYNC:
err = intel_pt_sync_ip(decoder);
@@ -2091,15 +2131,24 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
}
} while (err == -ENOLINK);
- decoder->state.err = err ? intel_pt_ext_err(err) : 0;
- decoder->state.timestamp = decoder->timestamp;
+ if (err) {
+ decoder->state.err = intel_pt_ext_err(err);
+ decoder->state.from_ip = decoder->ip;
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+ } else {
+ decoder->state.err = 0;
+ if (intel_pt_sample_time(decoder->pkt_state)) {
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+ }
+ }
+
+ decoder->state.timestamp = decoder->sample_timestamp;
decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
decoder->state.cr3 = decoder->cr3;
decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
- if (err)
- decoder->state.from_ip = decoder->ip;
-
return &decoder->state;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index b1257c816310..7528ae4f7e28 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <endian.h>
#include <byteswap.h>
+#include <linux/compiler.h>
#include "intel-pt-pkt-decoder.h"
@@ -292,36 +293,46 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
- switch (byte >> 5) {
+ int ip_len;
+
+ packet->count = byte >> 5;
+
+ switch (packet->count) {
case 0:
- packet->count = 0;
+ ip_len = 0;
break;
case 1:
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 2;
+ ip_len = 2;
packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
break;
case 2:
if (len < 5)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 4;
+ ip_len = 4;
packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
break;
case 3:
- case 6:
+ case 4:
if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 6;
+ ip_len = 6;
memcpy_le64(&packet->payload, buf + 1, 6);
break;
+ case 6:
+ if (len < 9)
+ return INTEL_PT_NEED_MORE_BYTES;
+ ip_len = 8;
+ packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
+ break;
default:
return INTEL_PT_BAD_PACKET;
}
packet->type = type;
- return packet->count + 1;
+ return ip_len + 1;
}
static int intel_pt_get_mode(const unsigned char *buf, size_t len,
@@ -488,6 +499,7 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
case INTEL_PT_FUP:
if (!(packet->count))
return snprintf(buf, buf_len, "%s no ip", name);
+ __fallthrough;
case INTEL_PT_CYC:
case INTEL_PT_VMCS:
case INTEL_PT_MTC:
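
The packet-decoder hunk above keeps the raw IPBytes field (byte >> 5) in
packet->count and adds handling for the 0x4 (48-bit update) and 0x6 (full
64-bit) encodings, while the decoder change restricts sign extension to the
48-bit payload of the 0x3 case in intel_pt_calc_ip(). That sign extension
reduces to the following (sext48 is an illustrative name; it assumes a
two's-complement 48-bit value in the low bits of a uint64_t):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sign-extend a 6-byte (48-bit) IP payload to 64 bits. */
static uint64_t sext48(uint64_t ip)
{
	if (ip & UINT64_C(0x800000000000))	/* bit 47 set: negative value */
		ip |= UINT64_C(0xffff000000000000);
	return ip;
}

int main(void)
{
	/* Positive payload is returned unchanged. */
	printf("0x%" PRIx64 "\n", sext48(UINT64_C(0x00007fffffffffff)));
	/* Kernel-style address gets its upper 16 bits filled with ones. */
	printf("0x%" PRIx64 "\n", sext48(UINT64_C(0x0000ffff81230000)));
	return 0;
}
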
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 854dd2105bd5..881bbb5e7912 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -138,11 +138,11 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
-#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
- while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
- if (sys_dirent.d_type == DT_DIR && \
- (strcmp(sys_dirent.d_name, ".")) && \
- (strcmp(sys_dirent.d_name, "..")))
+#define for_each_subsystem(sys_dir, sys_dirent) \
+ while ((sys_dirent = readdir(sys_dir)) != NULL) \
+ if (sys_dirent->d_type == DT_DIR && \
+ (strcmp(sys_dirent->d_name, ".")) && \
+ (strcmp(sys_dirent->d_name, "..")))
static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
@@ -159,12 +159,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
return 0;
}
-#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
- while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
- if (evt_dirent.d_type == DT_DIR && \
- (strcmp(evt_dirent.d_name, ".")) && \
- (strcmp(evt_dirent.d_name, "..")) && \
- (!tp_event_has_id(&sys_dirent, &evt_dirent)))
+#define for_each_event(sys_dirent, evt_dir, evt_dirent) \
+ while ((evt_dirent = readdir(evt_dir)) != NULL) \
+ if (evt_dirent->d_type == DT_DIR && \
+ (strcmp(evt_dirent->d_name, ".")) && \
+ (strcmp(evt_dirent->d_name, "..")) && \
+ (!tp_event_has_id(sys_dirent, evt_dirent)))
#define MAX_EVENT_LENGTH 512
@@ -173,7 +173,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
struct tracepoint_path *path = NULL;
DIR *sys_dir, *evt_dir;
- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ struct dirent *sys_dirent, *evt_dirent;
char id_buf[24];
int fd;
u64 id;
@@ -184,18 +184,18 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
if (!sys_dir)
return NULL;
- for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ for_each_subsystem(sys_dir, sys_dirent) {
snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
- sys_dirent.d_name);
+ sys_dirent->d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
continue;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ for_each_event(sys_dirent, evt_dir, evt_dirent) {
snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
- evt_dirent.d_name);
+ evt_dirent->d_name);
fd = open(evt_path, O_RDONLY);
if (fd < 0)
continue;
@@ -220,9 +220,9 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
free(path);
return NULL;
}
- strncpy(path->system, sys_dirent.d_name,
+ strncpy(path->system, sys_dirent->d_name,
MAX_EVENT_LENGTH);
- strncpy(path->name, evt_dirent.d_name,
+ strncpy(path->name, evt_dirent->d_name,
MAX_EVENT_LENGTH);
return path;
}
@@ -1662,7 +1662,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
bool name_only)
{
DIR *sys_dir, *evt_dir;
- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ struct dirent *sys_dirent, *evt_dirent;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
char **evt_list = NULL;
@@ -1680,20 +1680,20 @@ restart:
goto out_close_sys_dir;
}
- for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ for_each_subsystem(sys_dir, sys_dirent) {
if (subsys_glob != NULL &&
- !strglobmatch(sys_dirent.d_name, subsys_glob))
+ !strglobmatch(sys_dirent->d_name, subsys_glob))
continue;
snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
- sys_dirent.d_name);
+ sys_dirent->d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
continue;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ for_each_event(sys_dirent, evt_dir, evt_dirent) {
if (event_glob != NULL &&
- !strglobmatch(evt_dirent.d_name, event_glob))
+ !strglobmatch(evt_dirent->d_name, event_glob))
continue;
if (!evt_num_known) {
@@ -1702,7 +1702,7 @@ restart:
}
snprintf(evt_path, MAXPATHLEN, "%s:%s",
- sys_dirent.d_name, evt_dirent.d_name);
+ sys_dirent->d_name, evt_dirent->d_name);
evt_list[evt_i] = strdup(evt_path);
if (evt_list[evt_i] == NULL)
@@ -1755,7 +1755,7 @@ out_close_sys_dir:
int is_valid_tracepoint(const char *event_string)
{
DIR *sys_dir, *evt_dir;
- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ struct dirent *sys_dirent, *evt_dirent;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
@@ -1763,17 +1763,17 @@ int is_valid_tracepoint(const char *event_string)
if (!sys_dir)
return 0;
- for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ for_each_subsystem(sys_dir, sys_dirent) {
snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
- sys_dirent.d_name);
+ sys_dirent->d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
continue;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ for_each_event(sys_dirent, evt_dir, evt_dirent) {
snprintf(evt_path, MAXPATHLEN, "%s:%s",
- sys_dirent.d_name, evt_dirent.d_name);
+ sys_dirent->d_name, evt_dirent->d_name);
if (!strcmp(evt_path, event_string)) {
closedir(evt_dir);
closedir(sys_dir);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 6f2a0279476c..593066c68e3d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -153,7 +153,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
if (fd == -1)
return -1;
- sret = read(fd, alias->unit, UNIT_MAX_LEN);
+ sret = read(fd, alias->unit, UNIT_MAX_LEN);
if (sret < 0)
goto error;
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 6516e220c247..82d28c67e0f3 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,6 +1,6 @@
libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
index bcae659b6546..efb53772e0ec 100644
--- a/tools/perf/util/strfilter.c
+++ b/tools/perf/util/strfilter.c
@@ -269,6 +269,7 @@ static int strfilter_node__sprint(struct strfilter_node *node, char *buf)
len = strfilter_node__sprint_pt(node->l, buf);
if (len < 0)
return len;
+ __fallthrough;
case '!':
if (buf) {
*(buf + len++) = *node->p;
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index fc8781de62db..accb7ece1d3c 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -21,6 +21,8 @@ s64 perf_atoll(const char *str)
case 'b': case 'B':
if (*p)
goto out_err;
+
+ __fallthrough;
case '\0':
return length;
default:
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 27ae382feb2d..7c97ecaeae48 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -488,6 +488,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
break;
} else {
int n = namesz + descsz;
+
+ if (n > (int)sizeof(bf)) {
+ n = sizeof(bf);
+ pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
+ __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
+ }
if (read(fd, bf, n) != n)
break;
}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 0a9ae8014729..829508a21448 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -227,7 +227,7 @@ void thread__find_cpumode_addr_location(struct thread *thread,
struct addr_location *al)
{
size_t i;
- const u8 const cpumodes[] = {
+ const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 6ec3c5ca438f..4e666b95b87e 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -92,8 +92,8 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
{
DIR *proc;
int max_threads = 32, items, i;
- char path[256];
- struct dirent dirent, *next, **namelist = NULL;
+ char path[NAME_MAX + 1 + 6];
+ struct dirent *dirent, **namelist = NULL;
struct thread_map *threads = thread_map__alloc(max_threads);
if (threads == NULL)
@@ -106,16 +106,16 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
threads->nr = 0;
atomic_set(&threads->refcnt, 1);
- while (!readdir_r(proc, &dirent, &next) && next) {
+ while ((dirent = readdir(proc)) != NULL) {
char *end;
bool grow = false;
struct stat st;
- pid_t pid = strtol(dirent.d_name, &end, 10);
+ pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
continue;
- snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);
+ snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
if (stat(path, &st) != 0)
continue;
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a958aaf..763f37fecfb8 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
if (chdir(cwd) != 0)
err(1, "chdir to private tmpfs");
-
- if (umount2(".", MNT_DETACH) != 0)
- err(1, "detach private tmpfs");
}
static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
err(1, "chown");
if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
err(1, "chmod");
-}
+ }
capng_get_caps_process();
@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
} else {
printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
exec_other_validate_cap("./validate_cap_sgidnonroot",
- false, false, true, false);
+ false, false, true, false);
if (fork_wait()) {
printf("[RUN]\tNon-root +ia, sgidroot => i\n");
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 1dd087da6f31..111e09c3f4bf 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -47,6 +47,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
return vfio_group;
}
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep)
+{
+ bool ret, (*fn)(struct vfio_group *, struct file *);
+
+ fn = symbol_get(vfio_external_group_match_file);
+ if (!fn)
+ return false;
+
+ ret = fn(group, filep);
+
+ symbol_put(vfio_external_group_match_file);
+
+ return ret;
+}
+
static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
void (*fn)(struct vfio_group *);
@@ -171,18 +187,13 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
if (!f.file)
return -EBADF;
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
-
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
-
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group != vfio_group)
+ if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+ f.file))
continue;
list_del(&kvg->node);
@@ -196,7 +207,7 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
mutex_unlock(&kv->lock);
- kvm_vfio_group_put_external_user(vfio_group);
+ fdput(f);
kvm_vfio_update_coherency(dev);